/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs42.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *, bool);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	struct lsm_context shim;
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, NULL, &shim);
	if (err)
		return NULL;

	label->lsmid = shim.id;
	label->label = shim.context;
	label->len = shim.len;
	return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	struct lsm_context shim;

	if (label) {
		shim.context = label->label;
		shim.len = label->len;
		shim.id = label->lsmid;
		security_release_secctx(&shim);
	}
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	case -ENETDOWN:
	case -ENETUNREACH:
		break;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
			| FATTR4_WORD2_CHANGE_ATTR_TYPE
			| FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
				    struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs_have_read_or_write_delegation(inode))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);

	if (nfs_have_delegated_mtime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
		if (!(cache_validity & NFS_INO_INVALID_MTIME))
			dst[1] &= ~FATTR4_WORD1_TIME_MODIFY;
		if (!(cache_validity & NFS_INO_INVALID_CTIME))
			dst[1] &= ~FATTR4_WORD1_TIME_METADATA;
	} else if (nfs_have_delegated_atime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
	}
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;			/* next */
		*p++ = xdr_zero;		/* cookie, first word */
		*p++ = xdr_one;			/* cookie, second word */
		*p++ = xdr_one;			/* entry len */
		memcpy(p, ".\0\0\0", 4);	/* entry */
		p++;
		*p++ = xdr_one;			/* bitmap length */
		*p++ = htonl(attrs);		/* bitmap */
		*p++ = htonl(12);		/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;				/* next */
	*p++ = xdr_zero;			/* cookie, first word */
	*p++ = xdr_two;				/* cookie, second word */
	*p++ = xdr_two;				/* entry len */
	memcpy(p, "..\0\0", 4);			/* entry */
	p++;
	*p++ = xdr_one;				/* bitmap length */
	*p++ = htonl(attrs);			/* bitmap */
	*p++ = htonl(12);			/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
		fattr->pre_change_attr = version;
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
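	/* Sleep for the (exponentially growing) retry delay; only a fatal signal ends the wait early */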
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/*
 * Track the number of NFS4ERR_DELAY related retransmissions and return
 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
 * set by 'nfs_delay_retrans'.
 */
static int nfs4_exception_should_retrans(const struct nfs_server *server,
					 struct nfs4_exception *exception)
{
	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
			return -EAGAIN;
	}
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		ret = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
}

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_used_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
	nfs4_slot_sequence_record_sent(slot, seqnr);
	slot->seq_nr_last_acked = seqnr;
}

static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation.
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot\n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else /* !CONFIG_NFS_V4_1 */

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif /* !CONFIG_NFS_V4_1 */

static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client,
			data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

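/* Run a caller-assembled rpc_task_setup synchronously and return the task's final tk_status */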
static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	unsigned short task_flags = 0;

	if (server->caps & NFS_CAP_MOVEABLE)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}


int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	inc_nlink(inode);
}

static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	drop_nlink(inode);
}

static void
nfs4_update_changeattr_locked(struct inode *inode,
		struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	if (!nfs_have_delegated_mtime(inode))
		cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!nfs_have_delegated_attributes(inode))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
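		/* The change was not atomic, so shrink the attribute cache timeout to its minimum */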
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}

void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}

struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	return res;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = nfs4_fmode_to_share_access(fmode);

	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT) {
		res |= NFS4_SHARE_WANT_NO_DELEG;
		goto out;
	}
	/* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
					sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

#ifdef CONFIG_NFS_V4_1
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

/*
 * Check for whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)
{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {
		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status)
			break;
		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current) &&
		    !nfs_current_task_exiting()) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
		const nfs4_stateid *deleg_stateid,
		fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

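/*
 * Record a new open stateid and/or delegation stateid for this nfs4_state.
 * Returns 1 if the state was updated, 0 otherwise.
 */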
int update_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		const nfs4_stateid *delegation,
		fmode_t fmode)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	nfs4_stateid freeme = { };
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	spin_lock(&state->owner->so_lock);
	if (open_stateid != NULL) {
		nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
		ret = 1;
	}

	deleg_cur = nfs4_get_valid_delegation(state->inode);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
	    test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	if (ret)
		update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
	rcu_read_unlock();

	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(clp);
	if (freeme.type != 0)
		nfs4_test_and_free_stateid(server, &freeme,
				state->owner->so_cred);

	return ret;
}

static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	fmode &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	enum open_claim_type4 claim = opendata->o_arg.claim;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode, claim)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = nfs4_get_valid_delegation(state->inode);
		if (!can_open_delegated(delegation, fmode, claim)) {
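			/* No usable delegation: fall back to sending an OPEN to the server */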
rcu_read_unlock(); 1977 break; 1978 } 1979 /* Save the delegation */ 1980 nfs4_stateid_copy(&stateid, &delegation->stateid); 1981 rcu_read_unlock(); 1982 nfs_release_seqid(opendata->o_arg.seqid); 1983 if (!opendata->is_recover) { 1984 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1985 if (ret != 0) 1986 goto out; 1987 } 1988 ret = -EAGAIN; 1989 1990 /* Try to update the stateid using the delegation */ 1991 if (update_open_stateid(state, NULL, &stateid, fmode)) 1992 goto out_return_state; 1993 } 1994 out: 1995 return ERR_PTR(ret); 1996 out_return_state: 1997 refcount_inc(&state->count); 1998 return state; 1999 } 2000 2001 static void 2002 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 2003 enum open_claim_type4 claim, 2004 const struct nfs4_open_delegation *delegation) 2005 { 2006 switch (delegation->open_delegation_type) { 2007 case NFS4_OPEN_DELEGATE_READ: 2008 case NFS4_OPEN_DELEGATE_WRITE: 2009 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 2010 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2011 break; 2012 default: 2013 return; 2014 } 2015 switch (claim) { 2016 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2017 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2018 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 2019 "returning a delegation for " 2020 "OPEN(CLAIM_DELEGATE_CUR)\n", 2021 NFS_SERVER(inode)->nfs_client->cl_hostname); 2022 break; 2023 case NFS4_OPEN_CLAIM_PREVIOUS: 2024 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 2025 &delegation->stateid, 2026 delegation->pagemod_limit, 2027 delegation->open_delegation_type); 2028 break; 2029 default: 2030 nfs_inode_set_delegation(inode, cred, delegation->type, 2031 &delegation->stateid, 2032 delegation->pagemod_limit, 2033 delegation->open_delegation_type); 2034 } 2035 if (delegation->do_recall) 2036 nfs_async_inode_return_delegation(inode, &delegation->stateid); 2037 } 2038 2039 /* 2040 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 2041 * and update the nfs4_state. 
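 * If the reclaim RPC never completed we fall back to a cached open;
 * otherwise refresh the inode, process any delegation the server handed
 * back, and update the open stateid unless NFS4_OPEN_RESULT_NO_OPEN_STATEID
 * was set in the reply.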
2042 */ 2043 static struct nfs4_state * 2044 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2045 { 2046 struct inode *inode = data->state->inode; 2047 struct nfs4_state *state = data->state; 2048 int ret; 2049 2050 if (!data->rpc_done) { 2051 if (data->rpc_status) 2052 return ERR_PTR(data->rpc_status); 2053 return nfs4_try_open_cached(data); 2054 } 2055 2056 ret = nfs_refresh_inode(inode, &data->f_attr); 2057 if (ret) 2058 return ERR_PTR(ret); 2059 2060 nfs4_process_delegation(state->inode, 2061 data->owner->so_cred, 2062 data->o_arg.claim, 2063 &data->o_res.delegation); 2064 2065 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2066 if (!update_open_stateid(state, &data->o_res.stateid, 2067 NULL, data->o_arg.fmode)) 2068 return ERR_PTR(-EAGAIN); 2069 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2070 return ERR_PTR(-EAGAIN); 2071 refcount_inc(&state->count); 2072 2073 return state; 2074 } 2075 2076 static struct inode * 2077 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2078 { 2079 struct inode *inode; 2080 2081 switch (data->o_arg.claim) { 2082 case NFS4_OPEN_CLAIM_NULL: 2083 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2084 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2085 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2086 return ERR_PTR(-EAGAIN); 2087 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2088 &data->f_attr); 2089 break; 2090 default: 2091 inode = d_inode(data->dentry); 2092 ihold(inode); 2093 nfs_refresh_inode(inode, &data->f_attr); 2094 } 2095 return inode; 2096 } 2097 2098 static struct nfs4_state * 2099 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2100 { 2101 struct nfs4_state *state; 2102 struct inode *inode; 2103 2104 inode = nfs4_opendata_get_inode(data); 2105 if (IS_ERR(inode)) 2106 return ERR_CAST(inode); 2107 if (data->state != NULL && data->state->inode == inode) { 2108 state = data->state; 2109 refcount_inc(&state->count); 2110 } else 2111 state = nfs4_get_open_state(inode, data->owner); 2112 iput(inode); 2113 if (state == NULL) 2114 state = ERR_PTR(-ENOMEM); 2115 return state; 2116 } 2117 2118 static struct nfs4_state * 2119 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2120 { 2121 struct nfs4_state *state; 2122 2123 if (!data->rpc_done) { 2124 state = nfs4_try_open_cached(data); 2125 trace_nfs4_cached_open(data->state); 2126 goto out; 2127 } 2128 2129 state = nfs4_opendata_find_nfs4_state(data); 2130 if (IS_ERR(state)) 2131 goto out; 2132 2133 nfs4_process_delegation(state->inode, 2134 data->owner->so_cred, 2135 data->o_arg.claim, 2136 &data->o_res.delegation); 2137 2138 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2139 if (!update_open_stateid(state, &data->o_res.stateid, 2140 NULL, data->o_arg.fmode)) { 2141 nfs4_put_open_state(state); 2142 state = ERR_PTR(-EAGAIN); 2143 } 2144 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2145 nfs4_put_open_state(state); 2146 state = ERR_PTR(-EAGAIN); 2147 } 2148 out: 2149 nfs_release_seqid(data->o_arg.seqid); 2150 return state; 2151 } 2152 2153 static struct nfs4_state * 2154 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2155 { 2156 struct nfs4_state *ret; 2157 2158 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2159 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2160 else 2161 ret = _nfs4_opendata_to_nfs4_state(data); 2162 nfs4_sequence_free_slot(&data->o_res.seq_res); 2163 return ret; 2164 } 2165 2166 static struct nfs_open_context * 2167 nfs4_state_find_open_context_mode(struct nfs4_state 
*state, fmode_t mode) 2168 { 2169 struct nfs_inode *nfsi = NFS_I(state->inode); 2170 struct nfs_open_context *ctx; 2171 2172 rcu_read_lock(); 2173 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2174 if (ctx->state != state) 2175 continue; 2176 if ((ctx->mode & mode) != mode) 2177 continue; 2178 if (!get_nfs_open_context(ctx)) 2179 continue; 2180 rcu_read_unlock(); 2181 return ctx; 2182 } 2183 rcu_read_unlock(); 2184 return ERR_PTR(-ENOENT); 2185 } 2186 2187 static struct nfs_open_context * 2188 nfs4_state_find_open_context(struct nfs4_state *state) 2189 { 2190 struct nfs_open_context *ctx; 2191 2192 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2193 if (!IS_ERR(ctx)) 2194 return ctx; 2195 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2196 if (!IS_ERR(ctx)) 2197 return ctx; 2198 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2199 } 2200 2201 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2202 struct nfs4_state *state, enum open_claim_type4 claim) 2203 { 2204 struct nfs4_opendata *opendata; 2205 2206 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2207 NULL, claim, GFP_NOFS); 2208 if (opendata == NULL) 2209 return ERR_PTR(-ENOMEM); 2210 opendata->state = state; 2211 refcount_inc(&state->count); 2212 return opendata; 2213 } 2214 2215 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2216 fmode_t fmode) 2217 { 2218 struct nfs4_state *newstate; 2219 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2220 int openflags = opendata->o_arg.open_flags; 2221 int ret; 2222 2223 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2224 return 0; 2225 opendata->o_arg.fmode = fmode; 2226 opendata->o_arg.share_access = 2227 nfs4_map_atomic_open_share(server, fmode, openflags); 2228 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2229 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 2230 nfs4_init_opendata_res(opendata); 2231 ret = _nfs4_recover_proc_open(opendata); 2232 if (ret != 0) 2233 return ret; 2234 newstate = nfs4_opendata_to_nfs4_state(opendata); 2235 if (IS_ERR(newstate)) 2236 return PTR_ERR(newstate); 2237 if (newstate != opendata->state) 2238 ret = -ESTALE; 2239 nfs4_close_state(newstate, fmode); 2240 return ret; 2241 } 2242 2243 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2244 { 2245 int ret; 2246 2247 /* memory barrier prior to reading state->n_* */ 2248 smp_rmb(); 2249 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2250 if (ret != 0) 2251 return ret; 2252 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2253 if (ret != 0) 2254 return ret; 2255 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2256 if (ret != 0) 2257 return ret; 2258 /* 2259 * We may have performed cached opens for all three recoveries. 2260 * Check if we need to update the current stateid. 2261 */ 2262 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2263 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2264 write_seqlock(&state->seqlock); 2265 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2266 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2267 write_sequnlock(&state->seqlock); 2268 } 2269 return 0; 2270 } 2271 2272 /* 2273 * OPEN_RECLAIM: 2274 * reclaim state on the server after a reboot. 
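 * The opens are replayed with CLAIM_PREVIOUS, advertising the delegation
 * type held before the reboot (the *_ATTRS_DELEG variants when the
 * delegation carried delegated timestamps) so the server can re-grant it.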
2275 */ 2276 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2277 { 2278 struct nfs_delegation *delegation; 2279 struct nfs4_opendata *opendata; 2280 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2281 int status; 2282 2283 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2284 NFS4_OPEN_CLAIM_PREVIOUS); 2285 if (IS_ERR(opendata)) 2286 return PTR_ERR(opendata); 2287 rcu_read_lock(); 2288 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2289 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2290 switch(delegation->type) { 2291 case FMODE_READ: 2292 delegation_type = NFS4_OPEN_DELEGATE_READ; 2293 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2294 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2295 break; 2296 case FMODE_WRITE: 2297 case FMODE_READ|FMODE_WRITE: 2298 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2299 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2300 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2301 } 2302 } 2303 rcu_read_unlock(); 2304 opendata->o_arg.u.delegation_type = delegation_type; 2305 status = nfs4_open_recover(opendata, state); 2306 nfs4_opendata_put(opendata); 2307 return status; 2308 } 2309 2310 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2311 { 2312 struct nfs_server *server = NFS_SERVER(state->inode); 2313 struct nfs4_exception exception = { }; 2314 int err; 2315 do { 2316 err = _nfs4_do_open_reclaim(ctx, state); 2317 trace_nfs4_open_reclaim(ctx, 0, err); 2318 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2319 continue; 2320 if (err != -NFS4ERR_DELAY) 2321 break; 2322 nfs4_handle_exception(server, err, &exception); 2323 } while (exception.retry); 2324 return err; 2325 } 2326 2327 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2328 { 2329 struct nfs_open_context *ctx; 2330 int ret; 2331 2332 ctx = nfs4_state_find_open_context(state); 2333 if (IS_ERR(ctx)) 2334 return -EAGAIN; 2335 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2336 nfs_state_clear_open_state_flags(state); 2337 ret = nfs4_do_open_reclaim(ctx, state); 2338 put_nfs_open_context(ctx); 2339 return ret; 2340 } 2341 2342 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2343 { 2344 switch (err) { 2345 default: 2346 printk(KERN_ERR "NFS: %s: unhandled error " 2347 "%d.\n", __func__, err); 2348 fallthrough; 2349 case 0: 2350 case -ENOENT: 2351 case -EAGAIN: 2352 case -ESTALE: 2353 case -ETIMEDOUT: 2354 break; 2355 case -NFS4ERR_BADSESSION: 2356 case -NFS4ERR_BADSLOT: 2357 case -NFS4ERR_BAD_HIGH_SLOT: 2358 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2359 case -NFS4ERR_DEADSESSION: 2360 return -EAGAIN; 2361 case -NFS4ERR_STALE_CLIENTID: 2362 case -NFS4ERR_STALE_STATEID: 2363 /* Don't recall a delegation if it was lost */ 2364 nfs4_schedule_lease_recovery(server->nfs_client); 2365 return -EAGAIN; 2366 case -NFS4ERR_MOVED: 2367 nfs4_schedule_migration_recovery(server); 2368 return -EAGAIN; 2369 case -NFS4ERR_LEASE_MOVED: 2370 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2371 return -EAGAIN; 2372 case -NFS4ERR_DELEG_REVOKED: 2373 case -NFS4ERR_ADMIN_REVOKED: 2374 case -NFS4ERR_EXPIRED: 2375 case -NFS4ERR_BAD_STATEID: 2376 case -NFS4ERR_OPENMODE: 2377 nfs_inode_find_state_and_recover(state->inode, 2378 stateid); 2379 nfs4_schedule_stateid_recovery(server, state); 
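/* Recovery for this stateid has been scheduled; report -EAGAIN so the
 * recall is retried once recovery has run. */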
2380 return -EAGAIN; 2381 case -NFS4ERR_DELAY: 2382 case -NFS4ERR_GRACE: 2383 ssleep(1); 2384 return -EAGAIN; 2385 case -ENOMEM: 2386 case -NFS4ERR_DENIED: 2387 if (fl) { 2388 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2389 if (lsp) 2390 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2391 } 2392 return 0; 2393 } 2394 return err; 2395 } 2396 2397 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2398 struct nfs4_state *state, const nfs4_stateid *stateid) 2399 { 2400 struct nfs_server *server = NFS_SERVER(state->inode); 2401 struct nfs4_opendata *opendata; 2402 int err = 0; 2403 2404 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2405 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2406 if (IS_ERR(opendata)) 2407 return PTR_ERR(opendata); 2408 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2409 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2410 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2411 if (err) 2412 goto out; 2413 } 2414 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2415 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2416 if (err) 2417 goto out; 2418 } 2419 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2420 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2421 if (err) 2422 goto out; 2423 } 2424 nfs_state_clear_delegation(state); 2425 out: 2426 nfs4_opendata_put(opendata); 2427 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2428 } 2429 2430 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2431 { 2432 struct nfs4_opendata *data = calldata; 2433 2434 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2435 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2436 } 2437 2438 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2439 { 2440 struct nfs4_opendata *data = calldata; 2441 2442 nfs40_sequence_done(task, &data->c_res.seq_res); 2443 2444 data->rpc_status = task->tk_status; 2445 if (data->rpc_status == 0) { 2446 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2447 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2448 renew_lease(data->o_res.server, data->timestamp); 2449 data->rpc_done = true; 2450 } 2451 } 2452 2453 static void nfs4_open_confirm_release(void *calldata) 2454 { 2455 struct nfs4_opendata *data = calldata; 2456 struct nfs4_state *state = NULL; 2457 2458 /* If this request hasn't been cancelled, do nothing */ 2459 if (!data->cancelled) 2460 goto out_free; 2461 /* In case of error, no cleanup! 
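 * A cancelled OPEN_CONFIRM that never completed leaves no confirmed
 * stateid behind, so simply drop our reference to the opendata.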
*/ 2462 if (!data->rpc_done) 2463 goto out_free; 2464 state = nfs4_opendata_to_nfs4_state(data); 2465 if (!IS_ERR(state)) 2466 nfs4_close_state(state, data->o_arg.fmode); 2467 out_free: 2468 nfs4_opendata_put(data); 2469 } 2470 2471 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2472 .rpc_call_prepare = nfs4_open_confirm_prepare, 2473 .rpc_call_done = nfs4_open_confirm_done, 2474 .rpc_release = nfs4_open_confirm_release, 2475 }; 2476 2477 /* 2478 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2479 */ 2480 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2481 { 2482 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2483 struct rpc_task *task; 2484 struct rpc_message msg = { 2485 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2486 .rpc_argp = &data->c_arg, 2487 .rpc_resp = &data->c_res, 2488 .rpc_cred = data->owner->so_cred, 2489 }; 2490 struct rpc_task_setup task_setup_data = { 2491 .rpc_client = server->client, 2492 .rpc_message = &msg, 2493 .callback_ops = &nfs4_open_confirm_ops, 2494 .callback_data = data, 2495 .workqueue = nfsiod_workqueue, 2496 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2497 }; 2498 int status; 2499 2500 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2501 data->is_recover); 2502 kref_get(&data->kref); 2503 data->rpc_done = false; 2504 data->rpc_status = 0; 2505 data->timestamp = jiffies; 2506 task = rpc_run_task(&task_setup_data); 2507 if (IS_ERR(task)) 2508 return PTR_ERR(task); 2509 status = rpc_wait_for_completion_task(task); 2510 if (status != 0) { 2511 data->cancelled = true; 2512 smp_wmb(); 2513 } else 2514 status = data->rpc_status; 2515 rpc_put_task(task); 2516 return status; 2517 } 2518 2519 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2520 { 2521 struct nfs4_opendata *data = calldata; 2522 struct nfs4_state_owner *sp = data->owner; 2523 struct nfs_client *clp = sp->so_server->nfs_client; 2524 enum open_claim_type4 claim = data->o_arg.claim; 2525 2526 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2527 goto out_wait; 2528 /* 2529 * Check if we still need to send an OPEN call, or if we can use 2530 * a delegation instead. 2531 */ 2532 if (data->state != NULL) { 2533 struct nfs_delegation *delegation; 2534 2535 if (can_open_cached(data->state, data->o_arg.fmode, 2536 data->o_arg.open_flags, claim)) 2537 goto out_no_action; 2538 rcu_read_lock(); 2539 delegation = nfs4_get_valid_delegation(data->state->inode); 2540 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2541 goto unlock_no_action; 2542 rcu_read_unlock(); 2543 } 2544 /* Update client id. 
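 * The lease may have been re-established since this opendata was set up,
 * so refresh the clientid from the nfs_client just before the OPEN is sent.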
*/ 2545 data->o_arg.clientid = clp->cl_clientid; 2546 switch (claim) { 2547 default: 2548 break; 2549 case NFS4_OPEN_CLAIM_PREVIOUS: 2550 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2551 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2552 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2553 fallthrough; 2554 case NFS4_OPEN_CLAIM_FH: 2555 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2556 } 2557 data->timestamp = jiffies; 2558 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2559 &data->o_arg.seq_args, 2560 &data->o_res.seq_res, 2561 task) != 0) 2562 nfs_release_seqid(data->o_arg.seqid); 2563 2564 /* Set the create mode (note dependency on the session type) */ 2565 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2566 if (data->o_arg.open_flags & O_EXCL) { 2567 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2568 if (clp->cl_mvops->minor_version == 0) { 2569 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2570 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2571 * because ACCESS will return permission denied for 2572 * all bits until close */ 2573 data->o_res.access_request = data->o_arg.access = 0; 2574 } else if (nfs4_has_persistent_session(clp)) 2575 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2576 } 2577 return; 2578 unlock_no_action: 2579 trace_nfs4_cached_open(data->state); 2580 rcu_read_unlock(); 2581 out_no_action: 2582 task->tk_action = NULL; 2583 out_wait: 2584 nfs4_sequence_done(task, &data->o_res.seq_res); 2585 } 2586 2587 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2588 { 2589 struct nfs4_opendata *data = calldata; 2590 2591 data->rpc_status = task->tk_status; 2592 2593 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2594 return; 2595 2596 if (task->tk_status == 0) { 2597 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2598 switch (data->o_res.f_attr->mode & S_IFMT) { 2599 case S_IFREG: 2600 break; 2601 case S_IFLNK: 2602 data->rpc_status = -ELOOP; 2603 break; 2604 case S_IFDIR: 2605 data->rpc_status = -EISDIR; 2606 break; 2607 default: 2608 data->rpc_status = -ENOTDIR; 2609 } 2610 } 2611 renew_lease(data->o_res.server, data->timestamp); 2612 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2613 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2614 } 2615 data->rpc_done = true; 2616 } 2617 2618 static void nfs4_open_release(void *calldata) 2619 { 2620 struct nfs4_opendata *data = calldata; 2621 struct nfs4_state *state = NULL; 2622 2623 /* In case of error, no cleanup! */ 2624 if (data->rpc_status != 0 || !data->rpc_done) { 2625 nfs_release_seqid(data->o_arg.seqid); 2626 goto out_free; 2627 } 2628 /* If this request hasn't been cancelled, do nothing */ 2629 if (!data->cancelled) 2630 goto out_free; 2631 /* In case we need an open_confirm, no cleanup! 
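 * An open that still requires OPEN_CONFIRM has no usable stateid yet,
 * so there is nothing we could CLOSE here.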
*/ 2632 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2633 goto out_free; 2634 state = nfs4_opendata_to_nfs4_state(data); 2635 if (!IS_ERR(state)) 2636 nfs4_close_state(state, data->o_arg.fmode); 2637 out_free: 2638 nfs4_opendata_put(data); 2639 } 2640 2641 static const struct rpc_call_ops nfs4_open_ops = { 2642 .rpc_call_prepare = nfs4_open_prepare, 2643 .rpc_call_done = nfs4_open_done, 2644 .rpc_release = nfs4_open_release, 2645 }; 2646 2647 static int nfs4_run_open_task(struct nfs4_opendata *data, 2648 struct nfs_open_context *ctx) 2649 { 2650 struct inode *dir = d_inode(data->dir); 2651 struct nfs_server *server = NFS_SERVER(dir); 2652 struct nfs_openargs *o_arg = &data->o_arg; 2653 struct nfs_openres *o_res = &data->o_res; 2654 struct rpc_task *task; 2655 struct rpc_message msg = { 2656 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2657 .rpc_argp = o_arg, 2658 .rpc_resp = o_res, 2659 .rpc_cred = data->owner->so_cred, 2660 }; 2661 struct rpc_task_setup task_setup_data = { 2662 .rpc_client = server->client, 2663 .rpc_message = &msg, 2664 .callback_ops = &nfs4_open_ops, 2665 .callback_data = data, 2666 .workqueue = nfsiod_workqueue, 2667 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2668 }; 2669 int status; 2670 2671 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2672 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2673 2674 kref_get(&data->kref); 2675 data->rpc_done = false; 2676 data->rpc_status = 0; 2677 data->cancelled = false; 2678 data->is_recover = false; 2679 if (!ctx) { 2680 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2681 data->is_recover = true; 2682 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2683 } else { 2684 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2685 pnfs_lgopen_prepare(data, ctx); 2686 } 2687 task = rpc_run_task(&task_setup_data); 2688 if (IS_ERR(task)) 2689 return PTR_ERR(task); 2690 status = rpc_wait_for_completion_task(task); 2691 if (status != 0) { 2692 data->cancelled = true; 2693 smp_wmb(); 2694 } else 2695 status = data->rpc_status; 2696 rpc_put_task(task); 2697 2698 return status; 2699 } 2700 2701 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2702 { 2703 struct inode *dir = d_inode(data->dir); 2704 struct nfs_openres *o_res = &data->o_res; 2705 int status; 2706 2707 status = nfs4_run_open_task(data, NULL); 2708 if (status != 0 || !data->rpc_done) 2709 return status; 2710 2711 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2712 2713 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2714 status = _nfs4_proc_open_confirm(data); 2715 2716 return status; 2717 } 2718 2719 /* 2720 * Additional permission checks in order to distinguish between an 2721 * open for read, and an open for execute. This works around the 2722 * fact that NFSv4 OPEN treats read and execute permissions as being 2723 * the same. 2724 * Note that in the non-execute case, we want to turn off permission 2725 * checking if we just created a new file (POSIX open() semantics). 
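 * In practice: FMODE_EXEC is checked as ACCESS_EXECUTE (ACCESS_LOOKUP for
 * directories), a plain read of a pre-existing file as ACCESS_READ, and the
 * server's ACCESS result is cached before deciding whether to fail with
 * -EACCES.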
2726 */ 2727 static int nfs4_opendata_access(const struct cred *cred, 2728 struct nfs4_opendata *opendata, 2729 struct nfs4_state *state, fmode_t fmode) 2730 { 2731 struct nfs_access_entry cache; 2732 u32 mask, flags; 2733 2734 /* access call failed or for some reason the server doesn't 2735 * support any access modes -- defer access call until later */ 2736 if (opendata->o_res.access_supported == 0) 2737 return 0; 2738 2739 mask = 0; 2740 if (fmode & FMODE_EXEC) { 2741 /* ONLY check for exec rights */ 2742 if (S_ISDIR(state->inode->i_mode)) 2743 mask = NFS4_ACCESS_LOOKUP; 2744 else 2745 mask = NFS4_ACCESS_EXECUTE; 2746 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2747 mask = NFS4_ACCESS_READ; 2748 2749 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2750 nfs_access_add_cache(state->inode, &cache, cred); 2751 2752 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2753 if ((mask & ~cache.mask & flags) == 0) 2754 return 0; 2755 2756 return -EACCES; 2757 } 2758 2759 /* 2760 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2761 */ 2762 static int _nfs4_proc_open(struct nfs4_opendata *data, 2763 struct nfs_open_context *ctx) 2764 { 2765 struct inode *dir = d_inode(data->dir); 2766 struct nfs_server *server = NFS_SERVER(dir); 2767 struct nfs_openargs *o_arg = &data->o_arg; 2768 struct nfs_openres *o_res = &data->o_res; 2769 int status; 2770 2771 status = nfs4_run_open_task(data, ctx); 2772 if (!data->rpc_done) 2773 return status; 2774 if (status != 0) { 2775 if (status == -NFS4ERR_BADNAME && 2776 !(o_arg->open_flags & O_CREAT)) 2777 return -ENOENT; 2778 return status; 2779 } 2780 2781 nfs_fattr_map_and_free_names(server, &data->f_attr); 2782 2783 if (o_arg->open_flags & O_CREAT) { 2784 if (o_arg->open_flags & O_EXCL) 2785 data->file_created = true; 2786 else if (o_res->cinfo.before != o_res->cinfo.after) 2787 data->file_created = true; 2788 if (data->file_created || 2789 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2790 nfs4_update_changeattr(dir, &o_res->cinfo, 2791 o_res->f_attr->time_start, 2792 NFS_INO_INVALID_DATA); 2793 } 2794 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2795 server->caps &= ~NFS_CAP_POSIX_LOCK; 2796 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2797 status = _nfs4_proc_open_confirm(data); 2798 if (status != 0) 2799 return status; 2800 } 2801 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2802 struct nfs_fh *fh = &o_res->fh; 2803 2804 nfs4_sequence_free_slot(&o_res->seq_res); 2805 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2806 fh = NFS_FH(d_inode(data->dentry)); 2807 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2808 } 2809 return 0; 2810 } 2811 2812 /* 2813 * OPEN_EXPIRED: 2814 * reclaim state on the server after a network partition. 2815 * Assumes caller holds the appropriate lock 2816 */ 2817 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2818 { 2819 struct nfs4_opendata *opendata; 2820 int ret; 2821 2822 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2823 if (IS_ERR(opendata)) 2824 return PTR_ERR(opendata); 2825 /* 2826 * We're not recovering a delegation, so ask for no delegation. 2827 * Otherwise the recovery thread could deadlock with an outstanding 2828 * delegation return. 
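 * Setting O_DIRECT in open_flags below is the internal hint that makes
 * nfs4_map_atomic_open_share() request a "want no delegation" share mode
 * on servers supporting NFSv4.1 open share hints.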
2829 */ 2830 opendata->o_arg.open_flags = O_DIRECT; 2831 ret = nfs4_open_recover(opendata, state); 2832 if (ret == -ESTALE) 2833 d_drop(ctx->dentry); 2834 nfs4_opendata_put(opendata); 2835 return ret; 2836 } 2837 2838 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2839 { 2840 struct nfs_server *server = NFS_SERVER(state->inode); 2841 struct nfs4_exception exception = { }; 2842 int err; 2843 2844 do { 2845 err = _nfs4_open_expired(ctx, state); 2846 trace_nfs4_open_expired(ctx, 0, err); 2847 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2848 continue; 2849 switch (err) { 2850 default: 2851 goto out; 2852 case -NFS4ERR_GRACE: 2853 case -NFS4ERR_DELAY: 2854 nfs4_handle_exception(server, err, &exception); 2855 err = 0; 2856 } 2857 } while (exception.retry); 2858 out: 2859 return err; 2860 } 2861 2862 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2863 { 2864 struct nfs_open_context *ctx; 2865 int ret; 2866 2867 ctx = nfs4_state_find_open_context(state); 2868 if (IS_ERR(ctx)) 2869 return -EAGAIN; 2870 ret = nfs4_do_open_expired(ctx, state); 2871 put_nfs_open_context(ctx); 2872 return ret; 2873 } 2874 2875 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2876 const nfs4_stateid *stateid) 2877 { 2878 nfs_remove_bad_delegation(state->inode, stateid); 2879 nfs_state_clear_delegation(state); 2880 } 2881 2882 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2883 { 2884 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2885 nfs_finish_clear_delegation_stateid(state, NULL); 2886 } 2887 2888 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2889 { 2890 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2891 nfs40_clear_delegation_stateid(state); 2892 nfs_state_clear_open_state_flags(state); 2893 return nfs4_open_expired(sp, state); 2894 } 2895 2896 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2897 const nfs4_stateid *stateid, 2898 const struct cred *cred) 2899 { 2900 return -NFS4ERR_BAD_STATEID; 2901 } 2902 2903 #if defined(CONFIG_NFS_V4_1) 2904 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2905 const nfs4_stateid *stateid, 2906 const struct cred *cred) 2907 { 2908 int status; 2909 2910 switch (stateid->type) { 2911 default: 2912 break; 2913 case NFS4_INVALID_STATEID_TYPE: 2914 case NFS4_SPECIAL_STATEID_TYPE: 2915 return -NFS4ERR_BAD_STATEID; 2916 case NFS4_REVOKED_STATEID_TYPE: 2917 goto out_free; 2918 } 2919 2920 status = nfs41_test_stateid(server, stateid, cred); 2921 switch (status) { 2922 case -NFS4ERR_EXPIRED: 2923 case -NFS4ERR_ADMIN_REVOKED: 2924 case -NFS4ERR_DELEG_REVOKED: 2925 break; 2926 default: 2927 return status; 2928 } 2929 out_free: 2930 /* Ack the revoked state to the server */ 2931 nfs41_free_stateid(server, stateid, cred, true); 2932 return -NFS4ERR_EXPIRED; 2933 } 2934 2935 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2936 { 2937 struct nfs_server *server = NFS_SERVER(state->inode); 2938 nfs4_stateid stateid; 2939 struct nfs_delegation *delegation; 2940 const struct cred *cred = NULL; 2941 int status, ret = NFS_OK; 2942 2943 /* Get the delegation credential for use by test/free_stateid */ 2944 rcu_read_lock(); 2945 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2946 if (delegation == NULL) { 2947 rcu_read_unlock(); 2948 nfs_state_clear_delegation(state); 2949 return NFS_OK; 2950 } 2951 2952 
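/* Snapshot the delegation stateid and credential under the delegation
 * lock before testing them on the wire. */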
spin_lock(&delegation->lock); 2953 nfs4_stateid_copy(&stateid, &delegation->stateid); 2954 2955 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2956 &delegation->flags)) { 2957 spin_unlock(&delegation->lock); 2958 rcu_read_unlock(); 2959 return NFS_OK; 2960 } 2961 2962 if (delegation->cred) 2963 cred = get_cred(delegation->cred); 2964 spin_unlock(&delegation->lock); 2965 rcu_read_unlock(); 2966 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2967 trace_nfs4_test_delegation_stateid(state, NULL, status); 2968 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2969 nfs_finish_clear_delegation_stateid(state, &stateid); 2970 else 2971 ret = status; 2972 2973 put_cred(cred); 2974 return ret; 2975 } 2976 2977 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2978 { 2979 nfs4_stateid tmp; 2980 2981 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2982 nfs4_copy_delegation_stateid(state->inode, state->state, 2983 &tmp, NULL) && 2984 nfs4_stateid_match_other(&state->stateid, &tmp)) 2985 nfs_state_set_delegation(state, &tmp, state->state); 2986 else 2987 nfs_state_clear_delegation(state); 2988 } 2989 2990 /** 2991 * nfs41_check_expired_locks - possibly free a lock stateid 2992 * 2993 * @state: NFSv4 state for an inode 2994 * 2995 * Returns NFS_OK if recovery for this stateid is now finished. 2996 * Otherwise a negative NFS4ERR value is returned. 2997 */ 2998 static int nfs41_check_expired_locks(struct nfs4_state *state) 2999 { 3000 int status, ret = NFS_OK; 3001 struct nfs4_lock_state *lsp, *prev = NULL; 3002 struct nfs_server *server = NFS_SERVER(state->inode); 3003 3004 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 3005 goto out; 3006 3007 spin_lock(&state->state_lock); 3008 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3009 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3010 const struct cred *cred = lsp->ls_state->owner->so_cred; 3011 3012 refcount_inc(&lsp->ls_count); 3013 spin_unlock(&state->state_lock); 3014 3015 nfs4_put_lock_state(prev); 3016 prev = lsp; 3017 3018 status = nfs41_test_and_free_expired_stateid(server, 3019 &lsp->ls_stateid, 3020 cred); 3021 trace_nfs4_test_lock_stateid(state, lsp, status); 3022 if (status == -NFS4ERR_EXPIRED || 3023 status == -NFS4ERR_BAD_STATEID) { 3024 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3025 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3026 if (!recover_lost_locks) 3027 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3028 } else if (status != NFS_OK) { 3029 ret = status; 3030 nfs4_put_lock_state(prev); 3031 goto out; 3032 } 3033 spin_lock(&state->state_lock); 3034 } 3035 } 3036 spin_unlock(&state->state_lock); 3037 nfs4_put_lock_state(prev); 3038 out: 3039 return ret; 3040 } 3041 3042 /** 3043 * nfs41_check_open_stateid - possibly free an open stateid 3044 * 3045 * @state: NFSv4 state for an inode 3046 * 3047 * Returns NFS_OK if recovery for this stateid is now finished. 3048 * Otherwise a negative NFS4ERR value is returned. 
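 * The open stateid is probed with TEST_STATEID (and freed with FREE_STATEID
 * if the server reports it revoked or expired); on failure the open state
 * flags are cleared so a fresh OPEN can re-establish them.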
3049 */ 3050 static int nfs41_check_open_stateid(struct nfs4_state *state) 3051 { 3052 struct nfs_server *server = NFS_SERVER(state->inode); 3053 nfs4_stateid *stateid = &state->open_stateid; 3054 const struct cred *cred = state->owner->so_cred; 3055 int status; 3056 3057 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3058 return -NFS4ERR_BAD_STATEID; 3059 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 3060 trace_nfs4_test_open_stateid(state, NULL, status); 3061 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 3062 nfs_state_clear_open_state_flags(state); 3063 stateid->type = NFS4_INVALID_STATEID_TYPE; 3064 return status; 3065 } 3066 if (nfs_open_stateid_recover_openmode(state)) 3067 return -NFS4ERR_OPENMODE; 3068 return NFS_OK; 3069 } 3070 3071 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 3072 { 3073 int status; 3074 3075 status = nfs41_check_delegation_stateid(state); 3076 if (status != NFS_OK) 3077 return status; 3078 nfs41_delegation_recover_stateid(state); 3079 3080 status = nfs41_check_expired_locks(state); 3081 if (status != NFS_OK) 3082 return status; 3083 status = nfs41_check_open_stateid(state); 3084 if (status != NFS_OK) 3085 status = nfs4_open_expired(sp, state); 3086 return status; 3087 } 3088 #endif 3089 3090 /* 3091 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3092 * fields corresponding to attributes that were used to store the verifier. 3093 * Make sure we clobber those fields in the later setattr call 3094 */ 3095 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3096 struct iattr *sattr, struct nfs4_label **label) 3097 { 3098 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3099 __u32 attrset[3]; 3100 unsigned ret; 3101 unsigned i; 3102 3103 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3104 attrset[i] = opendata->o_res.attrset[i]; 3105 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3106 attrset[i] &= ~bitmask[i]; 3107 } 3108 3109 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 
3110 sattr->ia_valid : 0; 3111 3112 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3113 if (sattr->ia_valid & ATTR_ATIME_SET) 3114 ret |= ATTR_ATIME_SET; 3115 else 3116 ret |= ATTR_ATIME; 3117 } 3118 3119 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3120 if (sattr->ia_valid & ATTR_MTIME_SET) 3121 ret |= ATTR_MTIME_SET; 3122 else 3123 ret |= ATTR_MTIME; 3124 } 3125 3126 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3127 *label = NULL; 3128 return ret; 3129 } 3130 3131 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3132 struct nfs_open_context *ctx) 3133 { 3134 struct nfs4_state_owner *sp = opendata->owner; 3135 struct nfs_server *server = sp->so_server; 3136 struct dentry *dentry; 3137 struct nfs4_state *state; 3138 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3139 struct inode *dir = d_inode(opendata->dir); 3140 unsigned long dir_verifier; 3141 int ret; 3142 3143 dir_verifier = nfs_save_change_attribute(dir); 3144 3145 ret = _nfs4_proc_open(opendata, ctx); 3146 if (ret != 0) 3147 goto out; 3148 3149 state = _nfs4_opendata_to_nfs4_state(opendata); 3150 ret = PTR_ERR(state); 3151 if (IS_ERR(state)) 3152 goto out; 3153 ctx->state = state; 3154 if (server->caps & NFS_CAP_POSIX_LOCK) 3155 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3156 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3157 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3158 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3159 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3160 3161 dentry = opendata->dentry; 3162 if (d_really_is_negative(dentry)) { 3163 struct dentry *alias; 3164 d_drop(dentry); 3165 alias = d_splice_alias(igrab(state->inode), dentry); 3166 /* d_splice_alias() can't fail here - it's a non-directory */ 3167 if (alias) { 3168 dput(ctx->dentry); 3169 ctx->dentry = dentry = alias; 3170 } 3171 } 3172 3173 switch(opendata->o_arg.claim) { 3174 default: 3175 break; 3176 case NFS4_OPEN_CLAIM_NULL: 3177 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3178 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3179 if (!opendata->rpc_done) 3180 break; 3181 if (opendata->o_res.delegation.type != 0) 3182 dir_verifier = nfs_save_change_attribute(dir); 3183 nfs_set_verifier(dentry, dir_verifier); 3184 } 3185 3186 /* Parse layoutget results before we check for access */ 3187 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3188 3189 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3190 if (ret != 0) 3191 goto out; 3192 3193 if (d_inode(dentry) == state->inode) 3194 nfs_inode_attach_open_context(ctx); 3195 3196 out: 3197 if (!opendata->cancelled) { 3198 if (opendata->lgp) { 3199 nfs4_lgopen_release(opendata->lgp); 3200 opendata->lgp = NULL; 3201 } 3202 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3203 } 3204 return ret; 3205 } 3206 3207 /* 3208 * Returns a referenced nfs4_state 3209 */ 3210 static int _nfs4_do_open(struct inode *dir, 3211 struct nfs_open_context *ctx, 3212 int flags, 3213 const struct nfs4_open_createattrs *c, 3214 int *opened) 3215 { 3216 struct nfs4_state_owner *sp; 3217 struct nfs4_state *state = NULL; 3218 struct nfs_server *server = NFS_SERVER(dir); 3219 struct nfs4_opendata *opendata; 3220 struct dentry *dentry = ctx->dentry; 3221 const struct cred *cred = ctx->cred; 3222 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3223 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3224 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 3225 struct iattr *sattr = 
c->sattr; 3226 struct nfs4_label *label = c->label; 3227 int status; 3228 3229 /* Protect against reboot recovery conflicts */ 3230 status = -ENOMEM; 3231 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3232 if (sp == NULL) { 3233 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3234 goto out_err; 3235 } 3236 status = nfs4_client_recover_expired_lease(server->nfs_client); 3237 if (status != 0) 3238 goto err_put_state_owner; 3239 if (d_really_is_positive(dentry)) 3240 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3241 status = -ENOMEM; 3242 if (d_really_is_positive(dentry)) 3243 claim = NFS4_OPEN_CLAIM_FH; 3244 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3245 c, claim, GFP_KERNEL); 3246 if (opendata == NULL) 3247 goto err_put_state_owner; 3248 3249 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3250 if (!opendata->f_attr.mdsthreshold) { 3251 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3252 if (!opendata->f_attr.mdsthreshold) 3253 goto err_opendata_put; 3254 } 3255 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3256 } 3257 if (d_really_is_positive(dentry)) 3258 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3259 3260 status = _nfs4_open_and_get_state(opendata, ctx); 3261 if (status != 0) 3262 goto err_opendata_put; 3263 state = ctx->state; 3264 3265 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3266 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3267 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3268 /* 3269 * send create attributes which was not set by open 3270 * with an extra setattr. 3271 */ 3272 if (attrs || label) { 3273 unsigned ia_old = sattr->ia_valid; 3274 3275 sattr->ia_valid = attrs; 3276 nfs_fattr_init(opendata->o_res.f_attr); 3277 status = nfs4_do_setattr(state->inode, cred, 3278 opendata->o_res.f_attr, sattr, 3279 ctx, label); 3280 if (status == 0) { 3281 nfs_setattr_update_inode(state->inode, sattr, 3282 opendata->o_res.f_attr); 3283 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3284 } 3285 sattr->ia_valid = ia_old; 3286 } 3287 } 3288 if (opened && opendata->file_created) 3289 *opened = 1; 3290 3291 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3292 *ctx_th = opendata->f_attr.mdsthreshold; 3293 opendata->f_attr.mdsthreshold = NULL; 3294 } 3295 3296 nfs4_opendata_put(opendata); 3297 nfs4_put_state_owner(sp); 3298 return 0; 3299 err_opendata_put: 3300 nfs4_opendata_put(opendata); 3301 err_put_state_owner: 3302 nfs4_put_state_owner(sp); 3303 out_err: 3304 return status; 3305 } 3306 3307 3308 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3309 struct nfs_open_context *ctx, 3310 int flags, 3311 struct iattr *sattr, 3312 struct nfs4_label *label, 3313 int *opened) 3314 { 3315 struct nfs_server *server = NFS_SERVER(dir); 3316 struct nfs4_exception exception = { 3317 .interruptible = true, 3318 }; 3319 struct nfs4_state *res; 3320 struct nfs4_open_createattrs c = { 3321 .label = label, 3322 .sattr = sattr, 3323 .verf = { 3324 [0] = (__u32)jiffies, 3325 [1] = (__u32)current->pid, 3326 }, 3327 }; 3328 int status; 3329 3330 do { 3331 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3332 res = ctx->state; 3333 trace_nfs4_open_file(ctx, flags, status); 3334 if (status == 0) 3335 break; 3336 /* NOTE: BAD_SEQID means the server and client disagree about the 3337 * book-keeping w.r.t. state-changing operations 3338 * (OPEN/CLOSE/LOCK/LOCKU...) 
3339 * It is actually a sign of a bug on the client or on the server. 3340 * 3341 * If we receive a BAD_SEQID error in the particular case of 3342 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3343 * have unhashed the old state_owner for us, and that we can 3344 * therefore safely retry using a new one. We should still warn 3345 * the user though... 3346 */ 3347 if (status == -NFS4ERR_BAD_SEQID) { 3348 pr_warn_ratelimited("NFS: v4 server %s " 3349 " returned a bad sequence-id error!\n", 3350 NFS_SERVER(dir)->nfs_client->cl_hostname); 3351 exception.retry = 1; 3352 continue; 3353 } 3354 /* 3355 * BAD_STATEID on OPEN means that the server cancelled our 3356 * state before it received the OPEN_CONFIRM. 3357 * Recover by retrying the request as per the discussion 3358 * on Page 181 of RFC3530. 3359 */ 3360 if (status == -NFS4ERR_BAD_STATEID) { 3361 exception.retry = 1; 3362 continue; 3363 } 3364 if (status == -NFS4ERR_EXPIRED) { 3365 nfs4_schedule_lease_recovery(server->nfs_client); 3366 exception.retry = 1; 3367 continue; 3368 } 3369 if (status == -EAGAIN) { 3370 /* We must have found a delegation */ 3371 exception.retry = 1; 3372 continue; 3373 } 3374 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3375 continue; 3376 res = ERR_PTR(nfs4_handle_exception(server, 3377 status, &exception)); 3378 } while (exception.retry); 3379 return res; 3380 } 3381 3382 static int _nfs4_do_setattr(struct inode *inode, 3383 struct nfs_setattrargs *arg, 3384 struct nfs_setattrres *res, 3385 const struct cred *cred, 3386 struct nfs_open_context *ctx) 3387 { 3388 struct nfs_server *server = NFS_SERVER(inode); 3389 struct rpc_message msg = { 3390 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3391 .rpc_argp = arg, 3392 .rpc_resp = res, 3393 .rpc_cred = cred, 3394 }; 3395 const struct cred *delegation_cred = NULL; 3396 unsigned long timestamp = jiffies; 3397 bool truncate; 3398 int status; 3399 3400 nfs_fattr_init(res->fattr); 3401 3402 /* Servers should only apply open mode checks for file size changes */ 3403 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; 3404 if (!truncate) { 3405 nfs4_inode_make_writeable(inode); 3406 goto zero_stateid; 3407 } 3408 3409 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3410 /* Use that stateid */ 3411 } else if (ctx != NULL && ctx->state) { 3412 struct nfs_lock_context *l_ctx; 3413 if (!nfs4_valid_open_stateid(ctx->state)) 3414 return -EBADF; 3415 l_ctx = nfs_get_lock_context(ctx); 3416 if (IS_ERR(l_ctx)) 3417 return PTR_ERR(l_ctx); 3418 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3419 &arg->stateid, &delegation_cred); 3420 nfs_put_lock_context(l_ctx); 3421 if (status == -EIO) 3422 return -EBADF; 3423 else if (status == -EAGAIN) 3424 goto zero_stateid; 3425 } else { 3426 zero_stateid: 3427 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3428 } 3429 if (delegation_cred) 3430 msg.rpc_cred = delegation_cred; 3431 3432 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3433 3434 put_cred(delegation_cred); 3435 if (status == 0 && ctx != NULL) 3436 renew_lease(server, timestamp); 3437 trace_nfs4_setattr(inode, &arg->stateid, status); 3438 return status; 3439 } 3440 3441 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3442 struct nfs_fattr *fattr, struct iattr *sattr, 3443 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3444 { 3445 struct nfs_server *server = NFS_SERVER(inode); 3446 __u32 bitmask[NFS4_BITMASK_SZ]; 3447 struct nfs4_state *state = ctx ? ctx->state : NULL; 3448 struct nfs_setattrargs arg = { 3449 .fh = NFS_FH(inode), 3450 .iap = sattr, 3451 .server = server, 3452 .bitmask = bitmask, 3453 .label = ilabel, 3454 }; 3455 struct nfs_setattrres res = { 3456 .fattr = fattr, 3457 .server = server, 3458 }; 3459 struct nfs4_exception exception = { 3460 .state = state, 3461 .inode = inode, 3462 .stateid = &arg.stateid, 3463 }; 3464 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3465 NFS_INO_INVALID_CTIME; 3466 int err; 3467 3468 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3469 adjust_flags |= NFS_INO_INVALID_MODE; 3470 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3471 adjust_flags |= NFS_INO_INVALID_OTHER; 3472 if (sattr->ia_valid & ATTR_ATIME) 3473 adjust_flags |= NFS_INO_INVALID_ATIME; 3474 if (sattr->ia_valid & ATTR_MTIME) 3475 adjust_flags |= NFS_INO_INVALID_MTIME; 3476 3477 do { 3478 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3479 inode, adjust_flags); 3480 3481 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3482 switch (err) { 3483 case -NFS4ERR_OPENMODE: 3484 if (!(sattr->ia_valid & ATTR_SIZE)) { 3485 pr_warn_once("NFSv4: server %s is incorrectly " 3486 "applying open mode checks to " 3487 "a SETATTR that is not " 3488 "changing file size.\n", 3489 server->nfs_client->cl_hostname); 3490 } 3491 if (state && !(state->state & FMODE_WRITE)) { 3492 err = -EBADF; 3493 if (sattr->ia_valid & ATTR_OPEN) 3494 err = -EACCES; 3495 goto out; 3496 } 3497 } 3498 err = nfs4_handle_exception(server, err, &exception); 3499 } while (exception.retry); 3500 out: 3501 return err; 3502 } 3503 3504 static bool 3505 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3506 { 3507 if (inode == NULL || !nfs_have_layout(inode)) 3508 return false; 3509 3510 return pnfs_wait_on_layoutreturn(inode, task); 3511 } 3512 3513 /* 3514 * Update the seqid of an open stateid 3515 */ 3516 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3517 struct nfs4_state *state) 3518 { 3519 __be32 seqid_open; 3520 u32 
dst_seqid; 3521 int seq; 3522 3523 for (;;) { 3524 if (!nfs4_valid_open_stateid(state)) 3525 break; 3526 seq = read_seqbegin(&state->seqlock); 3527 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3528 nfs4_stateid_copy(dst, &state->open_stateid); 3529 if (read_seqretry(&state->seqlock, seq)) 3530 continue; 3531 break; 3532 } 3533 seqid_open = state->open_stateid.seqid; 3534 if (read_seqretry(&state->seqlock, seq)) 3535 continue; 3536 3537 dst_seqid = be32_to_cpu(dst->seqid); 3538 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3539 dst->seqid = seqid_open; 3540 break; 3541 } 3542 } 3543 3544 /* 3545 * Update the seqid of an open stateid after receiving 3546 * NFS4ERR_OLD_STATEID 3547 */ 3548 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3549 struct nfs4_state *state) 3550 { 3551 __be32 seqid_open; 3552 u32 dst_seqid; 3553 bool ret; 3554 int seq, status = -EAGAIN; 3555 DEFINE_WAIT(wait); 3556 3557 for (;;) { 3558 ret = false; 3559 if (!nfs4_valid_open_stateid(state)) 3560 break; 3561 seq = read_seqbegin(&state->seqlock); 3562 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3563 if (read_seqretry(&state->seqlock, seq)) 3564 continue; 3565 break; 3566 } 3567 3568 write_seqlock(&state->seqlock); 3569 seqid_open = state->open_stateid.seqid; 3570 3571 dst_seqid = be32_to_cpu(dst->seqid); 3572 3573 /* Did another OPEN bump the state's seqid? try again: */ 3574 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3575 dst->seqid = seqid_open; 3576 write_sequnlock(&state->seqlock); 3577 ret = true; 3578 break; 3579 } 3580 3581 /* server says we're behind but we haven't seen the update yet */ 3582 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3583 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3584 write_sequnlock(&state->seqlock); 3585 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3586 3587 if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3588 status = -EINTR; 3589 else 3590 if (schedule_timeout(5*HZ) != 0) 3591 status = 0; 3592 3593 finish_wait(&state->waitq, &wait); 3594 3595 if (!status) 3596 continue; 3597 if (status == -EINTR) 3598 break; 3599 3600 /* we slept the whole 5 seconds, we must have lost a seqid */ 3601 dst->seqid = cpu_to_be32(dst_seqid + 1); 3602 ret = true; 3603 break; 3604 } 3605 3606 return ret; 3607 } 3608 3609 struct nfs4_closedata { 3610 struct inode *inode; 3611 struct nfs4_state *state; 3612 struct nfs_closeargs arg; 3613 struct nfs_closeres res; 3614 struct { 3615 struct nfs4_layoutreturn_args arg; 3616 struct nfs4_layoutreturn_res res; 3617 struct nfs4_xdr_opaque_data ld_private; 3618 u32 roc_barrier; 3619 bool roc; 3620 } lr; 3621 struct nfs_fattr fattr; 3622 unsigned long timestamp; 3623 }; 3624 3625 static void nfs4_free_closedata(void *data) 3626 { 3627 struct nfs4_closedata *calldata = data; 3628 struct nfs4_state_owner *sp = calldata->state->owner; 3629 struct super_block *sb = calldata->state->inode->i_sb; 3630 3631 if (calldata->lr.roc) 3632 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3633 calldata->res.lr_ret); 3634 nfs4_put_open_state(calldata->state); 3635 nfs_free_seqid(calldata->arg.seqid); 3636 nfs4_put_state_owner(sp); 3637 nfs_sb_deactive(sb); 3638 kfree(calldata); 3639 } 3640 3641 static void nfs4_close_done(struct rpc_task *task, void *data) 3642 { 3643 struct nfs4_closedata *calldata = data; 3644 struct nfs4_state *state = calldata->state; 3645 struct nfs_server *server = NFS_SERVER(calldata->inode); 3646 nfs4_stateid *res_stateid = NULL; 3647 struct nfs4_exception 
exception = { 3648 .state = state, 3649 .inode = calldata->inode, 3650 .stateid = &calldata->arg.stateid, 3651 }; 3652 3653 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3654 return; 3655 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3656 3657 /* Handle Layoutreturn errors */ 3658 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3659 &calldata->res.lr_ret) == -EAGAIN) 3660 goto out_restart; 3661 3662 /* hmm. we are done with the inode, and in the process of freeing 3663 * the state_owner. we keep this around to process errors 3664 */ 3665 switch (task->tk_status) { 3666 case 0: 3667 res_stateid = &calldata->res.stateid; 3668 renew_lease(server, calldata->timestamp); 3669 break; 3670 case -NFS4ERR_ACCESS: 3671 if (calldata->arg.bitmask != NULL) { 3672 calldata->arg.bitmask = NULL; 3673 calldata->res.fattr = NULL; 3674 goto out_restart; 3675 3676 } 3677 break; 3678 case -NFS4ERR_OLD_STATEID: 3679 /* Did we race with OPEN? */ 3680 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3681 state)) 3682 goto out_restart; 3683 goto out_release; 3684 case -NFS4ERR_ADMIN_REVOKED: 3685 case -NFS4ERR_STALE_STATEID: 3686 case -NFS4ERR_EXPIRED: 3687 nfs4_free_revoked_stateid(server, 3688 &calldata->arg.stateid, 3689 task->tk_msg.rpc_cred); 3690 fallthrough; 3691 case -NFS4ERR_BAD_STATEID: 3692 if (calldata->arg.fmode == 0) 3693 break; 3694 fallthrough; 3695 default: 3696 task->tk_status = nfs4_async_handle_exception(task, 3697 server, task->tk_status, &exception); 3698 if (exception.retry) 3699 goto out_restart; 3700 } 3701 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3702 res_stateid, calldata->arg.fmode); 3703 out_release: 3704 task->tk_status = 0; 3705 nfs_release_seqid(calldata->arg.seqid); 3706 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3707 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3708 return; 3709 out_restart: 3710 task->tk_status = 0; 3711 rpc_restart_call_prepare(task); 3712 goto out_release; 3713 } 3714 3715 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3716 { 3717 struct nfs4_closedata *calldata = data; 3718 struct nfs4_state *state = calldata->state; 3719 struct inode *inode = calldata->inode; 3720 struct nfs_server *server = NFS_SERVER(inode); 3721 struct pnfs_layout_hdr *lo; 3722 bool is_rdonly, is_wronly, is_rdwr; 3723 int call_close = 0; 3724 3725 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3726 goto out_wait; 3727 3728 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3729 spin_lock(&state->owner->so_lock); 3730 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3731 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3732 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3733 /* Calculate the change in open mode */ 3734 calldata->arg.fmode = 0; 3735 if (state->n_rdwr == 0) { 3736 if (state->n_rdonly == 0) 3737 call_close |= is_rdonly; 3738 else if (is_rdonly) 3739 calldata->arg.fmode |= FMODE_READ; 3740 if (state->n_wronly == 0) 3741 call_close |= is_wronly; 3742 else if (is_wronly) 3743 calldata->arg.fmode |= FMODE_WRITE; 3744 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3745 call_close |= is_rdwr; 3746 } else if (is_rdwr) 3747 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3748 3749 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3750 if (!nfs4_valid_open_stateid(state)) 3751 call_close = 0; 3752 spin_unlock(&state->owner->so_lock); 3753 3754 if (!call_close) { 3755 /* Note: exit _without_ calling 
nfs4_close_done */ 3756 goto out_no_action; 3757 } 3758 3759 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3760 nfs_release_seqid(calldata->arg.seqid); 3761 goto out_wait; 3762 } 3763 3764 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3765 if (lo && !pnfs_layout_is_valid(lo)) { 3766 calldata->arg.lr_args = NULL; 3767 calldata->res.lr_res = NULL; 3768 } 3769 3770 if (calldata->arg.fmode == 0) 3771 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3772 3773 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3774 /* Close-to-open cache consistency revalidation */ 3775 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3776 nfs4_bitmask_set(calldata->arg.bitmask_store, 3777 server->cache_consistency_bitmask, 3778 inode, 0); 3779 calldata->arg.bitmask = calldata->arg.bitmask_store; 3780 } else 3781 calldata->arg.bitmask = NULL; 3782 } 3783 3784 calldata->arg.share_access = 3785 nfs4_fmode_to_share_access(calldata->arg.fmode); 3786 3787 if (calldata->res.fattr == NULL) 3788 calldata->arg.bitmask = NULL; 3789 else if (calldata->arg.bitmask == NULL) 3790 calldata->res.fattr = NULL; 3791 calldata->timestamp = jiffies; 3792 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3793 &calldata->arg.seq_args, 3794 &calldata->res.seq_res, 3795 task) != 0) 3796 nfs_release_seqid(calldata->arg.seqid); 3797 return; 3798 out_no_action: 3799 task->tk_action = NULL; 3800 out_wait: 3801 nfs4_sequence_done(task, &calldata->res.seq_res); 3802 } 3803 3804 static const struct rpc_call_ops nfs4_close_ops = { 3805 .rpc_call_prepare = nfs4_close_prepare, 3806 .rpc_call_done = nfs4_close_done, 3807 .rpc_release = nfs4_free_closedata, 3808 }; 3809 3810 /* 3811 * It is possible for data to be read/written from a mem-mapped file 3812 * after the sys_close call (which hits the vfs layer as a flush). 3813 * This means that we can't safely call nfsv4 close on a file until 3814 * the inode is cleared. This in turn means that we are not good 3815 * NFSv4 citizens - we do not indicate to the server to update the file's 3816 * share state even when we are done with one of the three share 3817 * stateid's in the inode. 3818 * 3819 * NOTE: Caller must be holding the sp->so_owner semaphore! 
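 *
 * The CLOSE is issued asynchronously; nfs4_close_prepare() downgrades it to
 * an OPEN_DOWNGRADE when some share modes remain in use, and pnfs_roc() may
 * attach a LAYOUTRETURN to the same compound.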
3820 */ 3821 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3822 { 3823 struct nfs_server *server = NFS_SERVER(state->inode); 3824 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3825 struct nfs4_closedata *calldata; 3826 struct nfs4_state_owner *sp = state->owner; 3827 struct rpc_task *task; 3828 struct rpc_message msg = { 3829 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3830 .rpc_cred = state->owner->so_cred, 3831 }; 3832 struct rpc_task_setup task_setup_data = { 3833 .rpc_client = server->client, 3834 .rpc_message = &msg, 3835 .callback_ops = &nfs4_close_ops, 3836 .workqueue = nfsiod_workqueue, 3837 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3838 }; 3839 int status = -ENOMEM; 3840 3841 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3842 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3843 3844 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3845 &task_setup_data.rpc_client, &msg); 3846 3847 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3848 if (calldata == NULL) 3849 goto out; 3850 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3851 calldata->inode = state->inode; 3852 calldata->state = state; 3853 calldata->arg.fh = NFS_FH(state->inode); 3854 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3855 goto out_free_calldata; 3856 /* Serialization for the sequence id */ 3857 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3858 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3859 if (IS_ERR(calldata->arg.seqid)) 3860 goto out_free_calldata; 3861 nfs_fattr_init(&calldata->fattr); 3862 calldata->arg.fmode = 0; 3863 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3864 calldata->res.fattr = &calldata->fattr; 3865 calldata->res.seqid = calldata->arg.seqid; 3866 calldata->res.server = server; 3867 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3868 calldata->lr.roc = pnfs_roc(state->inode, 3869 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3870 if (calldata->lr.roc) { 3871 calldata->arg.lr_args = &calldata->lr.arg; 3872 calldata->res.lr_res = &calldata->lr.res; 3873 } 3874 nfs_sb_active(calldata->inode->i_sb); 3875 3876 msg.rpc_argp = &calldata->arg; 3877 msg.rpc_resp = &calldata->res; 3878 task_setup_data.callback_data = calldata; 3879 task = rpc_run_task(&task_setup_data); 3880 if (IS_ERR(task)) 3881 return PTR_ERR(task); 3882 status = 0; 3883 if (wait) 3884 status = rpc_wait_for_completion_task(task); 3885 rpc_put_task(task); 3886 return status; 3887 out_free_calldata: 3888 kfree(calldata); 3889 out: 3890 nfs4_put_open_state(state); 3891 nfs4_put_state_owner(sp); 3892 return status; 3893 } 3894 3895 static struct inode * 3896 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3897 int open_flags, struct iattr *attr, int *opened) 3898 { 3899 struct nfs4_state *state; 3900 struct nfs4_label l, *label; 3901 3902 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3903 3904 /* Protect against concurrent sillydeletes */ 3905 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3906 3907 nfs4_label_release_security(label); 3908 3909 if (IS_ERR(state)) 3910 return ERR_CAST(state); 3911 return state->inode; 3912 } 3913 3914 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3915 { 3916 struct dentry *dentry = ctx->dentry; 3917 if (ctx->state == NULL) 3918 return; 3919 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3920 nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 
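	/* is_sync callers wait for the CLOSE to finish; otherwise the open
	 * state is torn down asynchronously.
	 */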
3921 if (is_sync) 3922 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3923 else 3924 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3925 } 3926 3927 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3928 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3929 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3930 3931 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3932 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3933 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3934 { 3935 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3936 u32 attr_bitmask = res->attr_bitmask[2]; 3937 3938 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3939 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3940 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3941 } 3942 3943 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3944 { 3945 u32 minorversion = server->nfs_client->cl_minorversion; 3946 u32 bitmask[3] = { 3947 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3948 }; 3949 struct nfs4_server_caps_arg args = { 3950 .fhandle = fhandle, 3951 .bitmask = bitmask, 3952 }; 3953 struct nfs4_server_caps_res res = {}; 3954 struct rpc_message msg = { 3955 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3956 .rpc_argp = &args, 3957 .rpc_resp = &res, 3958 }; 3959 int status; 3960 int i; 3961 3962 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3963 FATTR4_WORD0_FH_EXPIRE_TYPE | 3964 FATTR4_WORD0_LINK_SUPPORT | 3965 FATTR4_WORD0_SYMLINK_SUPPORT | 3966 FATTR4_WORD0_ACLSUPPORT | 3967 FATTR4_WORD0_CASE_INSENSITIVE | 3968 FATTR4_WORD0_CASE_PRESERVING; 3969 if (minorversion) 3970 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3971 FATTR4_WORD2_OPEN_ARGUMENTS; 3972 3973 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3974 if (status == 0) { 3975 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 3976 FATTR4_WORD0_FH_EXPIRE_TYPE | 3977 FATTR4_WORD0_LINK_SUPPORT | 3978 FATTR4_WORD0_SYMLINK_SUPPORT | 3979 FATTR4_WORD0_ACLSUPPORT | 3980 FATTR4_WORD0_CASE_INSENSITIVE | 3981 FATTR4_WORD0_CASE_PRESERVING) & 3982 res.attr_bitmask[0]; 3983 /* Sanity check the server answers */ 3984 switch (minorversion) { 3985 case 0: 3986 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3987 res.attr_bitmask[2] = 0; 3988 break; 3989 case 1: 3990 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3991 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 3992 res.attr_bitmask[2]; 3993 break; 3994 case 2: 3995 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3996 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3997 FATTR4_WORD2_OPEN_ARGUMENTS) & 3998 res.attr_bitmask[2]; 3999 } 4000 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 4001 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | 4002 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); 4003 server->fattr_valid = NFS_ATTR_FATTR_V4; 4004 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 4005 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 4006 server->caps |= NFS_CAP_ACLS; 4007 if (res.has_links != 0) 4008 server->caps |= NFS_CAP_HARDLINKS; 4009 if (res.has_symlinks != 0) 4010 server->caps |= NFS_CAP_SYMLINKS; 4011 if (res.case_insensitive) 4012 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4013 if (res.case_preserving) 4014 server->caps |= NFS_CAP_CASE_PRESERVING; 4015 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4016 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 4017 server->caps |= 
NFS_CAP_SECURITY_LABEL; 4018 #endif 4019 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) 4020 server->caps |= NFS_CAP_FS_LOCATIONS; 4021 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) 4022 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; 4023 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) 4024 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; 4025 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) 4026 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; 4027 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) 4028 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | 4029 NFS_ATTR_FATTR_OWNER_NAME); 4030 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) 4031 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | 4032 NFS_ATTR_FATTR_GROUP_NAME); 4033 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) 4034 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; 4035 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) 4036 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; 4037 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) 4038 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 4039 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4040 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4041 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 4042 sizeof(server->attr_bitmask)); 4043 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 4044 4045 if (res.open_caps.oa_share_access_want[0] & 4046 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION) 4047 server->caps |= NFS_CAP_OPEN_XOR; 4048 if (nfs4_server_delegtime_capable(&res)) 4049 server->caps |= NFS_CAP_DELEGTIME; 4050 4051 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 4052 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 4053 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 4054 server->cache_consistency_bitmask[2] = 0; 4055 4056 /* Avoid a regression due to buggy server */ 4057 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) 4058 res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; 4059 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 4060 sizeof(server->exclcreat_bitmask)); 4061 4062 server->acl_bitmask = res.acl_bitmask; 4063 server->fh_expire_type = res.fh_expire_type; 4064 } 4065 4066 return status; 4067 } 4068 4069 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 4070 { 4071 struct nfs4_exception exception = { 4072 .interruptible = true, 4073 }; 4074 int err; 4075 4076 nfs4_server_set_init_caps(server); 4077 do { 4078 err = nfs4_handle_exception(server, 4079 _nfs4_server_capabilities(server, fhandle), 4080 &exception); 4081 } while (exception.retry); 4082 return err; 4083 } 4084 4085 static void test_fs_location_for_trunking(struct nfs4_fs_location *location, 4086 struct nfs_client *clp, 4087 struct nfs_server *server) 4088 { 4089 int i; 4090 4091 for (i = 0; i < location->nservers; i++) { 4092 struct nfs4_string *srv_loc = &location->servers[i]; 4093 struct sockaddr_storage addr; 4094 size_t addrlen; 4095 struct xprt_create xprt_args = { 4096 .ident = 0, 4097 .net = clp->cl_net, 4098 }; 4099 struct nfs4_add_xprt_data xprtdata = { 4100 .clp = clp, 4101 }; 4102 struct rpc_add_xprt_test rpcdata = { 4103 .add_xprt_test = clp->cl_mvops->session_trunk, 4104 .data = &xprtdata, 4105 }; 4106 char *servername = NULL; 4107 4108 if (!srv_loc->len) 4109 continue; 4110 4111 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4112 &addr, sizeof(addr), 4113 clp->cl_net, server->port); 4114 
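		/* Give up on trunking detection for this location if the
		 * server name cannot be parsed into a transport address.
		 */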
if (!addrlen) 4115 return; 4116 xprt_args.dstaddr = (struct sockaddr *)&addr; 4117 xprt_args.addrlen = addrlen; 4118 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4119 if (!servername) 4120 return; 4121 memcpy(servername, srv_loc->data, srv_loc->len); 4122 servername[srv_loc->len] = '\0'; 4123 xprt_args.servername = servername; 4124 4125 xprtdata.cred = nfs4_get_clid_cred(clp); 4126 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4127 rpc_clnt_setup_test_and_add_xprt, 4128 &rpcdata); 4129 if (xprtdata.cred) 4130 put_cred(xprtdata.cred); 4131 kfree(servername); 4132 } 4133 } 4134 4135 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4136 struct nfs4_pathname *path2) 4137 { 4138 int i; 4139 4140 if (path1->ncomponents != path2->ncomponents) 4141 return false; 4142 for (i = 0; i < path1->ncomponents; i++) { 4143 if (path1->components[i].len != path2->components[i].len) 4144 return false; 4145 if (memcmp(path1->components[i].data, path2->components[i].data, 4146 path1->components[i].len)) 4147 return false; 4148 } 4149 return true; 4150 } 4151 4152 static int _nfs4_discover_trunking(struct nfs_server *server, 4153 struct nfs_fh *fhandle) 4154 { 4155 struct nfs4_fs_locations *locations = NULL; 4156 struct page *page; 4157 const struct cred *cred; 4158 struct nfs_client *clp = server->nfs_client; 4159 const struct nfs4_state_maintenance_ops *ops = 4160 clp->cl_mvops->state_renewal_ops; 4161 int status = -ENOMEM, i; 4162 4163 cred = ops->get_state_renewal_cred(clp); 4164 if (cred == NULL) { 4165 cred = nfs4_get_clid_cred(clp); 4166 if (cred == NULL) 4167 return -ENOKEY; 4168 } 4169 4170 page = alloc_page(GFP_KERNEL); 4171 if (!page) 4172 goto out_put_cred; 4173 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4174 if (!locations) 4175 goto out_free; 4176 locations->fattr = nfs_alloc_fattr(); 4177 if (!locations->fattr) 4178 goto out_free_2; 4179 4180 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4181 cred); 4182 if (status) 4183 goto out_free_3; 4184 4185 for (i = 0; i < locations->nlocations; i++) { 4186 if (!_is_same_nfs4_pathname(&locations->fs_path, 4187 &locations->locations[i].rootpath)) 4188 continue; 4189 test_fs_location_for_trunking(&locations->locations[i], clp, 4190 server); 4191 } 4192 out_free_3: 4193 kfree(locations->fattr); 4194 out_free_2: 4195 kfree(locations); 4196 out_free: 4197 __free_page(page); 4198 out_put_cred: 4199 put_cred(cred); 4200 return status; 4201 } 4202 4203 static int nfs4_discover_trunking(struct nfs_server *server, 4204 struct nfs_fh *fhandle) 4205 { 4206 struct nfs4_exception exception = { 4207 .interruptible = true, 4208 }; 4209 struct nfs_client *clp = server->nfs_client; 4210 int err = 0; 4211 4212 if (!nfs4_has_session(clp)) 4213 goto out; 4214 do { 4215 err = nfs4_handle_exception(server, 4216 _nfs4_discover_trunking(server, fhandle), 4217 &exception); 4218 } while (exception.retry); 4219 out: 4220 return err; 4221 } 4222 4223 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4224 struct nfs_fsinfo *info) 4225 { 4226 u32 bitmask[3]; 4227 struct nfs4_lookup_root_arg args = { 4228 .bitmask = bitmask, 4229 }; 4230 struct nfs4_lookup_res res = { 4231 .server = server, 4232 .fattr = info->fattr, 4233 .fh = fhandle, 4234 }; 4235 struct rpc_message msg = { 4236 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4237 .rpc_argp = &args, 4238 .rpc_resp = &res, 4239 }; 4240 4241 bitmask[0] = nfs4_fattr_bitmap[0]; 4242 bitmask[1] = nfs4_fattr_bitmap[1]; 4243 /* 4244 * 
Process the label in the upcoming getfattr 4245 */ 4246 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 4247 4248 nfs_fattr_init(info->fattr); 4249 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4250 } 4251 4252 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4253 struct nfs_fsinfo *info) 4254 { 4255 struct nfs4_exception exception = { 4256 .interruptible = true, 4257 }; 4258 int err; 4259 do { 4260 err = _nfs4_lookup_root(server, fhandle, info); 4261 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 4262 switch (err) { 4263 case 0: 4264 case -NFS4ERR_WRONGSEC: 4265 goto out; 4266 default: 4267 err = nfs4_handle_exception(server, err, &exception); 4268 } 4269 } while (exception.retry); 4270 out: 4271 return err; 4272 } 4273 4274 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4275 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 4276 { 4277 struct rpc_auth_create_args auth_args = { 4278 .pseudoflavor = flavor, 4279 }; 4280 struct rpc_auth *auth; 4281 4282 auth = rpcauth_create(&auth_args, server->client); 4283 if (IS_ERR(auth)) 4284 return -EACCES; 4285 return nfs4_lookup_root(server, fhandle, info); 4286 } 4287 4288 /* 4289 * Retry pseudoroot lookup with various security flavors. We do this when: 4290 * 4291 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4292 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4293 * 4294 * Returns zero on success, or a negative NFS4ERR value, or a 4295 * negative errno value. 4296 */ 4297 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4298 struct nfs_fsinfo *info) 4299 { 4300 /* Per 3530bis 15.33.5 */ 4301 static const rpc_authflavor_t flav_array[] = { 4302 RPC_AUTH_GSS_KRB5P, 4303 RPC_AUTH_GSS_KRB5I, 4304 RPC_AUTH_GSS_KRB5, 4305 RPC_AUTH_UNIX, /* courtesy */ 4306 RPC_AUTH_NULL, 4307 }; 4308 int status = -EPERM; 4309 size_t i; 4310 4311 if (server->auth_info.flavor_len > 0) { 4312 /* try each flavor specified by user */ 4313 for (i = 0; i < server->auth_info.flavor_len; i++) { 4314 status = nfs4_lookup_root_sec(server, fhandle, info, 4315 server->auth_info.flavors[i]); 4316 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4317 continue; 4318 break; 4319 } 4320 } else { 4321 /* no flavors specified by user, try default list */ 4322 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4323 status = nfs4_lookup_root_sec(server, fhandle, info, 4324 flav_array[i]); 4325 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4326 continue; 4327 break; 4328 } 4329 } 4330 4331 /* 4332 * -EACCES could mean that the user doesn't have correct permissions 4333 * to access the mount. It could also mean that we tried to mount 4334 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4335 * existing mount programs don't handle -EACCES very well so it should 4336 * be mapped to -EPERM instead. 4337 */ 4338 if (status == -EACCES) 4339 status = -EPERM; 4340 return status; 4341 } 4342 4343 /** 4344 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4345 * @server: initialized nfs_server handle 4346 * @fhandle: we fill in the pseudo-fs root file handle 4347 * @info: we fill in an FSINFO struct 4348 * @auth_probe: probe the auth flavours 4349 * 4350 * Returns zero on success, or a negative errno. 
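 *
 * On success, the server's capabilities and the fsinfo for the pseudo-fs
 * root are probed as well before returning.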
4351 */ 4352 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4353 struct nfs_fsinfo *info, 4354 bool auth_probe) 4355 { 4356 int status = 0; 4357 4358 if (!auth_probe) 4359 status = nfs4_lookup_root(server, fhandle, info); 4360 4361 if (auth_probe || status == NFS4ERR_WRONGSEC) 4362 status = server->nfs_client->cl_mvops->find_root_sec(server, 4363 fhandle, info); 4364 4365 if (status == 0) 4366 status = nfs4_server_capabilities(server, fhandle); 4367 if (status == 0) 4368 status = nfs4_do_fsinfo(server, fhandle, info); 4369 4370 return nfs4_map_errors(status); 4371 } 4372 4373 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4374 struct nfs_fsinfo *info) 4375 { 4376 int error; 4377 struct nfs_fattr *fattr = info->fattr; 4378 4379 error = nfs4_server_capabilities(server, mntfh); 4380 if (error < 0) { 4381 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4382 return error; 4383 } 4384 4385 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4386 if (error < 0) { 4387 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4388 goto out; 4389 } 4390 4391 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4392 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4393 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4394 4395 out: 4396 return error; 4397 } 4398 4399 /* 4400 * Get locations and (maybe) other attributes of a referral. 4401 * Note that we'll actually follow the referral later when 4402 * we detect fsid mismatch in inode revalidation 4403 */ 4404 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4405 const struct qstr *name, struct nfs_fattr *fattr, 4406 struct nfs_fh *fhandle) 4407 { 4408 int status = -ENOMEM; 4409 struct page *page = NULL; 4410 struct nfs4_fs_locations *locations = NULL; 4411 4412 page = alloc_page(GFP_KERNEL); 4413 if (page == NULL) 4414 goto out; 4415 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4416 if (locations == NULL) 4417 goto out; 4418 4419 locations->fattr = fattr; 4420 4421 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4422 if (status != 0) 4423 goto out; 4424 4425 /* 4426 * If the fsid didn't change, this is a migration event, not a 4427 * referral. Cause us to drop into the exception handler, which 4428 * will kick off migration recovery. 4429 */ 4430 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) { 4431 dprintk("%s: server did not return a different fsid for" 4432 " a referral at %s\n", __func__, name->name); 4433 status = -NFS4ERR_MOVED; 4434 goto out; 4435 } 4436 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 4437 nfs_fixup_referral_attributes(fattr); 4438 memset(fhandle, 0, sizeof(struct nfs_fh)); 4439 out: 4440 if (page) 4441 __free_page(page); 4442 kfree(locations); 4443 return status; 4444 } 4445 4446 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4447 struct nfs_fattr *fattr, struct inode *inode) 4448 { 4449 __u32 bitmask[NFS4_BITMASK_SZ]; 4450 struct nfs4_getattr_arg args = { 4451 .fh = fhandle, 4452 .bitmask = bitmask, 4453 }; 4454 struct nfs4_getattr_res res = { 4455 .fattr = fattr, 4456 .server = server, 4457 }; 4458 struct rpc_message msg = { 4459 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4460 .rpc_argp = &args, 4461 .rpc_resp = &res, 4462 }; 4463 unsigned short task_flags = 0; 4464 4465 if (nfs4_has_session(server->nfs_client)) 4466 task_flags = RPC_TASK_MOVEABLE; 4467 4468 /* Is this is an attribute revalidation, subject to softreval? 
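	 * If so, let the GETATTR time out instead of retrying indefinitely.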
*/ 4469 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) 4470 task_flags |= RPC_TASK_TIMEOUT; 4471 4472 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); 4473 nfs_fattr_init(fattr); 4474 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4475 return nfs4_do_call_sync(server->client, server, &msg, 4476 &args.seq_args, &res.seq_res, task_flags); 4477 } 4478 4479 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4480 struct nfs_fattr *fattr, struct inode *inode) 4481 { 4482 struct nfs4_exception exception = { 4483 .interruptible = true, 4484 }; 4485 int err; 4486 do { 4487 err = _nfs4_proc_getattr(server, fhandle, fattr, inode); 4488 trace_nfs4_getattr(server, fhandle, fattr, err); 4489 err = nfs4_handle_exception(server, err, 4490 &exception); 4491 } while (exception.retry); 4492 return err; 4493 } 4494 4495 /* 4496 * The file is not closed if it is opened due to the a request to change 4497 * the size of the file. The open call will not be needed once the 4498 * VFS layer lookup-intents are implemented. 4499 * 4500 * Close is called when the inode is destroyed. 4501 * If we haven't opened the file for O_WRONLY, we 4502 * need to in the size_change case to obtain a stateid. 4503 * 4504 * Got race? 4505 * Because OPEN is always done by name in nfsv4, it is 4506 * possible that we opened a different file by the same 4507 * name. We can recognize this race condition, but we 4508 * can't do anything about it besides returning an error. 4509 * 4510 * This will be fixed with VFS changes (lookup-intent). 4511 */ 4512 static int 4513 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4514 struct iattr *sattr) 4515 { 4516 struct inode *inode = d_inode(dentry); 4517 const struct cred *cred = NULL; 4518 struct nfs_open_context *ctx = NULL; 4519 int status; 4520 4521 if (pnfs_ld_layoutret_on_setattr(inode) && 4522 sattr->ia_valid & ATTR_SIZE && 4523 sattr->ia_size < i_size_read(inode)) 4524 pnfs_commit_and_return_layout(inode); 4525 4526 nfs_fattr_init(fattr); 4527 4528 /* Deal with open(O_TRUNC) */ 4529 if (sattr->ia_valid & ATTR_OPEN) 4530 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4531 4532 /* Optimization: if the end result is no change, don't RPC */ 4533 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4534 return 0; 4535 4536 /* Search for an existing open(O_WRITE) file */ 4537 if (sattr->ia_valid & ATTR_FILE) { 4538 4539 ctx = nfs_file_open_context(sattr->ia_file); 4540 if (ctx) 4541 cred = ctx->cred; 4542 } 4543 4544 /* Return any delegations if we're going to change ACLs */ 4545 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4546 nfs4_inode_make_writeable(inode); 4547 4548 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL); 4549 if (status == 0) { 4550 nfs_setattr_update_inode(inode, sattr, fattr); 4551 nfs_setsecurity(inode, fattr); 4552 } 4553 return status; 4554 } 4555 4556 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4557 struct dentry *dentry, const struct qstr *name, 4558 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4559 { 4560 struct nfs_server *server = NFS_SERVER(dir); 4561 int status; 4562 struct nfs4_lookup_arg args = { 4563 .bitmask = server->attr_bitmask, 4564 .dir_fh = NFS_FH(dir), 4565 .name = name, 4566 }; 4567 struct nfs4_lookup_res res = { 4568 .server = server, 4569 .fattr = fattr, 4570 .fh = fhandle, 4571 }; 4572 struct rpc_message msg = { 4573 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4574 .rpc_argp = &args, 4575 .rpc_resp = 
&res, 4576 }; 4577 unsigned short task_flags = 0; 4578 4579 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 4580 task_flags = RPC_TASK_MOVEABLE; 4581 4582 /* Is this is an attribute revalidation, subject to softreval? */ 4583 if (nfs_lookup_is_soft_revalidate(dentry)) 4584 task_flags |= RPC_TASK_TIMEOUT; 4585 4586 args.bitmask = nfs4_bitmask(server, fattr->label); 4587 4588 nfs_fattr_init(fattr); 4589 4590 dprintk("NFS call lookup %pd2\n", dentry); 4591 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4592 status = nfs4_do_call_sync(clnt, server, &msg, 4593 &args.seq_args, &res.seq_res, task_flags); 4594 dprintk("NFS reply lookup: %d\n", status); 4595 return status; 4596 } 4597 4598 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4599 { 4600 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4601 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4602 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4603 fattr->nlink = 2; 4604 } 4605 4606 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4607 struct dentry *dentry, const struct qstr *name, 4608 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4609 { 4610 struct nfs4_exception exception = { 4611 .interruptible = true, 4612 }; 4613 struct rpc_clnt *client = *clnt; 4614 int err; 4615 do { 4616 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); 4617 trace_nfs4_lookup(dir, name, err); 4618 switch (err) { 4619 case -NFS4ERR_BADNAME: 4620 err = -ENOENT; 4621 goto out; 4622 case -NFS4ERR_MOVED: 4623 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4624 if (err == -NFS4ERR_MOVED) 4625 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4626 goto out; 4627 case -NFS4ERR_WRONGSEC: 4628 err = -EPERM; 4629 if (client != *clnt) 4630 goto out; 4631 client = nfs4_negotiate_security(client, dir, name); 4632 if (IS_ERR(client)) 4633 return PTR_ERR(client); 4634 4635 exception.retry = 1; 4636 break; 4637 default: 4638 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4639 } 4640 } while (exception.retry); 4641 4642 out: 4643 if (err == 0) 4644 *clnt = client; 4645 else if (client != *clnt) 4646 rpc_shutdown_client(client); 4647 4648 return err; 4649 } 4650 4651 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, 4652 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4653 { 4654 int status; 4655 struct rpc_clnt *client = NFS_CLIENT(dir); 4656 4657 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); 4658 if (client != NFS_CLIENT(dir)) { 4659 rpc_shutdown_client(client); 4660 nfs_fixup_secinfo_attributes(fattr); 4661 } 4662 return status; 4663 } 4664 4665 struct rpc_clnt * 4666 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4667 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4668 { 4669 struct rpc_clnt *client = NFS_CLIENT(dir); 4670 int status; 4671 4672 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, 4673 fhandle, fattr); 4674 if (status < 0) 4675 return ERR_PTR(status); 4676 return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; 4677 } 4678 4679 static int _nfs4_proc_lookupp(struct inode *inode, 4680 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4681 { 4682 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4683 struct nfs_server *server = NFS_SERVER(inode); 4684 int status; 4685 struct nfs4_lookupp_arg args = { 4686 .bitmask = server->attr_bitmask, 4687 .fh = NFS_FH(inode), 4688 }; 4689 struct nfs4_lookupp_res res = { 4690 .server = server, 4691 .fattr = fattr, 4692 .fh = fhandle, 4693 }; 4694 struct rpc_message msg = { 4695 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4696 .rpc_argp = &args, 4697 .rpc_resp = &res, 4698 }; 4699 unsigned short task_flags = 0; 4700 4701 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) 4702 task_flags |= RPC_TASK_TIMEOUT; 4703 4704 args.bitmask = nfs4_bitmask(server, fattr->label); 4705 4706 nfs_fattr_init(fattr); 4707 4708 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4709 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 4710 &res.seq_res, task_flags); 4711 dprintk("NFS reply lookupp: %d\n", status); 4712 return status; 4713 } 4714 4715 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4716 struct nfs_fattr *fattr) 4717 { 4718 struct nfs4_exception exception = { 4719 .interruptible = true, 4720 }; 4721 int err; 4722 do { 4723 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4724 trace_nfs4_lookupp(inode, err); 4725 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4726 &exception); 4727 } while (exception.retry); 4728 return err; 4729 } 4730 4731 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4732 const struct cred *cred) 4733 { 4734 struct nfs_server *server = NFS_SERVER(inode); 4735 struct nfs4_accessargs args = { 4736 .fh = NFS_FH(inode), 4737 .access = entry->mask, 4738 }; 4739 struct nfs4_accessres res = { 4740 .server = server, 4741 }; 4742 struct rpc_message msg = { 4743 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4744 .rpc_argp = &args, 4745 .rpc_resp = &res, 4746 .rpc_cred = cred, 4747 }; 4748 int status = 0; 4749 4750 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4751 res.fattr = nfs_alloc_fattr(); 4752 if (res.fattr == NULL) 4753 return -ENOMEM; 4754 args.bitmask = server->cache_consistency_bitmask; 4755 } 4756 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4757 if (!status) { 4758 nfs_access_set_mask(entry, res.access); 4759 if (res.fattr) 4760 nfs_refresh_inode(inode, res.fattr); 4761 } 4762 nfs_free_fattr(res.fattr); 4763 return status; 4764 } 4765 4766 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4767 const struct cred *cred) 4768 { 4769 struct nfs4_exception exception = { 4770 .interruptible = true, 4771 }; 4772 int err; 4773 do { 4774 err = _nfs4_proc_access(inode, entry, cred); 4775 trace_nfs4_access(inode, err); 4776 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4777 &exception); 4778 } while (exception.retry); 4779 return err; 4780 } 4781 4782 /* 4783 * TODO: For the time being, we don't try to get any attributes 4784 * along with any of the zero-copy operations READ, READDIR, 4785 * READLINK, WRITE. 4786 * 4787 * In the case of the first three, we want to put the GETATTR 4788 * after the read-type operation -- this is because it is hard 4789 * to predict the length of a GETATTR response in v4, and thus 4790 * align the READ data correctly. 
This means that the GETATTR 4791 * may end up partially falling into the page cache, and we should 4792 * shift it into the 'tail' of the xdr_buf before processing. 4793 * To do this efficiently, we need to know the total length 4794 * of data received, which doesn't seem to be available outside 4795 * of the RPC layer. 4796 * 4797 * In the case of WRITE, we also want to put the GETATTR after 4798 * the operation -- in this case because we want to make sure 4799 * we get the post-operation mtime and size. 4800 * 4801 * Both of these changes to the XDR layer would in fact be quite 4802 * minor, but I decided to leave them for a subsequent patch. 4803 */ 4804 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4805 unsigned int pgbase, unsigned int pglen) 4806 { 4807 struct nfs4_readlink args = { 4808 .fh = NFS_FH(inode), 4809 .pgbase = pgbase, 4810 .pglen = pglen, 4811 .pages = &page, 4812 }; 4813 struct nfs4_readlink_res res; 4814 struct rpc_message msg = { 4815 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4816 .rpc_argp = &args, 4817 .rpc_resp = &res, 4818 }; 4819 4820 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4821 } 4822 4823 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4824 unsigned int pgbase, unsigned int pglen) 4825 { 4826 struct nfs4_exception exception = { 4827 .interruptible = true, 4828 }; 4829 int err; 4830 do { 4831 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4832 trace_nfs4_readlink(inode, err); 4833 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4834 &exception); 4835 } while (exception.retry); 4836 return err; 4837 } 4838 4839 /* 4840 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
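 * The create is driven through nfs4_do_open(); the open context obtained
 * for it is released again before returning.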
4841 */ 4842 static int 4843 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4844 int flags) 4845 { 4846 struct nfs_server *server = NFS_SERVER(dir); 4847 struct nfs4_label l, *ilabel; 4848 struct nfs_open_context *ctx; 4849 struct nfs4_state *state; 4850 int status = 0; 4851 4852 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4853 if (IS_ERR(ctx)) 4854 return PTR_ERR(ctx); 4855 4856 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4857 4858 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4859 sattr->ia_mode &= ~current_umask(); 4860 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4861 if (IS_ERR(state)) { 4862 status = PTR_ERR(state); 4863 goto out; 4864 } 4865 out: 4866 nfs4_label_release_security(ilabel); 4867 put_nfs_open_context(ctx); 4868 return status; 4869 } 4870 4871 static int 4872 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4873 { 4874 struct nfs_server *server = NFS_SERVER(dir); 4875 struct nfs_removeargs args = { 4876 .fh = NFS_FH(dir), 4877 .name = *name, 4878 }; 4879 struct nfs_removeres res = { 4880 .server = server, 4881 }; 4882 struct rpc_message msg = { 4883 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4884 .rpc_argp = &args, 4885 .rpc_resp = &res, 4886 }; 4887 unsigned long timestamp = jiffies; 4888 int status; 4889 4890 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4891 if (status == 0) { 4892 spin_lock(&dir->i_lock); 4893 /* Removing a directory decrements nlink in the parent */ 4894 if (ftype == NF4DIR && dir->i_nlink > 2) 4895 nfs4_dec_nlink_locked(dir); 4896 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4897 NFS_INO_INVALID_DATA); 4898 spin_unlock(&dir->i_lock); 4899 } 4900 return status; 4901 } 4902 4903 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4904 { 4905 struct nfs4_exception exception = { 4906 .interruptible = true, 4907 }; 4908 struct inode *inode = d_inode(dentry); 4909 int err; 4910 4911 if (inode) { 4912 if (inode->i_nlink == 1) 4913 nfs4_inode_return_delegation(inode); 4914 else 4915 nfs4_inode_make_writeable(inode); 4916 } 4917 do { 4918 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4919 trace_nfs4_remove(dir, &dentry->d_name, err); 4920 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4921 &exception); 4922 } while (exception.retry); 4923 return err; 4924 } 4925 4926 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4927 { 4928 struct nfs4_exception exception = { 4929 .interruptible = true, 4930 }; 4931 int err; 4932 4933 do { 4934 err = _nfs4_proc_remove(dir, name, NF4DIR); 4935 trace_nfs4_remove(dir, name, err); 4936 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4937 &exception); 4938 } while (exception.retry); 4939 return err; 4940 } 4941 4942 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4943 struct dentry *dentry, 4944 struct inode *inode) 4945 { 4946 struct nfs_removeargs *args = msg->rpc_argp; 4947 struct nfs_removeres *res = msg->rpc_resp; 4948 4949 res->server = NFS_SB(dentry->d_sb); 4950 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4951 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 4952 4953 nfs_fattr_init(res->dir_attr); 4954 4955 if (inode) { 4956 nfs4_inode_return_delegation(inode); 4957 nfs_d_prune_case_insensitive_aliases(inode); 4958 } 4959 } 4960 4961 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4962 { 4963 
nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4964 &data->args.seq_args, 4965 &data->res.seq_res, 4966 task); 4967 } 4968 4969 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4970 { 4971 struct nfs_unlinkdata *data = task->tk_calldata; 4972 struct nfs_removeres *res = &data->res; 4973 4974 if (!nfs4_sequence_done(task, &res->seq_res)) 4975 return 0; 4976 if (nfs4_async_handle_error(task, res->server, NULL, 4977 &data->timeout) == -EAGAIN) 4978 return 0; 4979 if (task->tk_status == 0) 4980 nfs4_update_changeattr(dir, &res->cinfo, 4981 res->dir_attr->time_start, 4982 NFS_INO_INVALID_DATA); 4983 return 1; 4984 } 4985 4986 static void nfs4_proc_rename_setup(struct rpc_message *msg, 4987 struct dentry *old_dentry, 4988 struct dentry *new_dentry) 4989 { 4990 struct nfs_renameargs *arg = msg->rpc_argp; 4991 struct nfs_renameres *res = msg->rpc_resp; 4992 struct inode *old_inode = d_inode(old_dentry); 4993 struct inode *new_inode = d_inode(new_dentry); 4994 4995 if (old_inode) 4996 nfs4_inode_make_writeable(old_inode); 4997 if (new_inode) 4998 nfs4_inode_return_delegation(new_inode); 4999 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 5000 res->server = NFS_SB(old_dentry->d_sb); 5001 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 5002 } 5003 5004 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 5005 { 5006 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 5007 &data->args.seq_args, 5008 &data->res.seq_res, 5009 task); 5010 } 5011 5012 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5013 struct inode *new_dir) 5014 { 5015 struct nfs_renamedata *data = task->tk_calldata; 5016 struct nfs_renameres *res = &data->res; 5017 5018 if (!nfs4_sequence_done(task, &res->seq_res)) 5019 return 0; 5020 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5021 return 0; 5022 5023 if (task->tk_status == 0) { 5024 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5025 if (new_dir != old_dir) { 5026 /* Note: If we moved a directory, nlink will change */ 5027 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5028 res->old_fattr->time_start, 5029 NFS_INO_INVALID_NLINK | 5030 NFS_INO_INVALID_DATA); 5031 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5032 res->new_fattr->time_start, 5033 NFS_INO_INVALID_NLINK | 5034 NFS_INO_INVALID_DATA); 5035 } else 5036 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5037 res->old_fattr->time_start, 5038 NFS_INO_INVALID_DATA); 5039 } 5040 return 1; 5041 } 5042 5043 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5044 { 5045 struct nfs_server *server = NFS_SERVER(inode); 5046 __u32 bitmask[NFS4_BITMASK_SZ]; 5047 struct nfs4_link_arg arg = { 5048 .fh = NFS_FH(inode), 5049 .dir_fh = NFS_FH(dir), 5050 .name = name, 5051 .bitmask = bitmask, 5052 }; 5053 struct nfs4_link_res res = { 5054 .server = server, 5055 }; 5056 struct rpc_message msg = { 5057 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5058 .rpc_argp = &arg, 5059 .rpc_resp = &res, 5060 }; 5061 int status = -ENOMEM; 5062 5063 res.fattr = nfs_alloc_fattr_with_label(server); 5064 if (res.fattr == NULL) 5065 goto out; 5066 5067 nfs4_inode_make_writeable(inode); 5068 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), 5069 inode, 5070 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5071 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5072 if 
(!status) { 5073 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5074 NFS_INO_INVALID_DATA); 5075 nfs4_inc_nlink(inode); 5076 status = nfs_post_op_update_inode(inode, res.fattr); 5077 if (!status) 5078 nfs_setsecurity(inode, res.fattr); 5079 } 5080 5081 out: 5082 nfs_free_fattr(res.fattr); 5083 return status; 5084 } 5085 5086 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5087 { 5088 struct nfs4_exception exception = { 5089 .interruptible = true, 5090 }; 5091 int err; 5092 do { 5093 err = nfs4_handle_exception(NFS_SERVER(inode), 5094 _nfs4_proc_link(inode, dir, name), 5095 &exception); 5096 } while (exception.retry); 5097 return err; 5098 } 5099 5100 struct nfs4_createdata { 5101 struct rpc_message msg; 5102 struct nfs4_create_arg arg; 5103 struct nfs4_create_res res; 5104 struct nfs_fh fh; 5105 struct nfs_fattr fattr; 5106 }; 5107 5108 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5109 const struct qstr *name, struct iattr *sattr, u32 ftype) 5110 { 5111 struct nfs4_createdata *data; 5112 5113 data = kzalloc(sizeof(*data), GFP_KERNEL); 5114 if (data != NULL) { 5115 struct nfs_server *server = NFS_SERVER(dir); 5116 5117 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5118 if (IS_ERR(data->fattr.label)) 5119 goto out_free; 5120 5121 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5122 data->msg.rpc_argp = &data->arg; 5123 data->msg.rpc_resp = &data->res; 5124 data->arg.dir_fh = NFS_FH(dir); 5125 data->arg.server = server; 5126 data->arg.name = name; 5127 data->arg.attrs = sattr; 5128 data->arg.ftype = ftype; 5129 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5130 data->arg.umask = current_umask(); 5131 data->res.server = server; 5132 data->res.fh = &data->fh; 5133 data->res.fattr = &data->fattr; 5134 nfs_fattr_init(data->res.fattr); 5135 } 5136 return data; 5137 out_free: 5138 kfree(data); 5139 return NULL; 5140 } 5141 5142 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5143 { 5144 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5145 &data->arg.seq_args, &data->res.seq_res, 1); 5146 if (status == 0) { 5147 spin_lock(&dir->i_lock); 5148 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5149 data->res.fattr->time_start, 5150 NFS_INO_INVALID_DATA); 5151 spin_unlock(&dir->i_lock); 5152 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5153 } 5154 return status; 5155 } 5156 5157 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5158 struct nfs4_createdata *data) 5159 { 5160 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5161 &data->arg.seq_args, &data->res.seq_res, 1); 5162 5163 if (status) 5164 return ERR_PTR(status); 5165 5166 spin_lock(&dir->i_lock); 5167 /* Creating a directory bumps nlink in the parent */ 5168 nfs4_inc_nlink_locked(dir); 5169 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5170 data->res.fattr->time_start, 5171 NFS_INO_INVALID_DATA); 5172 spin_unlock(&dir->i_lock); 5173 return nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5174 } 5175 5176 static void nfs4_free_createdata(struct nfs4_createdata *data) 5177 { 5178 nfs4_label_free(data->fattr.label); 5179 kfree(data); 5180 } 5181 5182 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5183 struct folio *folio, unsigned int len, struct iattr *sattr, 5184 struct nfs4_label *label) 5185 { 5186 struct 
page *page = &folio->page; 5187 struct nfs4_createdata *data; 5188 int status = -ENAMETOOLONG; 5189 5190 if (len > NFS4_MAXPATHLEN) 5191 goto out; 5192 5193 status = -ENOMEM; 5194 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5195 if (data == NULL) 5196 goto out; 5197 5198 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5199 data->arg.u.symlink.pages = &page; 5200 data->arg.u.symlink.len = len; 5201 data->arg.label = label; 5202 5203 status = nfs4_do_create(dir, dentry, data); 5204 5205 nfs4_free_createdata(data); 5206 out: 5207 return status; 5208 } 5209 5210 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5211 struct folio *folio, unsigned int len, struct iattr *sattr) 5212 { 5213 struct nfs4_exception exception = { 5214 .interruptible = true, 5215 }; 5216 struct nfs4_label l, *label; 5217 int err; 5218 5219 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5220 5221 do { 5222 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5223 trace_nfs4_symlink(dir, &dentry->d_name, err); 5224 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5225 &exception); 5226 } while (exception.retry); 5227 5228 nfs4_label_release_security(label); 5229 return err; 5230 } 5231 5232 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5233 struct iattr *sattr, 5234 struct nfs4_label *label) 5235 { 5236 struct nfs4_createdata *data; 5237 struct dentry *ret = ERR_PTR(-ENOMEM); 5238 5239 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5240 if (data == NULL) 5241 goto out; 5242 5243 data->arg.label = label; 5244 ret = nfs4_do_mkdir(dir, dentry, data); 5245 5246 nfs4_free_createdata(data); 5247 out: 5248 return ret; 5249 } 5250 5251 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5252 struct iattr *sattr) 5253 { 5254 struct nfs_server *server = NFS_SERVER(dir); 5255 struct nfs4_exception exception = { 5256 .interruptible = true, 5257 }; 5258 struct nfs4_label l, *label; 5259 struct dentry *alias; 5260 int err; 5261 5262 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5263 5264 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5265 sattr->ia_mode &= ~current_umask(); 5266 do { 5267 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label); 5268 err = PTR_ERR_OR_ZERO(alias); 5269 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5270 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5271 &exception); 5272 } while (exception.retry); 5273 nfs4_label_release_security(label); 5274 5275 return alias; 5276 } 5277 5278 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5279 struct nfs_readdir_res *nr_res) 5280 { 5281 struct inode *dir = d_inode(nr_arg->dentry); 5282 struct nfs_server *server = NFS_SERVER(dir); 5283 struct nfs4_readdir_arg args = { 5284 .fh = NFS_FH(dir), 5285 .pages = nr_arg->pages, 5286 .pgbase = 0, 5287 .count = nr_arg->page_len, 5288 .plus = nr_arg->plus, 5289 }; 5290 struct nfs4_readdir_res res; 5291 struct rpc_message msg = { 5292 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5293 .rpc_argp = &args, 5294 .rpc_resp = &res, 5295 .rpc_cred = nr_arg->cred, 5296 }; 5297 int status; 5298 5299 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5300 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5301 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5302 args.bitmask = server->attr_bitmask_nl; 5303 else 5304 args.bitmask = server->attr_bitmask; 5305 5306 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, 
&args); 5307 res.pgbase = args.pgbase; 5308 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5309 &res.seq_res, 0); 5310 if (status >= 0) { 5311 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5312 status += args.pgbase; 5313 } 5314 5315 nfs_invalidate_atime(dir); 5316 5317 dprintk("%s: returns %d\n", __func__, status); 5318 return status; 5319 } 5320 5321 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5322 struct nfs_readdir_res *res) 5323 { 5324 struct nfs4_exception exception = { 5325 .interruptible = true, 5326 }; 5327 int err; 5328 do { 5329 err = _nfs4_proc_readdir(arg, res); 5330 trace_nfs4_readdir(d_inode(arg->dentry), err); 5331 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5332 err, &exception); 5333 } while (exception.retry); 5334 return err; 5335 } 5336 5337 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5338 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5339 { 5340 struct nfs4_createdata *data; 5341 int mode = sattr->ia_mode; 5342 int status = -ENOMEM; 5343 5344 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5345 if (data == NULL) 5346 goto out; 5347 5348 if (S_ISFIFO(mode)) 5349 data->arg.ftype = NF4FIFO; 5350 else if (S_ISBLK(mode)) { 5351 data->arg.ftype = NF4BLK; 5352 data->arg.u.device.specdata1 = MAJOR(rdev); 5353 data->arg.u.device.specdata2 = MINOR(rdev); 5354 } 5355 else if (S_ISCHR(mode)) { 5356 data->arg.ftype = NF4CHR; 5357 data->arg.u.device.specdata1 = MAJOR(rdev); 5358 data->arg.u.device.specdata2 = MINOR(rdev); 5359 } else if (!S_ISSOCK(mode)) { 5360 status = -EINVAL; 5361 goto out_free; 5362 } 5363 5364 data->arg.label = label; 5365 status = nfs4_do_create(dir, dentry, data); 5366 out_free: 5367 nfs4_free_createdata(data); 5368 out: 5369 return status; 5370 } 5371 5372 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5373 struct iattr *sattr, dev_t rdev) 5374 { 5375 struct nfs_server *server = NFS_SERVER(dir); 5376 struct nfs4_exception exception = { 5377 .interruptible = true, 5378 }; 5379 struct nfs4_label l, *label; 5380 int err; 5381 5382 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5383 5384 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5385 sattr->ia_mode &= ~current_umask(); 5386 do { 5387 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5388 trace_nfs4_mknod(dir, &dentry->d_name, err); 5389 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5390 &exception); 5391 } while (exception.retry); 5392 5393 nfs4_label_release_security(label); 5394 5395 return err; 5396 } 5397 5398 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5399 struct nfs_fsstat *fsstat) 5400 { 5401 struct nfs4_statfs_arg args = { 5402 .fh = fhandle, 5403 .bitmask = server->attr_bitmask, 5404 }; 5405 struct nfs4_statfs_res res = { 5406 .fsstat = fsstat, 5407 }; 5408 struct rpc_message msg = { 5409 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5410 .rpc_argp = &args, 5411 .rpc_resp = &res, 5412 }; 5413 5414 nfs_fattr_init(fsstat->fattr); 5415 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5416 } 5417 5418 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5419 { 5420 struct nfs4_exception exception = { 5421 .interruptible = true, 5422 }; 5423 int err; 5424 do { 5425 err = nfs4_handle_exception(server, 5426 _nfs4_proc_statfs(server, fhandle, fsstat), 5427 &exception); 5428 } while 
(exception.retry); 5429 return err; 5430 } 5431 5432 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5433 struct nfs_fsinfo *fsinfo) 5434 { 5435 struct nfs4_fsinfo_arg args = { 5436 .fh = fhandle, 5437 .bitmask = server->attr_bitmask, 5438 }; 5439 struct nfs4_fsinfo_res res = { 5440 .fsinfo = fsinfo, 5441 }; 5442 struct rpc_message msg = { 5443 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5444 .rpc_argp = &args, 5445 .rpc_resp = &res, 5446 }; 5447 5448 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5449 } 5450 5451 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5452 { 5453 struct nfs4_exception exception = { 5454 .interruptible = true, 5455 }; 5456 int err; 5457 5458 do { 5459 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5460 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5461 if (err == 0) { 5462 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5463 break; 5464 } 5465 err = nfs4_handle_exception(server, err, &exception); 5466 } while (exception.retry); 5467 return err; 5468 } 5469 5470 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5471 { 5472 int error; 5473 5474 nfs_fattr_init(fsinfo->fattr); 5475 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5476 if (error == 0) { 5477 /* block layout checks this! */ 5478 server->pnfs_blksize = fsinfo->blksize; 5479 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5480 } 5481 5482 return error; 5483 } 5484 5485 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5486 struct nfs_pathconf *pathconf) 5487 { 5488 struct nfs4_pathconf_arg args = { 5489 .fh = fhandle, 5490 .bitmask = server->attr_bitmask, 5491 }; 5492 struct nfs4_pathconf_res res = { 5493 .pathconf = pathconf, 5494 }; 5495 struct rpc_message msg = { 5496 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5497 .rpc_argp = &args, 5498 .rpc_resp = &res, 5499 }; 5500 5501 /* None of the pathconf attributes are mandatory to implement */ 5502 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5503 memset(pathconf, 0, sizeof(*pathconf)); 5504 return 0; 5505 } 5506 5507 nfs_fattr_init(pathconf->fattr); 5508 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5509 } 5510 5511 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5512 struct nfs_pathconf *pathconf) 5513 { 5514 struct nfs4_exception exception = { 5515 .interruptible = true, 5516 }; 5517 int err; 5518 5519 do { 5520 err = nfs4_handle_exception(server, 5521 _nfs4_proc_pathconf(server, fhandle, pathconf), 5522 &exception); 5523 } while (exception.retry); 5524 return err; 5525 } 5526 5527 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5528 const struct nfs_open_context *ctx, 5529 const struct nfs_lock_context *l_ctx, 5530 fmode_t fmode) 5531 { 5532 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5533 } 5534 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5535 5536 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5537 const struct nfs_open_context *ctx, 5538 const struct nfs_lock_context *l_ctx, 5539 fmode_t fmode) 5540 { 5541 nfs4_stateid _current_stateid; 5542 5543 /* If the current stateid represents a lost lock, then exit */ 5544 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5545 return true; 5546 return nfs4_stateid_match(stateid, &_current_stateid); 5547 } 5548 5549 
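/*
 * Returns true if @err indicates that the stateid used for the I/O has been
 * revoked, expired or otherwise invalidated, so the caller may need to
 * obtain a fresh stateid before retrying.
 */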
static bool nfs4_error_stateid_expired(int err) 5550 { 5551 switch (err) { 5552 case -NFS4ERR_DELEG_REVOKED: 5553 case -NFS4ERR_ADMIN_REVOKED: 5554 case -NFS4ERR_BAD_STATEID: 5555 case -NFS4ERR_STALE_STATEID: 5556 case -NFS4ERR_OLD_STATEID: 5557 case -NFS4ERR_OPENMODE: 5558 case -NFS4ERR_EXPIRED: 5559 return true; 5560 } 5561 return false; 5562 } 5563 5564 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5565 { 5566 struct nfs_server *server = NFS_SERVER(hdr->inode); 5567 5568 trace_nfs4_read(hdr, task->tk_status); 5569 if (task->tk_status < 0) { 5570 struct nfs4_exception exception = { 5571 .inode = hdr->inode, 5572 .state = hdr->args.context->state, 5573 .stateid = &hdr->args.stateid, 5574 }; 5575 task->tk_status = nfs4_async_handle_exception(task, 5576 server, task->tk_status, &exception); 5577 if (exception.retry) { 5578 rpc_restart_call_prepare(task); 5579 return -EAGAIN; 5580 } 5581 } 5582 5583 if (task->tk_status > 0) 5584 renew_lease(server, hdr->timestamp); 5585 return 0; 5586 } 5587 5588 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5589 struct nfs_pgio_args *args) 5590 { 5591 5592 if (!nfs4_error_stateid_expired(task->tk_status) || 5593 nfs4_stateid_is_current(&args->stateid, 5594 args->context, 5595 args->lock_context, 5596 FMODE_READ)) 5597 return false; 5598 rpc_restart_call_prepare(task); 5599 return true; 5600 } 5601 5602 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5603 struct nfs_pgio_header *hdr) 5604 { 5605 struct nfs_server *server = NFS_SERVER(hdr->inode); 5606 struct rpc_message *msg = &task->tk_msg; 5607 5608 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5609 task->tk_status == -ENOTSUPP) { 5610 server->caps &= ~NFS_CAP_READ_PLUS; 5611 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5612 rpc_restart_call_prepare(task); 5613 return true; 5614 } 5615 return false; 5616 } 5617 5618 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5619 { 5620 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5621 return -EAGAIN; 5622 if (nfs4_read_stateid_changed(task, &hdr->args)) 5623 return -EAGAIN; 5624 if (nfs4_read_plus_not_supported(task, hdr)) 5625 return -EAGAIN; 5626 if (task->tk_status > 0) 5627 nfs_invalidate_atime(hdr->inode); 5628 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5629 nfs4_read_done_cb(task, hdr); 5630 } 5631 5632 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5633 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5634 struct rpc_message *msg) 5635 { 5636 /* Note: We don't use READ_PLUS with pNFS yet */ 5637 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5638 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5639 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5640 } 5641 return false; 5642 } 5643 #else 5644 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5645 struct rpc_message *msg) 5646 { 5647 return false; 5648 } 5649 #endif /* CONFIG_NFS_V4_2 */ 5650 5651 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5652 struct rpc_message *msg) 5653 { 5654 hdr->timestamp = jiffies; 5655 if (!hdr->pgio_done_cb) 5656 hdr->pgio_done_cb = nfs4_read_done_cb; 5657 if (!nfs42_read_plus_support(hdr, msg)) 5658 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5659 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5660 } 5661 5662 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5663 struct nfs_pgio_header *hdr) 5664 { 5665 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5666 &hdr->args.seq_args, 5667 &hdr->res.seq_res, 5668 task)) 5669 return 0; 5670 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5671 hdr->args.lock_context, 5672 hdr->rw_mode) == -EIO) 5673 return -EIO; 5674 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5675 return -EIO; 5676 return 0; 5677 } 5678 5679 static int nfs4_write_done_cb(struct rpc_task *task, 5680 struct nfs_pgio_header *hdr) 5681 { 5682 struct inode *inode = hdr->inode; 5683 5684 trace_nfs4_write(hdr, task->tk_status); 5685 if (task->tk_status < 0) { 5686 struct nfs4_exception exception = { 5687 .inode = hdr->inode, 5688 .state = hdr->args.context->state, 5689 .stateid = &hdr->args.stateid, 5690 }; 5691 task->tk_status = nfs4_async_handle_exception(task, 5692 NFS_SERVER(inode), task->tk_status, 5693 &exception); 5694 if (exception.retry) { 5695 rpc_restart_call_prepare(task); 5696 return -EAGAIN; 5697 } 5698 } 5699 if (task->tk_status >= 0) { 5700 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5701 nfs_writeback_update_inode(hdr); 5702 } 5703 return 0; 5704 } 5705 5706 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5707 struct nfs_pgio_args *args) 5708 { 5709 5710 if (!nfs4_error_stateid_expired(task->tk_status) || 5711 nfs4_stateid_is_current(&args->stateid, 5712 args->context, 5713 args->lock_context, 5714 FMODE_WRITE)) 5715 return false; 5716 rpc_restart_call_prepare(task); 5717 return true; 5718 } 5719 5720 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5721 { 5722 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5723 return -EAGAIN; 5724 if (nfs4_write_stateid_changed(task, &hdr->args)) 5725 return -EAGAIN; 5726 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5727 nfs4_write_done_cb(task, hdr); 5728 } 5729 5730 static 5731 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5732 { 5733 /* Don't request attributes for pNFS or O_DIRECT writes */ 5734 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5735 return false; 5736 /* Otherwise, request attributes if and only if we don't hold 5737 * a delegation 5738 */ 5739 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5740 } 5741 5742 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5743 struct inode *inode, unsigned long cache_validity) 5744 { 5745 struct nfs_server *server = NFS_SERVER(inode); 5746 unsigned int i; 5747 5748 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5749 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5750 5751 if (cache_validity & NFS_INO_INVALID_CHANGE) 5752 bitmask[0] |= FATTR4_WORD0_CHANGE; 5753 if (cache_validity & NFS_INO_INVALID_ATIME) 5754 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5755 if (cache_validity & NFS_INO_INVALID_MODE) 5756 bitmask[1] |= FATTR4_WORD1_MODE; 5757 if (cache_validity & NFS_INO_INVALID_OTHER) 5758 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5759 if (cache_validity & NFS_INO_INVALID_NLINK) 5760 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5761 if (cache_validity & NFS_INO_INVALID_CTIME) 5762 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5763 if (cache_validity & NFS_INO_INVALID_MTIME) 5764 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5765 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5766 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5767 5768 if (cache_validity & NFS_INO_INVALID_SIZE) 5769 bitmask[0] |= FATTR4_WORD0_SIZE; 5770 5771 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5772 bitmask[i] &= server->attr_bitmask[i]; 5773 } 5774 5775 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5776 struct rpc_message *msg, 5777 struct rpc_clnt **clnt) 5778 { 5779 struct nfs_server *server = NFS_SERVER(hdr->inode); 5780 5781 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5782 hdr->args.bitmask = NULL; 5783 hdr->res.fattr = NULL; 5784 } else { 5785 nfs4_bitmask_set(hdr->args.bitmask_store, 5786 server->cache_consistency_bitmask, 5787 hdr->inode, NFS_INO_INVALID_BLOCKS); 5788 hdr->args.bitmask = hdr->args.bitmask_store; 5789 } 5790 5791 if (!hdr->pgio_done_cb) 5792 hdr->pgio_done_cb = nfs4_write_done_cb; 5793 hdr->res.server = server; 5794 hdr->timestamp = jiffies; 5795 5796 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5797 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5798 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5799 } 5800 5801 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5802 { 5803 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5804 &data->args.seq_args, 5805 &data->res.seq_res, 5806 task); 5807 } 5808 5809 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5810 { 5811 struct inode *inode = data->inode; 5812 5813 trace_nfs4_commit(data, task->tk_status); 5814 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5815 NULL, NULL) == -EAGAIN) { 5816 rpc_restart_call_prepare(task); 5817 return -EAGAIN; 5818 } 5819 return 0; 5820 } 5821 5822 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5823 { 5824 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5825 return -EAGAIN; 5826 return data->commit_done_cb(task, data); 5827 } 5828 5829 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5830 struct rpc_clnt **clnt) 5831 { 5832 struct nfs_server *server = NFS_SERVER(data->inode); 5833 5834 if (data->commit_done_cb == NULL) 5835 data->commit_done_cb = nfs4_commit_done_cb; 5836 data->res.server = server; 5837 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5838 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5839 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5840 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5841 } 5842 5843 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5844 struct nfs_commitres *res) 5845 { 5846 struct inode *dst_inode = file_inode(dst); 5847 struct nfs_server *server = NFS_SERVER(dst_inode); 5848 struct rpc_message msg = { 5849 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5850 .rpc_argp = args, 5851 .rpc_resp = res, 5852 }; 5853 5854 args->fh = NFS_FH(dst_inode); 5855 return nfs4_call_sync(server->client, server, &msg, 5856 &args->seq_args, &res->seq_res, 1); 5857 } 5858 5859 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5860 { 5861 struct nfs_commitargs args = { 5862 .offset = offset, 5863 .count = count, 5864 }; 5865 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5866 struct nfs4_exception exception = { }; 5867 int status; 5868 5869 do { 5870 status = _nfs4_proc_commit(dst, &args, res); 5871 status = nfs4_handle_exception(dst_server, status, &exception); 5872 } while (exception.retry); 5873 5874 return status; 5875 } 5876 5877 struct nfs4_renewdata { 5878 struct nfs_client *client; 5879 unsigned long timestamp; 5880 }; 5881 5882 /* 5883 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5884 * standalone procedure for queueing an asynchronous RENEW. 
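 * (RENEW itself is an NFSv4.0 operation; on v4.1 and later mounts the
 * lease is renewed implicitly by the SEQUENCE op carried in every
 * compound, so this helper is only expected to be used for v4.0 state
 * renewal.)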
5885 */ 5886 static void nfs4_renew_release(void *calldata) 5887 { 5888 struct nfs4_renewdata *data = calldata; 5889 struct nfs_client *clp = data->client; 5890 5891 if (refcount_read(&clp->cl_count) > 1) 5892 nfs4_schedule_state_renewal(clp); 5893 nfs_put_client(clp); 5894 kfree(data); 5895 } 5896 5897 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 5898 { 5899 struct nfs4_renewdata *data = calldata; 5900 struct nfs_client *clp = data->client; 5901 unsigned long timestamp = data->timestamp; 5902 5903 trace_nfs4_renew_async(clp, task->tk_status); 5904 switch (task->tk_status) { 5905 case 0: 5906 break; 5907 case -NFS4ERR_LEASE_MOVED: 5908 nfs4_schedule_lease_moved_recovery(clp); 5909 break; 5910 default: 5911 /* Unless we're shutting down, schedule state recovery! */ 5912 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 5913 return; 5914 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 5915 nfs4_schedule_lease_recovery(clp); 5916 return; 5917 } 5918 nfs4_schedule_path_down_recovery(clp); 5919 } 5920 do_renew_lease(clp, timestamp); 5921 } 5922 5923 static const struct rpc_call_ops nfs4_renew_ops = { 5924 .rpc_call_done = nfs4_renew_done, 5925 .rpc_release = nfs4_renew_release, 5926 }; 5927 5928 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 5929 { 5930 struct rpc_message msg = { 5931 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5932 .rpc_argp = clp, 5933 .rpc_cred = cred, 5934 }; 5935 struct nfs4_renewdata *data; 5936 5937 if (renew_flags == 0) 5938 return 0; 5939 if (!refcount_inc_not_zero(&clp->cl_count)) 5940 return -EIO; 5941 data = kmalloc(sizeof(*data), GFP_NOFS); 5942 if (data == NULL) { 5943 nfs_put_client(clp); 5944 return -ENOMEM; 5945 } 5946 data->client = clp; 5947 data->timestamp = jiffies; 5948 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 5949 &nfs4_renew_ops, data); 5950 } 5951 5952 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 5953 { 5954 struct rpc_message msg = { 5955 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5956 .rpc_argp = clp, 5957 .rpc_cred = cred, 5958 }; 5959 unsigned long now = jiffies; 5960 int status; 5961 5962 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5963 if (status < 0) 5964 return status; 5965 do_renew_lease(clp, now); 5966 return 0; 5967 } 5968 5969 static bool nfs4_server_supports_acls(const struct nfs_server *server, 5970 enum nfs4_acl_type type) 5971 { 5972 switch (type) { 5973 default: 5974 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 5975 case NFS4ACL_DACL: 5976 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 5977 case NFS4ACL_SACL: 5978 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 5979 } 5980 } 5981 5982 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 5983 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 5984 * the stack. 
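 *
 * Worked example (illustrative only): with XATTR_SIZE_MAX at 64 KiB and
 * 4 KiB pages, NFS4ACL_MAXPAGES below comes out to 16, so the on-stack
 * array declared in __nfs4_proc_set_acl(),
 *
 *	struct page *pages[NFS4ACL_MAXPAGES];
 *
 * costs 16 * sizeof(struct page *) = 128 bytes on a 64-bit build.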
5985 */ 5986 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 5987 5988 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 5989 struct page **pages) 5990 { 5991 struct page *newpage, **spages; 5992 int rc = 0; 5993 size_t len; 5994 spages = pages; 5995 5996 do { 5997 len = min_t(size_t, PAGE_SIZE, buflen); 5998 newpage = alloc_page(GFP_KERNEL); 5999 6000 if (newpage == NULL) 6001 goto unwind; 6002 memcpy(page_address(newpage), buf, len); 6003 buf += len; 6004 buflen -= len; 6005 *pages++ = newpage; 6006 rc++; 6007 } while (buflen != 0); 6008 6009 return rc; 6010 6011 unwind: 6012 for(; rc > 0; rc--) 6013 __free_page(spages[rc-1]); 6014 return -ENOMEM; 6015 } 6016 6017 struct nfs4_cached_acl { 6018 enum nfs4_acl_type type; 6019 int cached; 6020 size_t len; 6021 char data[]; 6022 }; 6023 6024 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 6025 { 6026 struct nfs_inode *nfsi = NFS_I(inode); 6027 6028 spin_lock(&inode->i_lock); 6029 kfree(nfsi->nfs4_acl); 6030 nfsi->nfs4_acl = acl; 6031 spin_unlock(&inode->i_lock); 6032 } 6033 6034 static void nfs4_zap_acl_attr(struct inode *inode) 6035 { 6036 nfs4_set_cached_acl(inode, NULL); 6037 } 6038 6039 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6040 size_t buflen, enum nfs4_acl_type type) 6041 { 6042 struct nfs_inode *nfsi = NFS_I(inode); 6043 struct nfs4_cached_acl *acl; 6044 int ret = -ENOENT; 6045 6046 spin_lock(&inode->i_lock); 6047 acl = nfsi->nfs4_acl; 6048 if (acl == NULL) 6049 goto out; 6050 if (acl->type != type) 6051 goto out; 6052 if (buf == NULL) /* user is just asking for length */ 6053 goto out_len; 6054 if (acl->cached == 0) 6055 goto out; 6056 ret = -ERANGE; /* see getxattr(2) man page */ 6057 if (acl->len > buflen) 6058 goto out; 6059 memcpy(buf, acl->data, acl->len); 6060 out_len: 6061 ret = acl->len; 6062 out: 6063 spin_unlock(&inode->i_lock); 6064 return ret; 6065 } 6066 6067 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6068 size_t pgbase, size_t acl_len, 6069 enum nfs4_acl_type type) 6070 { 6071 struct nfs4_cached_acl *acl; 6072 size_t buflen = sizeof(*acl) + acl_len; 6073 6074 if (buflen <= PAGE_SIZE) { 6075 acl = kmalloc(buflen, GFP_KERNEL); 6076 if (acl == NULL) 6077 goto out; 6078 acl->cached = 1; 6079 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6080 } else { 6081 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6082 if (acl == NULL) 6083 goto out; 6084 acl->cached = 0; 6085 } 6086 acl->type = type; 6087 acl->len = acl_len; 6088 out: 6089 nfs4_set_cached_acl(inode, acl); 6090 } 6091 6092 /* 6093 * The getxattr API returns the required buffer length when called with a 6094 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6095 * the required buf. On a NULL buf, we send a page of data to the server 6096 * guessing that the ACL request can be serviced by a page. If so, we cache 6097 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6098 * the cache. If not so, we throw away the page, and cache the required 6099 * length. The next getxattr call will then produce another round trip to 6100 * the server, this time with the input buf of the required size. 
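 *
 * A minimal userspace sketch of that calling pattern (illustrative,
 * not part of this file; error handling omitted), using the
 * system.nfs4_acl name served by this code:
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	char *buf = malloc(len);
 *	len = getxattr(path, "system.nfs4_acl", buf, len);
 *
 * When the ACL fit in the guessed page, the second call above is
 * answered from the cache primed here; otherwise it triggers the
 * further round trip described above.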
6101 */ 6102 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6103 size_t buflen, enum nfs4_acl_type type) 6104 { 6105 struct page **pages; 6106 struct nfs_getaclargs args = { 6107 .fh = NFS_FH(inode), 6108 .acl_type = type, 6109 .acl_len = buflen, 6110 }; 6111 struct nfs_getaclres res = { 6112 .acl_type = type, 6113 .acl_len = buflen, 6114 }; 6115 struct rpc_message msg = { 6116 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6117 .rpc_argp = &args, 6118 .rpc_resp = &res, 6119 }; 6120 unsigned int npages; 6121 int ret = -ENOMEM, i; 6122 struct nfs_server *server = NFS_SERVER(inode); 6123 6124 if (buflen == 0) 6125 buflen = server->rsize; 6126 6127 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6128 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6129 if (!pages) 6130 return -ENOMEM; 6131 6132 args.acl_pages = pages; 6133 6134 for (i = 0; i < npages; i++) { 6135 pages[i] = alloc_page(GFP_KERNEL); 6136 if (!pages[i]) 6137 goto out_free; 6138 } 6139 6140 /* for decoding across pages */ 6141 res.acl_scratch = alloc_page(GFP_KERNEL); 6142 if (!res.acl_scratch) 6143 goto out_free; 6144 6145 args.acl_len = npages * PAGE_SIZE; 6146 6147 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6148 __func__, buf, buflen, npages, args.acl_len); 6149 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6150 &msg, &args.seq_args, &res.seq_res, 0); 6151 if (ret) 6152 goto out_free; 6153 6154 /* Handle the case where the passed-in buffer is too short */ 6155 if (res.acl_flags & NFS4_ACL_TRUNC) { 6156 /* Did the user only issue a request for the acl length? */ 6157 if (buf == NULL) 6158 goto out_ok; 6159 ret = -ERANGE; 6160 goto out_free; 6161 } 6162 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6163 type); 6164 if (buf) { 6165 if (res.acl_len > buflen) { 6166 ret = -ERANGE; 6167 goto out_free; 6168 } 6169 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6170 } 6171 out_ok: 6172 ret = res.acl_len; 6173 out_free: 6174 while (--i >= 0) 6175 __free_page(pages[i]); 6176 if (res.acl_scratch) 6177 __free_page(res.acl_scratch); 6178 kfree(pages); 6179 return ret; 6180 } 6181 6182 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6183 size_t buflen, enum nfs4_acl_type type) 6184 { 6185 struct nfs4_exception exception = { 6186 .interruptible = true, 6187 }; 6188 ssize_t ret; 6189 do { 6190 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6191 trace_nfs4_get_acl(inode, ret); 6192 if (ret >= 0) 6193 break; 6194 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6195 } while (exception.retry); 6196 return ret; 6197 } 6198 6199 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6200 enum nfs4_acl_type type) 6201 { 6202 struct nfs_server *server = NFS_SERVER(inode); 6203 int ret; 6204 6205 if (!nfs4_server_supports_acls(server, type)) 6206 return -EOPNOTSUPP; 6207 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6208 if (ret < 0) 6209 return ret; 6210 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6211 nfs_zap_acl_cache(inode); 6212 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6213 if (ret != -ENOENT) 6214 /* -ENOENT is returned if there is no ACL or if there is an ACL 6215 * but no cached acl data, just the acl length */ 6216 return ret; 6217 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6218 } 6219 6220 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, 6221 size_t buflen, enum nfs4_acl_type 
type) 6222 { 6223 struct nfs_server *server = NFS_SERVER(inode); 6224 struct page *pages[NFS4ACL_MAXPAGES]; 6225 struct nfs_setaclargs arg = { 6226 .fh = NFS_FH(inode), 6227 .acl_type = type, 6228 .acl_len = buflen, 6229 .acl_pages = pages, 6230 }; 6231 struct nfs_setaclres res; 6232 struct rpc_message msg = { 6233 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6234 .rpc_argp = &arg, 6235 .rpc_resp = &res, 6236 }; 6237 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6238 int ret, i; 6239 6240 /* You can't remove system.nfs4_acl: */ 6241 if (buflen == 0) 6242 return -EINVAL; 6243 if (!nfs4_server_supports_acls(server, type)) 6244 return -EOPNOTSUPP; 6245 if (npages > ARRAY_SIZE(pages)) 6246 return -ERANGE; 6247 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6248 if (i < 0) 6249 return i; 6250 nfs4_inode_make_writeable(inode); 6251 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6252 6253 /* 6254 * Free each page after tx, so the only ref left is 6255 * held by the network stack 6256 */ 6257 for (; i > 0; i--) 6258 put_page(pages[i-1]); 6259 6260 /* 6261 * Acl update can result in inode attribute update. 6262 * so mark the attribute cache invalid. 6263 */ 6264 spin_lock(&inode->i_lock); 6265 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6266 NFS_INO_INVALID_CTIME | 6267 NFS_INO_REVAL_FORCED); 6268 spin_unlock(&inode->i_lock); 6269 nfs_access_zap_cache(inode); 6270 nfs_zap_acl_cache(inode); 6271 return ret; 6272 } 6273 6274 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6275 size_t buflen, enum nfs4_acl_type type) 6276 { 6277 struct nfs4_exception exception = { }; 6278 int err; 6279 do { 6280 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6281 trace_nfs4_set_acl(inode, err); 6282 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6283 /* 6284 * no need to retry since the kernel 6285 * isn't involved in encoding the ACEs. 
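 * The user/group who-strings inside the ACEs are generated by
 * userspace (e.g. by the nfs4_setfacl tool), so if the server rejects
 * them with BADOWNER/BADNAME, resending the same opaque buffer cannot
 * succeed; hand -EINVAL back to the caller instead.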
6286 */ 6287 err = -EINVAL; 6288 break; 6289 } 6290 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6291 &exception); 6292 } while (exception.retry); 6293 return err; 6294 } 6295 6296 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6297 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6298 size_t buflen) 6299 { 6300 struct nfs_server *server = NFS_SERVER(inode); 6301 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6302 6303 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6304 struct nfs_fattr fattr = { 6305 .label = &label, 6306 }; 6307 struct nfs4_getattr_arg arg = { 6308 .fh = NFS_FH(inode), 6309 .bitmask = bitmask, 6310 }; 6311 struct nfs4_getattr_res res = { 6312 .fattr = &fattr, 6313 .server = server, 6314 }; 6315 struct rpc_message msg = { 6316 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6317 .rpc_argp = &arg, 6318 .rpc_resp = &res, 6319 }; 6320 int ret; 6321 6322 nfs_fattr_init(&fattr); 6323 6324 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6325 if (ret) 6326 return ret; 6327 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6328 return -ENOENT; 6329 return label.len; 6330 } 6331 6332 static int nfs4_get_security_label(struct inode *inode, void *buf, 6333 size_t buflen) 6334 { 6335 struct nfs4_exception exception = { 6336 .interruptible = true, 6337 }; 6338 int err; 6339 6340 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6341 return -EOPNOTSUPP; 6342 6343 do { 6344 err = _nfs4_get_security_label(inode, buf, buflen); 6345 trace_nfs4_get_security_label(inode, err); 6346 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6347 &exception); 6348 } while (exception.retry); 6349 return err; 6350 } 6351 6352 static int _nfs4_do_set_security_label(struct inode *inode, 6353 struct nfs4_label *ilabel, 6354 struct nfs_fattr *fattr) 6355 { 6356 6357 struct iattr sattr = {0}; 6358 struct nfs_server *server = NFS_SERVER(inode); 6359 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6360 struct nfs_setattrargs arg = { 6361 .fh = NFS_FH(inode), 6362 .iap = &sattr, 6363 .server = server, 6364 .bitmask = bitmask, 6365 .label = ilabel, 6366 }; 6367 struct nfs_setattrres res = { 6368 .fattr = fattr, 6369 .server = server, 6370 }; 6371 struct rpc_message msg = { 6372 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6373 .rpc_argp = &arg, 6374 .rpc_resp = &res, 6375 }; 6376 int status; 6377 6378 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6379 6380 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6381 if (status) 6382 dprintk("%s failed: %d\n", __func__, status); 6383 6384 return status; 6385 } 6386 6387 static int nfs4_do_set_security_label(struct inode *inode, 6388 struct nfs4_label *ilabel, 6389 struct nfs_fattr *fattr) 6390 { 6391 struct nfs4_exception exception = { }; 6392 int err; 6393 6394 do { 6395 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6396 trace_nfs4_set_security_label(inode, err); 6397 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6398 &exception); 6399 } while (exception.retry); 6400 return err; 6401 } 6402 6403 static int 6404 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6405 { 6406 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6407 struct nfs_fattr *fattr; 6408 int status; 6409 6410 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6411 return -EOPNOTSUPP; 6412 6413 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6414 if (fattr == NULL) 6415 return -ENOMEM; 6416 6417 
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6418 if (status == 0) 6419 nfs_setsecurity(inode, fattr); 6420 6421 nfs_free_fattr(fattr); 6422 return status; 6423 } 6424 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6425 6426 6427 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6428 nfs4_verifier *bootverf) 6429 { 6430 __be32 verf[2]; 6431 6432 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6433 /* An impossible timestamp guarantees this value 6434 * will never match a generated boot time. */ 6435 verf[0] = cpu_to_be32(U32_MAX); 6436 verf[1] = cpu_to_be32(U32_MAX); 6437 } else { 6438 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6439 u64 ns = ktime_to_ns(nn->boot_time); 6440 6441 verf[0] = cpu_to_be32(ns >> 32); 6442 verf[1] = cpu_to_be32(ns); 6443 } 6444 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6445 } 6446 6447 static size_t 6448 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6449 { 6450 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6451 struct nfs_netns_client *nn_clp = nn->nfs_client; 6452 const char *id; 6453 6454 buf[0] = '\0'; 6455 6456 if (nn_clp) { 6457 rcu_read_lock(); 6458 id = rcu_dereference(nn_clp->identifier); 6459 if (id) 6460 strscpy(buf, id, buflen); 6461 rcu_read_unlock(); 6462 } 6463 6464 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6465 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6466 6467 return strlen(buf); 6468 } 6469 6470 static int 6471 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6472 { 6473 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6474 size_t buflen; 6475 size_t len; 6476 char *str; 6477 6478 if (clp->cl_owner_id != NULL) 6479 return 0; 6480 6481 rcu_read_lock(); 6482 len = 14 + 6483 strlen(clp->cl_rpcclient->cl_nodename) + 6484 1 + 6485 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6486 1; 6487 rcu_read_unlock(); 6488 6489 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6490 if (buflen) 6491 len += buflen + 1; 6492 6493 if (len > NFS4_OPAQUE_LIMIT + 1) 6494 return -EINVAL; 6495 6496 /* 6497 * Since this string is allocated at mount time, and held until the 6498 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6499 * about a memory-reclaim deadlock. 6500 */ 6501 str = kmalloc(len, GFP_KERNEL); 6502 if (!str) 6503 return -ENOMEM; 6504 6505 rcu_read_lock(); 6506 if (buflen) 6507 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6508 clp->cl_rpcclient->cl_nodename, buf, 6509 rpc_peeraddr2str(clp->cl_rpcclient, 6510 RPC_DISPLAY_ADDR)); 6511 else 6512 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6513 clp->cl_rpcclient->cl_nodename, 6514 rpc_peeraddr2str(clp->cl_rpcclient, 6515 RPC_DISPLAY_ADDR)); 6516 rcu_read_unlock(); 6517 6518 clp->cl_owner_id = str; 6519 return 0; 6520 } 6521 6522 static int 6523 nfs4_init_uniform_client_string(struct nfs_client *clp) 6524 { 6525 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6526 size_t buflen; 6527 size_t len; 6528 char *str; 6529 6530 if (clp->cl_owner_id != NULL) 6531 return 0; 6532 6533 len = 10 + 10 + 1 + 10 + 1 + 6534 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6535 6536 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6537 if (buflen) 6538 len += buflen + 1; 6539 6540 if (len > NFS4_OPAQUE_LIMIT + 1) 6541 return -EINVAL; 6542 6543 /* 6544 * Since this string is allocated at mount time, and held until the 6545 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6546 * about a memory-reclaim deadlock. 
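 *
 * For reference, the identifier string built below takes the form
 * (values purely illustrative):
 *
 *	"Linux NFSv4.1 <uniquifier>/<nodename>"
 *
 * when a uniquifier is configured, and "Linux NFSv4.1 <nodename>"
 * otherwise.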
6547 */ 6548 str = kmalloc(len, GFP_KERNEL); 6549 if (!str) 6550 return -ENOMEM; 6551 6552 if (buflen) 6553 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6554 clp->rpc_ops->version, clp->cl_minorversion, 6555 buf, clp->cl_rpcclient->cl_nodename); 6556 else 6557 scnprintf(str, len, "Linux NFSv%u.%u %s", 6558 clp->rpc_ops->version, clp->cl_minorversion, 6559 clp->cl_rpcclient->cl_nodename); 6560 clp->cl_owner_id = str; 6561 return 0; 6562 } 6563 6564 /* 6565 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6566 * services. Advertise one based on the address family of the 6567 * clientaddr. 6568 */ 6569 static unsigned int 6570 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6571 { 6572 if (strchr(clp->cl_ipaddr, ':') != NULL) 6573 return scnprintf(buf, len, "tcp6"); 6574 else 6575 return scnprintf(buf, len, "tcp"); 6576 } 6577 6578 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6579 { 6580 struct nfs4_setclientid *sc = calldata; 6581 6582 if (task->tk_status == 0) 6583 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6584 } 6585 6586 static const struct rpc_call_ops nfs4_setclientid_ops = { 6587 .rpc_call_done = nfs4_setclientid_done, 6588 }; 6589 6590 /** 6591 * nfs4_proc_setclientid - Negotiate client ID 6592 * @clp: state data structure 6593 * @program: RPC program for NFSv4 callback service 6594 * @port: IP port number for NFS4 callback service 6595 * @cred: credential to use for this call 6596 * @res: where to place the result 6597 * 6598 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6599 */ 6600 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6601 unsigned short port, const struct cred *cred, 6602 struct nfs4_setclientid_res *res) 6603 { 6604 nfs4_verifier sc_verifier; 6605 struct nfs4_setclientid setclientid = { 6606 .sc_verifier = &sc_verifier, 6607 .sc_prog = program, 6608 .sc_clnt = clp, 6609 }; 6610 struct rpc_message msg = { 6611 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6612 .rpc_argp = &setclientid, 6613 .rpc_resp = res, 6614 .rpc_cred = cred, 6615 }; 6616 struct rpc_task_setup task_setup_data = { 6617 .rpc_client = clp->cl_rpcclient, 6618 .rpc_message = &msg, 6619 .callback_ops = &nfs4_setclientid_ops, 6620 .callback_data = &setclientid, 6621 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6622 }; 6623 unsigned long now = jiffies; 6624 int status; 6625 6626 /* nfs_client_id4 */ 6627 nfs4_init_boot_verifier(clp, &sc_verifier); 6628 6629 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6630 status = nfs4_init_uniform_client_string(clp); 6631 else 6632 status = nfs4_init_nonuniform_client_string(clp); 6633 6634 if (status) 6635 goto out; 6636 6637 /* cb_client4 */ 6638 setclientid.sc_netid_len = 6639 nfs4_init_callback_netid(clp, 6640 setclientid.sc_netid, 6641 sizeof(setclientid.sc_netid)); 6642 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6643 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6644 clp->cl_ipaddr, port >> 8, port & 255); 6645 6646 dprintk("NFS call setclientid auth=%s, '%s'\n", 6647 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6648 clp->cl_owner_id); 6649 6650 status = nfs4_call_sync_custom(&task_setup_data); 6651 if (setclientid.sc_cred) { 6652 kfree(clp->cl_acceptor); 6653 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6654 put_rpccred(setclientid.sc_cred); 6655 } 6656 6657 if (status == 0) 6658 do_renew_lease(clp, now); 6659 out: 6660 trace_nfs4_setclientid(clp, status); 6661 dprintk("NFS reply 
setclientid: %d\n", status); 6662 return status; 6663 } 6664 6665 /** 6666 * nfs4_proc_setclientid_confirm - Confirm client ID 6667 * @clp: state data structure 6668 * @arg: result of a previous SETCLIENTID 6669 * @cred: credential to use for this call 6670 * 6671 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6672 */ 6673 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6674 struct nfs4_setclientid_res *arg, 6675 const struct cred *cred) 6676 { 6677 struct rpc_message msg = { 6678 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6679 .rpc_argp = arg, 6680 .rpc_cred = cred, 6681 }; 6682 int status; 6683 6684 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6685 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6686 clp->cl_clientid); 6687 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6688 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6689 trace_nfs4_setclientid_confirm(clp, status); 6690 dprintk("NFS reply setclientid_confirm: %d\n", status); 6691 return status; 6692 } 6693 6694 struct nfs4_delegreturndata { 6695 struct nfs4_delegreturnargs args; 6696 struct nfs4_delegreturnres res; 6697 struct nfs_fh fh; 6698 nfs4_stateid stateid; 6699 unsigned long timestamp; 6700 struct { 6701 struct nfs4_layoutreturn_args arg; 6702 struct nfs4_layoutreturn_res res; 6703 struct nfs4_xdr_opaque_data ld_private; 6704 u32 roc_barrier; 6705 bool roc; 6706 } lr; 6707 struct nfs4_delegattr sattr; 6708 struct nfs_fattr fattr; 6709 int rpc_status; 6710 struct inode *inode; 6711 }; 6712 6713 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6714 { 6715 struct nfs4_delegreturndata *data = calldata; 6716 struct nfs4_exception exception = { 6717 .inode = data->inode, 6718 .stateid = &data->stateid, 6719 .task_is_privileged = data->args.seq_args.sa_privileged, 6720 }; 6721 6722 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6723 return; 6724 6725 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6726 6727 /* Handle Layoutreturn errors */ 6728 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6729 &data->res.lr_ret) == -EAGAIN) 6730 goto out_restart; 6731 6732 if (data->args.sattr_args && task->tk_status != 0) { 6733 switch(data->res.sattr_ret) { 6734 case 0: 6735 data->args.sattr_args = NULL; 6736 data->res.sattr_res = false; 6737 break; 6738 case -NFS4ERR_ADMIN_REVOKED: 6739 case -NFS4ERR_DELEG_REVOKED: 6740 case -NFS4ERR_EXPIRED: 6741 case -NFS4ERR_BAD_STATEID: 6742 /* Let the main handler below do stateid recovery */ 6743 break; 6744 case -NFS4ERR_OLD_STATEID: 6745 if (nfs4_refresh_delegation_stateid(&data->stateid, 6746 data->inode)) 6747 goto out_restart; 6748 fallthrough; 6749 default: 6750 data->args.sattr_args = NULL; 6751 data->res.sattr_res = false; 6752 goto out_restart; 6753 } 6754 } 6755 6756 switch (task->tk_status) { 6757 case 0: 6758 renew_lease(data->res.server, data->timestamp); 6759 break; 6760 case -NFS4ERR_ADMIN_REVOKED: 6761 case -NFS4ERR_DELEG_REVOKED: 6762 case -NFS4ERR_EXPIRED: 6763 nfs4_free_revoked_stateid(data->res.server, 6764 data->args.stateid, 6765 task->tk_msg.rpc_cred); 6766 fallthrough; 6767 case -NFS4ERR_BAD_STATEID: 6768 case -NFS4ERR_STALE_STATEID: 6769 case -ETIMEDOUT: 6770 task->tk_status = 0; 6771 break; 6772 case -NFS4ERR_OLD_STATEID: 6773 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6774 nfs4_stateid_seqid_inc(&data->stateid); 6775 if (data->args.bitmask) { 6776 data->args.bitmask = NULL; 6777 data->res.fattr = NULL; 6778 } 6779 
goto out_restart; 6780 case -NFS4ERR_ACCESS: 6781 if (data->args.bitmask) { 6782 data->args.bitmask = NULL; 6783 data->res.fattr = NULL; 6784 goto out_restart; 6785 } 6786 fallthrough; 6787 default: 6788 task->tk_status = nfs4_async_handle_exception(task, 6789 data->res.server, task->tk_status, 6790 &exception); 6791 if (exception.retry) 6792 goto out_restart; 6793 } 6794 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6795 data->rpc_status = task->tk_status; 6796 return; 6797 out_restart: 6798 task->tk_status = 0; 6799 rpc_restart_call_prepare(task); 6800 } 6801 6802 static void nfs4_delegreturn_release(void *calldata) 6803 { 6804 struct nfs4_delegreturndata *data = calldata; 6805 struct inode *inode = data->inode; 6806 6807 if (data->lr.roc) 6808 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6809 data->res.lr_ret); 6810 if (inode) { 6811 nfs4_fattr_set_prechange(&data->fattr, 6812 inode_peek_iversion_raw(inode)); 6813 nfs_refresh_inode(inode, &data->fattr); 6814 nfs_iput_and_deactive(inode); 6815 } 6816 kfree(calldata); 6817 } 6818 6819 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6820 { 6821 struct nfs4_delegreturndata *d_data; 6822 struct pnfs_layout_hdr *lo; 6823 6824 d_data = data; 6825 6826 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6827 nfs4_sequence_done(task, &d_data->res.seq_res); 6828 return; 6829 } 6830 6831 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6832 if (lo && !pnfs_layout_is_valid(lo)) { 6833 d_data->args.lr_args = NULL; 6834 d_data->res.lr_res = NULL; 6835 } 6836 6837 nfs4_setup_sequence(d_data->res.server->nfs_client, 6838 &d_data->args.seq_args, 6839 &d_data->res.seq_res, 6840 task); 6841 } 6842 6843 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6844 .rpc_call_prepare = nfs4_delegreturn_prepare, 6845 .rpc_call_done = nfs4_delegreturn_done, 6846 .rpc_release = nfs4_delegreturn_release, 6847 }; 6848 6849 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6850 const nfs4_stateid *stateid, 6851 struct nfs_delegation *delegation, 6852 int issync) 6853 { 6854 struct nfs4_delegreturndata *data; 6855 struct nfs_server *server = NFS_SERVER(inode); 6856 struct rpc_task *task; 6857 struct rpc_message msg = { 6858 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6859 .rpc_cred = cred, 6860 }; 6861 struct rpc_task_setup task_setup_data = { 6862 .rpc_client = server->client, 6863 .rpc_message = &msg, 6864 .callback_ops = &nfs4_delegreturn_ops, 6865 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6866 }; 6867 int status = 0; 6868 6869 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6870 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6871 6872 data = kzalloc(sizeof(*data), GFP_KERNEL); 6873 if (data == NULL) 6874 return -ENOMEM; 6875 6876 nfs4_state_protect(server->nfs_client, 6877 NFS_SP4_MACH_CRED_CLEANUP, 6878 &task_setup_data.rpc_client, &msg); 6879 6880 data->args.fhandle = &data->fh; 6881 data->args.stateid = &data->stateid; 6882 nfs4_bitmask_set(data->args.bitmask_store, 6883 server->cache_consistency_bitmask, inode, 0); 6884 data->args.bitmask = data->args.bitmask_store; 6885 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6886 nfs4_stateid_copy(&data->stateid, stateid); 6887 data->res.fattr = &data->fattr; 6888 data->res.server = server; 6889 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6890 data->lr.arg.ld_private = &data->lr.ld_private; 6891 nfs_fattr_init(data->res.fattr); 6892 data->timestamp = jiffies; 6893 data->rpc_status = 0; 6894 
data->inode = nfs_igrab_and_active(inode); 6895 if (data->inode || issync) { 6896 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6897 cred); 6898 if (data->lr.roc) { 6899 data->args.lr_args = &data->lr.arg; 6900 data->res.lr_res = &data->lr.res; 6901 } 6902 } 6903 6904 if (delegation && 6905 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 6906 if (delegation->type & FMODE_READ) { 6907 data->sattr.atime = inode_get_atime(inode); 6908 data->sattr.atime_set = true; 6909 } 6910 if (delegation->type & FMODE_WRITE) { 6911 data->sattr.mtime = inode_get_mtime(inode); 6912 data->sattr.mtime_set = true; 6913 } 6914 data->args.sattr_args = &data->sattr; 6915 data->res.sattr_res = true; 6916 } 6917 6918 if (!data->inode) 6919 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6920 1); 6921 else 6922 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6923 0); 6924 6925 task_setup_data.callback_data = data; 6926 msg.rpc_argp = &data->args; 6927 msg.rpc_resp = &data->res; 6928 task = rpc_run_task(&task_setup_data); 6929 if (IS_ERR(task)) 6930 return PTR_ERR(task); 6931 if (!issync) 6932 goto out; 6933 status = rpc_wait_for_completion_task(task); 6934 if (status != 0) 6935 goto out; 6936 status = data->rpc_status; 6937 out: 6938 rpc_put_task(task); 6939 return status; 6940 } 6941 6942 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6943 const nfs4_stateid *stateid, 6944 struct nfs_delegation *delegation, int issync) 6945 { 6946 struct nfs_server *server = NFS_SERVER(inode); 6947 struct nfs4_exception exception = { }; 6948 int err; 6949 do { 6950 err = _nfs4_proc_delegreturn(inode, cred, stateid, 6951 delegation, issync); 6952 trace_nfs4_delegreturn(inode, stateid, err); 6953 switch (err) { 6954 case -NFS4ERR_STALE_STATEID: 6955 case -NFS4ERR_EXPIRED: 6956 case 0: 6957 return 0; 6958 } 6959 err = nfs4_handle_exception(server, err, &exception); 6960 } while (exception.retry); 6961 return err; 6962 } 6963 6964 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6965 { 6966 struct inode *inode = state->inode; 6967 struct nfs_server *server = NFS_SERVER(inode); 6968 struct nfs_client *clp = server->nfs_client; 6969 struct nfs_lockt_args arg = { 6970 .fh = NFS_FH(inode), 6971 .fl = request, 6972 }; 6973 struct nfs_lockt_res res = { 6974 .denied = request, 6975 }; 6976 struct rpc_message msg = { 6977 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 6978 .rpc_argp = &arg, 6979 .rpc_resp = &res, 6980 .rpc_cred = state->owner->so_cred, 6981 }; 6982 struct nfs4_lock_state *lsp; 6983 int status; 6984 6985 arg.lock_owner.clientid = clp->cl_clientid; 6986 status = nfs4_set_lock_state(state, request); 6987 if (status != 0) 6988 goto out; 6989 lsp = request->fl_u.nfs4_fl.owner; 6990 arg.lock_owner.id = lsp->ls_seqid.owner_id; 6991 arg.lock_owner.s_dev = server->s_dev; 6992 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6993 switch (status) { 6994 case 0: 6995 request->c.flc_type = F_UNLCK; 6996 break; 6997 case -NFS4ERR_DENIED: 6998 status = 0; 6999 } 7000 request->fl_ops->fl_release_private(request); 7001 request->fl_ops = NULL; 7002 out: 7003 return status; 7004 } 7005 7006 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7007 { 7008 struct nfs4_exception exception = { 7009 .interruptible = true, 7010 }; 7011 int err; 7012 7013 do { 7014 err = _nfs4_proc_getlk(state, cmd, request); 7015 trace_nfs4_get_lock(request, state, cmd, 
err); 7016 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 7017 &exception); 7018 } while (exception.retry); 7019 return err; 7020 } 7021 7022 /* 7023 * Update the seqid of a lock stateid after receiving 7024 * NFS4ERR_OLD_STATEID 7025 */ 7026 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 7027 struct nfs4_lock_state *lsp) 7028 { 7029 struct nfs4_state *state = lsp->ls_state; 7030 bool ret = false; 7031 7032 spin_lock(&state->state_lock); 7033 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7034 goto out; 7035 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7036 nfs4_stateid_seqid_inc(dst); 7037 else 7038 dst->seqid = lsp->ls_stateid.seqid; 7039 ret = true; 7040 out: 7041 spin_unlock(&state->state_lock); 7042 return ret; 7043 } 7044 7045 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7046 struct nfs4_lock_state *lsp) 7047 { 7048 struct nfs4_state *state = lsp->ls_state; 7049 bool ret; 7050 7051 spin_lock(&state->state_lock); 7052 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7053 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7054 spin_unlock(&state->state_lock); 7055 return ret; 7056 } 7057 7058 struct nfs4_unlockdata { 7059 struct nfs_locku_args arg; 7060 struct nfs_locku_res res; 7061 struct nfs4_lock_state *lsp; 7062 struct nfs_open_context *ctx; 7063 struct nfs_lock_context *l_ctx; 7064 struct file_lock fl; 7065 struct nfs_server *server; 7066 unsigned long timestamp; 7067 }; 7068 7069 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7070 struct nfs_open_context *ctx, 7071 struct nfs4_lock_state *lsp, 7072 struct nfs_seqid *seqid) 7073 { 7074 struct nfs4_unlockdata *p; 7075 struct nfs4_state *state = lsp->ls_state; 7076 struct inode *inode = state->inode; 7077 7078 p = kzalloc(sizeof(*p), GFP_KERNEL); 7079 if (p == NULL) 7080 return NULL; 7081 p->arg.fh = NFS_FH(inode); 7082 p->arg.fl = &p->fl; 7083 p->arg.seqid = seqid; 7084 p->res.seqid = seqid; 7085 p->lsp = lsp; 7086 /* Ensure we don't close file until we're done freeing locks! 
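 * The open-context and lock-context references taken below are dropped
 * again in nfs4_locku_release_calldata() once the LOCKU has completed.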
*/ 7087 p->ctx = get_nfs_open_context(ctx); 7088 p->l_ctx = nfs_get_lock_context(ctx); 7089 locks_init_lock(&p->fl); 7090 locks_copy_lock(&p->fl, fl); 7091 p->server = NFS_SERVER(inode); 7092 spin_lock(&state->state_lock); 7093 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7094 spin_unlock(&state->state_lock); 7095 return p; 7096 } 7097 7098 static void nfs4_locku_release_calldata(void *data) 7099 { 7100 struct nfs4_unlockdata *calldata = data; 7101 nfs_free_seqid(calldata->arg.seqid); 7102 nfs4_put_lock_state(calldata->lsp); 7103 nfs_put_lock_context(calldata->l_ctx); 7104 put_nfs_open_context(calldata->ctx); 7105 kfree(calldata); 7106 } 7107 7108 static void nfs4_locku_done(struct rpc_task *task, void *data) 7109 { 7110 struct nfs4_unlockdata *calldata = data; 7111 struct nfs4_exception exception = { 7112 .inode = calldata->lsp->ls_state->inode, 7113 .stateid = &calldata->arg.stateid, 7114 }; 7115 7116 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7117 return; 7118 switch (task->tk_status) { 7119 case 0: 7120 renew_lease(calldata->server, calldata->timestamp); 7121 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7122 if (nfs4_update_lock_stateid(calldata->lsp, 7123 &calldata->res.stateid)) 7124 break; 7125 fallthrough; 7126 case -NFS4ERR_ADMIN_REVOKED: 7127 case -NFS4ERR_EXPIRED: 7128 nfs4_free_revoked_stateid(calldata->server, 7129 &calldata->arg.stateid, 7130 task->tk_msg.rpc_cred); 7131 fallthrough; 7132 case -NFS4ERR_BAD_STATEID: 7133 case -NFS4ERR_STALE_STATEID: 7134 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7135 calldata->lsp)) 7136 rpc_restart_call_prepare(task); 7137 break; 7138 case -NFS4ERR_OLD_STATEID: 7139 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7140 calldata->lsp)) 7141 rpc_restart_call_prepare(task); 7142 break; 7143 default: 7144 task->tk_status = nfs4_async_handle_exception(task, 7145 calldata->server, task->tk_status, 7146 &exception); 7147 if (exception.retry) 7148 rpc_restart_call_prepare(task); 7149 } 7150 nfs_release_seqid(calldata->arg.seqid); 7151 } 7152 7153 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7154 { 7155 struct nfs4_unlockdata *calldata = data; 7156 7157 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7158 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7159 return; 7160 7161 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7162 goto out_wait; 7163 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7164 /* Note: exit _without_ running nfs4_locku_done */ 7165 goto out_no_action; 7166 } 7167 calldata->timestamp = jiffies; 7168 if (nfs4_setup_sequence(calldata->server->nfs_client, 7169 &calldata->arg.seq_args, 7170 &calldata->res.seq_res, 7171 task) != 0) 7172 nfs_release_seqid(calldata->arg.seqid); 7173 return; 7174 out_no_action: 7175 task->tk_action = NULL; 7176 out_wait: 7177 nfs4_sequence_done(task, &calldata->res.seq_res); 7178 } 7179 7180 static const struct rpc_call_ops nfs4_locku_ops = { 7181 .rpc_call_prepare = nfs4_locku_prepare, 7182 .rpc_call_done = nfs4_locku_done, 7183 .rpc_release = nfs4_locku_release_calldata, 7184 }; 7185 7186 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7187 struct nfs_open_context *ctx, 7188 struct nfs4_lock_state *lsp, 7189 struct nfs_seqid *seqid) 7190 { 7191 struct nfs4_unlockdata *data; 7192 struct rpc_message msg = { 7193 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7194 .rpc_cred = ctx->cred, 7195 }; 7196 struct rpc_task_setup task_setup_data = { 7197 
.rpc_client = NFS_CLIENT(lsp->ls_state->inode), 7198 .rpc_message = &msg, 7199 .callback_ops = &nfs4_locku_ops, 7200 .workqueue = nfsiod_workqueue, 7201 .flags = RPC_TASK_ASYNC, 7202 }; 7203 7204 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7205 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7206 7207 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7208 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7209 7210 /* Ensure this is an unlock - when canceling a lock, the 7211 * canceled lock is passed in, and it won't be an unlock. 7212 */ 7213 fl->c.flc_type = F_UNLCK; 7214 if (fl->c.flc_flags & FL_CLOSE) 7215 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7216 7217 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7218 if (data == NULL) { 7219 nfs_free_seqid(seqid); 7220 return ERR_PTR(-ENOMEM); 7221 } 7222 7223 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7224 msg.rpc_argp = &data->arg; 7225 msg.rpc_resp = &data->res; 7226 task_setup_data.callback_data = data; 7227 return rpc_run_task(&task_setup_data); 7228 } 7229 7230 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7231 { 7232 struct inode *inode = state->inode; 7233 struct nfs4_state_owner *sp = state->owner; 7234 struct nfs_inode *nfsi = NFS_I(inode); 7235 struct nfs_seqid *seqid; 7236 struct nfs4_lock_state *lsp; 7237 struct rpc_task *task; 7238 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7239 int status = 0; 7240 unsigned char saved_flags = request->c.flc_flags; 7241 7242 status = nfs4_set_lock_state(state, request); 7243 /* Unlock _before_ we do the RPC call */ 7244 request->c.flc_flags |= FL_EXISTS; 7245 /* Exclude nfs_delegation_claim_locks() */ 7246 mutex_lock(&sp->so_delegreturn_mutex); 7247 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7248 down_read(&nfsi->rwsem); 7249 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7250 up_read(&nfsi->rwsem); 7251 mutex_unlock(&sp->so_delegreturn_mutex); 7252 goto out; 7253 } 7254 lsp = request->fl_u.nfs4_fl.owner; 7255 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7256 up_read(&nfsi->rwsem); 7257 mutex_unlock(&sp->so_delegreturn_mutex); 7258 if (status != 0) 7259 goto out; 7260 /* Is this a delegated lock? 
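 * If NFS_LOCK_INITIALIZED was never set, the lock was only ever held
 * locally (cached under a delegation) and no lock stateid exists on
 * the server, so there is nothing to send a LOCKU for.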
*/ 7261 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7262 goto out; 7263 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7264 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7265 status = -ENOMEM; 7266 if (IS_ERR(seqid)) 7267 goto out; 7268 task = nfs4_do_unlck(request, 7269 nfs_file_open_context(request->c.flc_file), 7270 lsp, seqid); 7271 status = PTR_ERR(task); 7272 if (IS_ERR(task)) 7273 goto out; 7274 status = rpc_wait_for_completion_task(task); 7275 rpc_put_task(task); 7276 out: 7277 request->c.flc_flags = saved_flags; 7278 trace_nfs4_unlock(request, state, F_SETLK, status); 7279 return status; 7280 } 7281 7282 struct nfs4_lockdata { 7283 struct nfs_lock_args arg; 7284 struct nfs_lock_res res; 7285 struct nfs4_lock_state *lsp; 7286 struct nfs_open_context *ctx; 7287 struct file_lock fl; 7288 unsigned long timestamp; 7289 int rpc_status; 7290 int cancelled; 7291 struct nfs_server *server; 7292 }; 7293 7294 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7295 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7296 gfp_t gfp_mask) 7297 { 7298 struct nfs4_lockdata *p; 7299 struct inode *inode = lsp->ls_state->inode; 7300 struct nfs_server *server = NFS_SERVER(inode); 7301 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7302 7303 p = kzalloc(sizeof(*p), gfp_mask); 7304 if (p == NULL) 7305 return NULL; 7306 7307 p->arg.fh = NFS_FH(inode); 7308 p->arg.fl = &p->fl; 7309 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7310 if (IS_ERR(p->arg.open_seqid)) 7311 goto out_free; 7312 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7313 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7314 if (IS_ERR(p->arg.lock_seqid)) 7315 goto out_free_seqid; 7316 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7317 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7318 p->arg.lock_owner.s_dev = server->s_dev; 7319 p->res.lock_seqid = p->arg.lock_seqid; 7320 p->lsp = lsp; 7321 p->server = server; 7322 p->ctx = get_nfs_open_context(ctx); 7323 locks_init_lock(&p->fl); 7324 locks_copy_lock(&p->fl, fl); 7325 return p; 7326 out_free_seqid: 7327 nfs_free_seqid(p->arg.open_seqid); 7328 out_free: 7329 kfree(p); 7330 return NULL; 7331 } 7332 7333 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7334 { 7335 struct nfs4_lockdata *data = calldata; 7336 struct nfs4_state *state = data->lsp->ls_state; 7337 7338 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7339 goto out_wait; 7340 /* Do we need to do an open_to_lock_owner? 
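 * The first LOCK issued for a lock-owner carries an open_to_lock_owner:
 * the open stateid plus its seqid, which is what establishes the lock
 * stateid on the server (new_lock_owner = 1 below).  Once
 * NFS_LOCK_INITIALIZED is set, subsequent LOCK requests just reference
 * the existing lock stateid instead.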
*/ 7341 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7342 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7343 goto out_release_lock_seqid; 7344 } 7345 nfs4_stateid_copy(&data->arg.open_stateid, 7346 &state->open_stateid); 7347 data->arg.new_lock_owner = 1; 7348 data->res.open_seqid = data->arg.open_seqid; 7349 } else { 7350 data->arg.new_lock_owner = 0; 7351 nfs4_stateid_copy(&data->arg.lock_stateid, 7352 &data->lsp->ls_stateid); 7353 } 7354 if (!nfs4_valid_open_stateid(state)) { 7355 data->rpc_status = -EBADF; 7356 task->tk_action = NULL; 7357 goto out_release_open_seqid; 7358 } 7359 data->timestamp = jiffies; 7360 if (nfs4_setup_sequence(data->server->nfs_client, 7361 &data->arg.seq_args, 7362 &data->res.seq_res, 7363 task) == 0) 7364 return; 7365 out_release_open_seqid: 7366 nfs_release_seqid(data->arg.open_seqid); 7367 out_release_lock_seqid: 7368 nfs_release_seqid(data->arg.lock_seqid); 7369 out_wait: 7370 nfs4_sequence_done(task, &data->res.seq_res); 7371 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7372 } 7373 7374 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7375 { 7376 struct nfs4_lockdata *data = calldata; 7377 struct nfs4_lock_state *lsp = data->lsp; 7378 7379 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7380 return; 7381 7382 data->rpc_status = task->tk_status; 7383 switch (task->tk_status) { 7384 case 0: 7385 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7386 data->timestamp); 7387 if (data->arg.new_lock && !data->cancelled) { 7388 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7389 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7390 goto out_restart; 7391 } 7392 if (data->arg.new_lock_owner != 0) { 7393 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7394 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7395 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7396 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7397 goto out_restart; 7398 break; 7399 case -NFS4ERR_OLD_STATEID: 7400 if (data->arg.new_lock_owner != 0 && 7401 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7402 lsp->ls_state)) 7403 goto out_restart; 7404 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7405 goto out_restart; 7406 fallthrough; 7407 case -NFS4ERR_BAD_STATEID: 7408 case -NFS4ERR_STALE_STATEID: 7409 case -NFS4ERR_EXPIRED: 7410 if (data->arg.new_lock_owner != 0) { 7411 if (!nfs4_stateid_match(&data->arg.open_stateid, 7412 &lsp->ls_state->open_stateid)) 7413 goto out_restart; 7414 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7415 &lsp->ls_stateid)) 7416 goto out_restart; 7417 } 7418 out_done: 7419 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7420 return; 7421 out_restart: 7422 if (!data->cancelled) 7423 rpc_restart_call_prepare(task); 7424 goto out_done; 7425 } 7426 7427 static void nfs4_lock_release(void *calldata) 7428 { 7429 struct nfs4_lockdata *data = calldata; 7430 7431 nfs_free_seqid(data->arg.open_seqid); 7432 if (data->cancelled && data->rpc_status == 0) { 7433 struct rpc_task *task; 7434 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7435 data->arg.lock_seqid); 7436 if (!IS_ERR(task)) 7437 rpc_put_task_async(task); 7438 dprintk("%s: cancelling lock!\n", __func__); 7439 } else 7440 nfs_free_seqid(data->arg.lock_seqid); 7441 nfs4_put_lock_state(data->lsp); 7442 put_nfs_open_context(data->ctx); 7443 kfree(data); 7444 } 7445 7446 static const struct rpc_call_ops nfs4_lock_ops = { 7447 .rpc_call_prepare = nfs4_lock_prepare, 7448 .rpc_call_done = 
nfs4_lock_done, 7449 .rpc_release = nfs4_lock_release, 7450 }; 7451 7452 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7453 { 7454 switch (error) { 7455 case -NFS4ERR_ADMIN_REVOKED: 7456 case -NFS4ERR_EXPIRED: 7457 case -NFS4ERR_BAD_STATEID: 7458 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7459 if (new_lock_owner != 0 || 7460 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7461 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7462 break; 7463 case -NFS4ERR_STALE_STATEID: 7464 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7465 nfs4_schedule_lease_recovery(server->nfs_client); 7466 } 7467 } 7468 7469 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7470 { 7471 struct nfs4_lockdata *data; 7472 struct rpc_task *task; 7473 struct rpc_message msg = { 7474 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7475 .rpc_cred = state->owner->so_cred, 7476 }; 7477 struct rpc_task_setup task_setup_data = { 7478 .rpc_client = NFS_CLIENT(state->inode), 7479 .rpc_message = &msg, 7480 .callback_ops = &nfs4_lock_ops, 7481 .workqueue = nfsiod_workqueue, 7482 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7483 }; 7484 int ret; 7485 7486 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7487 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7488 7489 data = nfs4_alloc_lockdata(fl, 7490 nfs_file_open_context(fl->c.flc_file), 7491 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7492 if (data == NULL) 7493 return -ENOMEM; 7494 if (IS_SETLKW(cmd)) 7495 data->arg.block = 1; 7496 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7497 recovery_type > NFS_LOCK_NEW); 7498 msg.rpc_argp = &data->arg; 7499 msg.rpc_resp = &data->res; 7500 task_setup_data.callback_data = data; 7501 if (recovery_type > NFS_LOCK_NEW) { 7502 if (recovery_type == NFS_LOCK_RECLAIM) 7503 data->arg.reclaim = NFS_LOCK_RECLAIM; 7504 } else 7505 data->arg.new_lock = 1; 7506 task = rpc_run_task(&task_setup_data); 7507 if (IS_ERR(task)) 7508 return PTR_ERR(task); 7509 ret = rpc_wait_for_completion_task(task); 7510 if (ret == 0) { 7511 ret = data->rpc_status; 7512 if (ret) 7513 nfs4_handle_setlk_error(data->server, data->lsp, 7514 data->arg.new_lock_owner, ret); 7515 } else 7516 data->cancelled = true; 7517 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7518 rpc_put_task(task); 7519 dprintk("%s: ret = %d\n", __func__, ret); 7520 return ret; 7521 } 7522 7523 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7524 { 7525 struct nfs_server *server = NFS_SERVER(state->inode); 7526 struct nfs4_exception exception = { 7527 .inode = state->inode, 7528 }; 7529 int err; 7530 7531 do { 7532 /* Cache the lock if possible... 
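 * While NFS_DELEGATED_STATE is set the lock is covered by the
 * delegation, so it does not need to be re-established on the server
 * here; returning 0 leaves it cached locally, to be pushed out if the
 * delegation is ever returned.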
*/ 7533 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7534 return 0; 7535 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7536 if (err != -NFS4ERR_DELAY) 7537 break; 7538 nfs4_handle_exception(server, err, &exception); 7539 } while (exception.retry); 7540 return err; 7541 } 7542 7543 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7544 { 7545 struct nfs_server *server = NFS_SERVER(state->inode); 7546 struct nfs4_exception exception = { 7547 .inode = state->inode, 7548 }; 7549 int err; 7550 7551 err = nfs4_set_lock_state(state, request); 7552 if (err != 0) 7553 return err; 7554 if (!recover_lost_locks) { 7555 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7556 return 0; 7557 } 7558 do { 7559 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7560 return 0; 7561 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7562 switch (err) { 7563 default: 7564 goto out; 7565 case -NFS4ERR_GRACE: 7566 case -NFS4ERR_DELAY: 7567 nfs4_handle_exception(server, err, &exception); 7568 err = 0; 7569 } 7570 } while (exception.retry); 7571 out: 7572 return err; 7573 } 7574 7575 #if defined(CONFIG_NFS_V4_1) 7576 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7577 { 7578 struct nfs4_lock_state *lsp; 7579 int status; 7580 7581 status = nfs4_set_lock_state(state, request); 7582 if (status != 0) 7583 return status; 7584 lsp = request->fl_u.nfs4_fl.owner; 7585 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7586 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7587 return 0; 7588 return nfs4_lock_expired(state, request); 7589 } 7590 #endif 7591 7592 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7593 { 7594 struct nfs_inode *nfsi = NFS_I(state->inode); 7595 struct nfs4_state_owner *sp = state->owner; 7596 unsigned char flags = request->c.flc_flags; 7597 int status; 7598 7599 request->c.flc_flags |= FL_ACCESS; 7600 status = locks_lock_inode_wait(state->inode, request); 7601 if (status < 0) 7602 goto out; 7603 mutex_lock(&sp->so_delegreturn_mutex); 7604 down_read(&nfsi->rwsem); 7605 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7606 /* Yes: cache locks! */ 7607 /* ...but avoid races with delegation recall... 
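 * Clearing FL_SLEEP below keeps the local lock call from blocking
 * while so_delegreturn_mutex and nfsi->rwsem are held, since a
 * delegation recall has to take those same locks to claim the cached
 * state.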
*/ 7608 request->c.flc_flags = flags & ~FL_SLEEP; 7609 status = locks_lock_inode_wait(state->inode, request); 7610 up_read(&nfsi->rwsem); 7611 mutex_unlock(&sp->so_delegreturn_mutex); 7612 goto out; 7613 } 7614 up_read(&nfsi->rwsem); 7615 mutex_unlock(&sp->so_delegreturn_mutex); 7616 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7617 out: 7618 request->c.flc_flags = flags; 7619 return status; 7620 } 7621 7622 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7623 { 7624 struct nfs4_exception exception = { 7625 .state = state, 7626 .inode = state->inode, 7627 .interruptible = true, 7628 }; 7629 int err; 7630 7631 do { 7632 err = _nfs4_proc_setlk(state, cmd, request); 7633 if (err == -NFS4ERR_DENIED) 7634 err = -EAGAIN; 7635 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7636 err, &exception); 7637 } while (exception.retry); 7638 return err; 7639 } 7640 7641 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7642 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7643 7644 static int 7645 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7646 struct file_lock *request) 7647 { 7648 int status = -ERESTARTSYS; 7649 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7650 7651 while(!signalled()) { 7652 status = nfs4_proc_setlk(state, cmd, request); 7653 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7654 break; 7655 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7656 schedule_timeout(timeout); 7657 timeout *= 2; 7658 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7659 status = -ERESTARTSYS; 7660 } 7661 return status; 7662 } 7663 7664 #ifdef CONFIG_NFS_V4_1 7665 struct nfs4_lock_waiter { 7666 struct inode *inode; 7667 struct nfs_lowner owner; 7668 wait_queue_entry_t wait; 7669 }; 7670 7671 static int 7672 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7673 { 7674 struct nfs4_lock_waiter *waiter = 7675 container_of(wait, struct nfs4_lock_waiter, wait); 7676 7677 /* NULL key means to wake up everyone */ 7678 if (key) { 7679 struct cb_notify_lock_args *cbnl = key; 7680 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7681 *wowner = &waiter->owner; 7682 7683 /* Only wake if the callback was for the same owner. 
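 * The key, when present, is the cb_notify_lock_args from a
 * CB_NOTIFY_LOCK callback; its lock owner is compared here and its
 * file handle just below.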
*/ 7684 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7685 return 0; 7686 7687 /* Make sure it's for the right inode */ 7688 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7689 return 0; 7690 } 7691 7692 return woken_wake_function(wait, mode, flags, key); 7693 } 7694 7695 static int 7696 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7697 { 7698 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7699 struct nfs_server *server = NFS_SERVER(state->inode); 7700 struct nfs_client *clp = server->nfs_client; 7701 wait_queue_head_t *q = &clp->cl_lock_waitq; 7702 struct nfs4_lock_waiter waiter = { 7703 .inode = state->inode, 7704 .owner = { .clientid = clp->cl_clientid, 7705 .id = lsp->ls_seqid.owner_id, 7706 .s_dev = server->s_dev }, 7707 }; 7708 int status; 7709 7710 /* Don't bother with waitqueue if we don't expect a callback */ 7711 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7712 return nfs4_retry_setlk_simple(state, cmd, request); 7713 7714 init_wait(&waiter.wait); 7715 waiter.wait.func = nfs4_wake_lock_waiter; 7716 add_wait_queue(q, &waiter.wait); 7717 7718 do { 7719 status = nfs4_proc_setlk(state, cmd, request); 7720 if (status != -EAGAIN || IS_SETLK(cmd)) 7721 break; 7722 7723 status = -ERESTARTSYS; 7724 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7725 NFS4_LOCK_MAXTIMEOUT); 7726 } while (!signalled()); 7727 7728 remove_wait_queue(q, &waiter.wait); 7729 7730 return status; 7731 } 7732 #else /* !CONFIG_NFS_V4_1 */ 7733 static inline int 7734 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7735 { 7736 return nfs4_retry_setlk_simple(state, cmd, request); 7737 } 7738 #endif 7739 7740 static int 7741 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7742 { 7743 struct nfs_open_context *ctx; 7744 struct nfs4_state *state; 7745 int status; 7746 7747 /* verify open state */ 7748 ctx = nfs_file_open_context(filp); 7749 state = ctx->state; 7750 7751 if (IS_GETLK(cmd)) { 7752 if (state != NULL) 7753 return nfs4_proc_getlk(state, F_GETLK, request); 7754 return 0; 7755 } 7756 7757 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7758 return -EINVAL; 7759 7760 if (lock_is_unlock(request)) { 7761 if (state != NULL) 7762 return nfs4_proc_unlck(state, cmd, request); 7763 return 0; 7764 } 7765 7766 if (state == NULL) 7767 return -ENOLCK; 7768 7769 if ((request->c.flc_flags & FL_POSIX) && 7770 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7771 return -ENOLCK; 7772 7773 /* 7774 * Don't rely on the VFS having checked the file open mode, 7775 * since it won't do this for flock() locks. 7776 */ 7777 switch (request->c.flc_type) { 7778 case F_RDLCK: 7779 if (!(filp->f_mode & FMODE_READ)) 7780 return -EBADF; 7781 break; 7782 case F_WRLCK: 7783 if (!(filp->f_mode & FMODE_WRITE)) 7784 return -EBADF; 7785 } 7786 7787 status = nfs4_set_lock_state(state, request); 7788 if (status != 0) 7789 return status; 7790 7791 return nfs4_retry_setlk(state, cmd, request); 7792 } 7793 7794 static int nfs4_delete_lease(struct file *file, void **priv) 7795 { 7796 return generic_setlease(file, F_UNLCK, NULL, priv); 7797 } 7798 7799 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7800 void **priv) 7801 { 7802 struct inode *inode = file_inode(file); 7803 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE; 7804 int ret; 7805 7806 /* No delegation, no lease */ 7807 if (!nfs4_have_delegation(inode, type, 0)) 7808 return -EAGAIN; 7809 ret = generic_setlease(file, arg, lease, priv); 7810 if (ret || nfs4_have_delegation(inode, type, 0)) 7811 return ret; 7812 /* We raced with a delegation return */ 7813 nfs4_delete_lease(file, priv); 7814 return -EAGAIN; 7815 } 7816 7817 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7818 void **priv) 7819 { 7820 switch (arg) { 7821 case F_RDLCK: 7822 case F_WRLCK: 7823 return nfs4_add_lease(file, arg, lease, priv); 7824 case F_UNLCK: 7825 return nfs4_delete_lease(file, priv); 7826 default: 7827 return -EINVAL; 7828 } 7829 } 7830 7831 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7832 { 7833 struct nfs_server *server = NFS_SERVER(state->inode); 7834 int err; 7835 7836 err = nfs4_set_lock_state(state, fl); 7837 if (err != 0) 7838 return err; 7839 do { 7840 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7841 if (err != -NFS4ERR_DELAY) 7842 break; 7843 ssleep(1); 7844 } while (err == -NFS4ERR_DELAY); 7845 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7846 } 7847 7848 struct nfs_release_lockowner_data { 7849 struct nfs4_lock_state *lsp; 7850 struct nfs_server *server; 7851 struct nfs_release_lockowner_args args; 7852 struct nfs_release_lockowner_res res; 7853 unsigned long timestamp; 7854 }; 7855 7856 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7857 { 7858 struct nfs_release_lockowner_data *data = calldata; 7859 struct nfs_server *server = data->server; 7860 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7861 &data->res.seq_res, task); 7862 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7863 data->timestamp = jiffies; 7864 } 7865 7866 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7867 { 7868 struct nfs_release_lockowner_data *data = calldata; 7869 struct nfs_server *server = data->server; 7870 7871 nfs40_sequence_done(task, &data->res.seq_res); 7872 7873 switch (task->tk_status) { 7874 case 0: 7875 renew_lease(server, data->timestamp); 7876 break; 7877 case -NFS4ERR_STALE_CLIENTID: 7878 case -NFS4ERR_EXPIRED: 7879 nfs4_schedule_lease_recovery(server->nfs_client); 7880 break; 7881 case -NFS4ERR_LEASE_MOVED: 7882 case -NFS4ERR_DELAY: 7883 if (nfs4_async_handle_error(task, server, 7884 NULL, NULL) == -EAGAIN) 7885 rpc_restart_call_prepare(task); 7886 } 7887 } 7888 7889 static void nfs4_release_lockowner_release(void *calldata) 7890 { 7891 struct nfs_release_lockowner_data *data = calldata; 7892 nfs4_free_lock_state(data->server, data->lsp); 7893 kfree(calldata); 7894 } 7895 7896 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 7897 .rpc_call_prepare = nfs4_release_lockowner_prepare, 7898 .rpc_call_done = nfs4_release_lockowner_done, 7899 .rpc_release = nfs4_release_lockowner_release, 7900 }; 7901 7902 static void 7903 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 7904 { 7905 struct nfs_release_lockowner_data *data; 7906 struct rpc_message msg = { 7907 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 7908 }; 7909 7910 if (server->nfs_client->cl_mvops->minor_version != 0) 7911 return; 7912 7913 data = kmalloc(sizeof(*data), GFP_KERNEL); 7914 if (!data) 7915 return; 7916 data->lsp = lsp; 7917 data->server = server; 7918 data->args.lock_owner.clientid = 
server->nfs_client->cl_clientid; 7919 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 7920 data->args.lock_owner.s_dev = server->s_dev; 7921 7922 msg.rpc_argp = &data->args; 7923 msg.rpc_resp = &data->res; 7924 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 7925 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 7926 } 7927 7928 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7929 7930 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7931 struct mnt_idmap *idmap, 7932 struct dentry *unused, struct inode *inode, 7933 const char *key, const void *buf, 7934 size_t buflen, int flags) 7935 { 7936 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 7937 } 7938 7939 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7940 struct dentry *unused, struct inode *inode, 7941 const char *key, void *buf, size_t buflen) 7942 { 7943 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 7944 } 7945 7946 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7947 { 7948 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 7949 } 7950 7951 #if defined(CONFIG_NFS_V4_1) 7952 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 7953 7954 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 7955 struct mnt_idmap *idmap, 7956 struct dentry *unused, struct inode *inode, 7957 const char *key, const void *buf, 7958 size_t buflen, int flags) 7959 { 7960 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 7961 } 7962 7963 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 7964 struct dentry *unused, struct inode *inode, 7965 const char *key, void *buf, size_t buflen) 7966 { 7967 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 7968 } 7969 7970 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 7971 { 7972 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 7973 } 7974 7975 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 7976 7977 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 7978 struct mnt_idmap *idmap, 7979 struct dentry *unused, struct inode *inode, 7980 const char *key, const void *buf, 7981 size_t buflen, int flags) 7982 { 7983 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 7984 } 7985 7986 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 7987 struct dentry *unused, struct inode *inode, 7988 const char *key, void *buf, size_t buflen) 7989 { 7990 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 7991 } 7992 7993 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 7994 { 7995 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 7996 } 7997 7998 #endif 7999 8000 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8001 8002 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 8003 struct mnt_idmap *idmap, 8004 struct dentry *unused, struct inode *inode, 8005 const char *key, const void *buf, 8006 size_t buflen, int flags) 8007 { 8008 if (security_ismaclabel(key)) 8009 return nfs4_set_security_label(inode, buf, buflen); 8010 8011 return -EOPNOTSUPP; 8012 } 8013 8014 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 8015 struct dentry *unused, struct inode *inode, 8016 const char *key, void *buf, size_t buflen) 8017 { 8018 if (security_ismaclabel(key)) 8019 return nfs4_get_security_label(inode, buf, buflen); 8020 return -EOPNOTSUPP; 8021 } 8022 8023 static ssize_t 8024 
nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8025 { 8026 int len = 0; 8027 8028 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 8029 len = security_inode_listsecurity(inode, list, list_len); 8030 if (len >= 0 && list_len && len > list_len) 8031 return -ERANGE; 8032 } 8033 return len; 8034 } 8035 8036 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8037 .prefix = XATTR_SECURITY_PREFIX, 8038 .get = nfs4_xattr_get_nfs4_label, 8039 .set = nfs4_xattr_set_nfs4_label, 8040 }; 8041 8042 #else 8043 8044 static ssize_t 8045 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8046 { 8047 return 0; 8048 } 8049 8050 #endif 8051 8052 #ifdef CONFIG_NFS_V4_2 8053 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8054 struct mnt_idmap *idmap, 8055 struct dentry *unused, struct inode *inode, 8056 const char *key, const void *buf, 8057 size_t buflen, int flags) 8058 { 8059 u32 mask; 8060 int ret; 8061 8062 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8063 return -EOPNOTSUPP; 8064 8065 /* 8066 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8067 * flags right now. Handling of xattr operations use the normal 8068 * file read/write permissions. 8069 * 8070 * Just in case the server has other ideas (which RFC 8276 allows), 8071 * do a cached access check for the XA* flags to possibly avoid 8072 * doing an RPC and getting EACCES back. 8073 */ 8074 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8075 if (!(mask & NFS_ACCESS_XAWRITE)) 8076 return -EACCES; 8077 } 8078 8079 if (buf == NULL) { 8080 ret = nfs42_proc_removexattr(inode, key); 8081 if (!ret) 8082 nfs4_xattr_cache_remove(inode, key); 8083 } else { 8084 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8085 if (!ret) 8086 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8087 } 8088 8089 return ret; 8090 } 8091 8092 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8093 struct dentry *unused, struct inode *inode, 8094 const char *key, void *buf, size_t buflen) 8095 { 8096 u32 mask; 8097 ssize_t ret; 8098 8099 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8100 return -EOPNOTSUPP; 8101 8102 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8103 if (!(mask & NFS_ACCESS_XAREAD)) 8104 return -EACCES; 8105 } 8106 8107 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8108 if (ret) 8109 return ret; 8110 8111 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8112 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8113 return ret; 8114 8115 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8116 8117 return ret; 8118 } 8119 8120 static ssize_t 8121 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8122 { 8123 u64 cookie; 8124 bool eof; 8125 ssize_t ret, size; 8126 char *buf; 8127 size_t buflen; 8128 u32 mask; 8129 8130 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8131 return 0; 8132 8133 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8134 if (!(mask & NFS_ACCESS_XALIST)) 8135 return 0; 8136 } 8137 8138 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8139 if (ret) 8140 return ret; 8141 8142 ret = nfs4_xattr_cache_list(inode, list, list_len); 8143 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8144 return ret; 8145 8146 cookie = 0; 8147 eof = false; 8148 buflen = list_len ? list_len : XATTR_LIST_MAX; 8149 buf = list_len ? 
list : NULL; 8150 size = 0; 8151 8152 while (!eof) { 8153 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8154 &cookie, &eof); 8155 if (ret < 0) 8156 return ret; 8157 8158 if (list_len) { 8159 buf += ret; 8160 buflen -= ret; 8161 } 8162 size += ret; 8163 } 8164 8165 if (list_len) 8166 nfs4_xattr_cache_set_list(inode, list, size); 8167 8168 return size; 8169 } 8170 8171 #else 8172 8173 static ssize_t 8174 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8175 { 8176 return 0; 8177 } 8178 #endif /* CONFIG_NFS_V4_2 */ 8179 8180 /* 8181 * nfs_fhget will use either the mounted_on_fileid or the fileid 8182 */ 8183 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8184 { 8185 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8186 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8187 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8188 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8189 return; 8190 8191 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8192 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8193 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8194 fattr->nlink = 2; 8195 } 8196 8197 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8198 const struct qstr *name, 8199 struct nfs4_fs_locations *fs_locations, 8200 struct page *page) 8201 { 8202 struct nfs_server *server = NFS_SERVER(dir); 8203 u32 bitmask[3]; 8204 struct nfs4_fs_locations_arg args = { 8205 .dir_fh = NFS_FH(dir), 8206 .name = name, 8207 .page = page, 8208 .bitmask = bitmask, 8209 }; 8210 struct nfs4_fs_locations_res res = { 8211 .fs_locations = fs_locations, 8212 }; 8213 struct rpc_message msg = { 8214 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8215 .rpc_argp = &args, 8216 .rpc_resp = &res, 8217 }; 8218 int status; 8219 8220 dprintk("%s: start\n", __func__); 8221 8222 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8223 bitmask[1] = nfs4_fattr_bitmap[1]; 8224 8225 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8226 * is not supported */ 8227 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8228 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8229 else 8230 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8231 8232 nfs_fattr_init(fs_locations->fattr); 8233 fs_locations->server = server; 8234 fs_locations->nlocations = 0; 8235 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8236 dprintk("%s: returned status = %d\n", __func__, status); 8237 return status; 8238 } 8239 8240 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8241 const struct qstr *name, 8242 struct nfs4_fs_locations *fs_locations, 8243 struct page *page) 8244 { 8245 struct nfs4_exception exception = { 8246 .interruptible = true, 8247 }; 8248 int err; 8249 do { 8250 err = _nfs4_proc_fs_locations(client, dir, name, 8251 fs_locations, page); 8252 trace_nfs4_get_fs_locations(dir, name, err); 8253 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8254 &exception); 8255 } while (exception.retry); 8256 return err; 8257 } 8258 8259 /* 8260 * This operation also signals the server that this client is 8261 * performing migration recovery. The server can stop returning 8262 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8263 * appended to this compound to identify the client ID which is 8264 * performing recovery. 
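 *
 * With args.migration = 1 and args.renew = 1 below, the compound that
 * goes out is roughly PUTFH + GETATTR(fsid, fs_locations) + RENEW, i.e.
 * the usual LOOKUP of a child name is skipped and a RENEW carrying
 * cl_clientid is appended (a sketch of the encoder's behaviour; see the
 * migration/renew handling for FS_LOCATIONS in nfs4xdr.c).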
8265 */ 8266 static int _nfs40_proc_get_locations(struct nfs_server *server, 8267 struct nfs_fh *fhandle, 8268 struct nfs4_fs_locations *locations, 8269 struct page *page, const struct cred *cred) 8270 { 8271 struct rpc_clnt *clnt = server->client; 8272 u32 bitmask[2] = { 8273 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8274 }; 8275 struct nfs4_fs_locations_arg args = { 8276 .clientid = server->nfs_client->cl_clientid, 8277 .fh = fhandle, 8278 .page = page, 8279 .bitmask = bitmask, 8280 .migration = 1, /* skip LOOKUP */ 8281 .renew = 1, /* append RENEW */ 8282 }; 8283 struct nfs4_fs_locations_res res = { 8284 .fs_locations = locations, 8285 .migration = 1, 8286 .renew = 1, 8287 }; 8288 struct rpc_message msg = { 8289 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8290 .rpc_argp = &args, 8291 .rpc_resp = &res, 8292 .rpc_cred = cred, 8293 }; 8294 unsigned long now = jiffies; 8295 int status; 8296 8297 nfs_fattr_init(locations->fattr); 8298 locations->server = server; 8299 locations->nlocations = 0; 8300 8301 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8302 status = nfs4_call_sync_sequence(clnt, server, &msg, 8303 &args.seq_args, &res.seq_res); 8304 if (status) 8305 return status; 8306 8307 renew_lease(server, now); 8308 return 0; 8309 } 8310 8311 #ifdef CONFIG_NFS_V4_1 8312 8313 /* 8314 * This operation also signals the server that this client is 8315 * performing migration recovery. The server can stop asserting 8316 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8317 * performing this operation is identified in the SEQUENCE 8318 * operation in this compound. 8319 * 8320 * When the client supports GETATTR(fs_locations_info), it can 8321 * be plumbed in here. 8322 */ 8323 static int _nfs41_proc_get_locations(struct nfs_server *server, 8324 struct nfs_fh *fhandle, 8325 struct nfs4_fs_locations *locations, 8326 struct page *page, const struct cred *cred) 8327 { 8328 struct rpc_clnt *clnt = server->client; 8329 u32 bitmask[2] = { 8330 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8331 }; 8332 struct nfs4_fs_locations_arg args = { 8333 .fh = fhandle, 8334 .page = page, 8335 .bitmask = bitmask, 8336 .migration = 1, /* skip LOOKUP */ 8337 }; 8338 struct nfs4_fs_locations_res res = { 8339 .fs_locations = locations, 8340 .migration = 1, 8341 }; 8342 struct rpc_message msg = { 8343 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8344 .rpc_argp = &args, 8345 .rpc_resp = &res, 8346 .rpc_cred = cred, 8347 }; 8348 struct nfs4_call_sync_data data = { 8349 .seq_server = server, 8350 .seq_args = &args.seq_args, 8351 .seq_res = &res.seq_res, 8352 }; 8353 struct rpc_task_setup task_setup_data = { 8354 .rpc_client = clnt, 8355 .rpc_message = &msg, 8356 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8357 .callback_data = &data, 8358 .flags = RPC_TASK_NO_ROUND_ROBIN, 8359 }; 8360 int status; 8361 8362 nfs_fattr_init(locations->fattr); 8363 locations->server = server; 8364 locations->nlocations = 0; 8365 8366 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8367 status = nfs4_call_sync_custom(&task_setup_data); 8368 if (status == NFS4_OK && 8369 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8370 status = -NFS4ERR_LEASE_MOVED; 8371 return status; 8372 } 8373 8374 #endif /* CONFIG_NFS_V4_1 */ 8375 8376 /** 8377 * nfs4_proc_get_locations - discover locations for a migrated FSID 8378 * @server: pointer to nfs_server to process 8379 * @fhandle: pointer to the kernel NFS client file handle 8380 * @locations: result of query 8381 * 
@page: buffer 8382 * @cred: credential to use for this operation 8383 * 8384 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8385 * operation failed, or a negative errno if a local error occurred. 8386 * 8387 * On success, "locations" is filled in, but if the server has 8388 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8389 * asserted. 8390 * 8391 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8392 * from this client that require migration recovery. 8393 */ 8394 int nfs4_proc_get_locations(struct nfs_server *server, 8395 struct nfs_fh *fhandle, 8396 struct nfs4_fs_locations *locations, 8397 struct page *page, const struct cred *cred) 8398 { 8399 struct nfs_client *clp = server->nfs_client; 8400 const struct nfs4_mig_recovery_ops *ops = 8401 clp->cl_mvops->mig_recovery_ops; 8402 struct nfs4_exception exception = { 8403 .interruptible = true, 8404 }; 8405 int status; 8406 8407 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8408 (unsigned long long)server->fsid.major, 8409 (unsigned long long)server->fsid.minor, 8410 clp->cl_hostname); 8411 nfs_display_fhandle(fhandle, __func__); 8412 8413 do { 8414 status = ops->get_locations(server, fhandle, locations, page, 8415 cred); 8416 if (status != -NFS4ERR_DELAY) 8417 break; 8418 nfs4_handle_exception(server, status, &exception); 8419 } while (exception.retry); 8420 return status; 8421 } 8422 8423 /* 8424 * This operation also signals the server that this client is 8425 * performing "lease moved" recovery. The server can stop 8426 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8427 * is appended to this compound to identify the client ID which is 8428 * performing recovery. 8429 */ 8430 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8431 { 8432 struct nfs_server *server = NFS_SERVER(inode); 8433 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8434 struct rpc_clnt *clnt = server->client; 8435 struct nfs4_fsid_present_arg args = { 8436 .fh = NFS_FH(inode), 8437 .clientid = clp->cl_clientid, 8438 .renew = 1, /* append RENEW */ 8439 }; 8440 struct nfs4_fsid_present_res res = { 8441 .renew = 1, 8442 }; 8443 struct rpc_message msg = { 8444 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8445 .rpc_argp = &args, 8446 .rpc_resp = &res, 8447 .rpc_cred = cred, 8448 }; 8449 unsigned long now = jiffies; 8450 int status; 8451 8452 res.fh = nfs_alloc_fhandle(); 8453 if (res.fh == NULL) 8454 return -ENOMEM; 8455 8456 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8457 status = nfs4_call_sync_sequence(clnt, server, &msg, 8458 &args.seq_args, &res.seq_res); 8459 nfs_free_fhandle(res.fh); 8460 if (status) 8461 return status; 8462 8463 do_renew_lease(clp, now); 8464 return 0; 8465 } 8466 8467 #ifdef CONFIG_NFS_V4_1 8468 8469 /* 8470 * This operation also signals the server that this client is 8471 * performing "lease moved" recovery. The server can stop asserting 8472 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8473 * this operation is identified in the SEQUENCE operation in this 8474 * compound. 
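 *
 * The SEQUENCE status flags in the reply are checked as well: if the
 * server is still asserting SEQ4_STATUS_LEASE_MOVED, the call returns
 * -NFS4ERR_LEASE_MOVED so the caller knows "lease moved" recovery is
 * not yet complete.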
8475 */ 8476 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred) 8477 { 8478 struct nfs_server *server = NFS_SERVER(inode); 8479 struct rpc_clnt *clnt = server->client; 8480 struct nfs4_fsid_present_arg args = { 8481 .fh = NFS_FH(inode), 8482 }; 8483 struct nfs4_fsid_present_res res = { 8484 }; 8485 struct rpc_message msg = { 8486 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8487 .rpc_argp = &args, 8488 .rpc_resp = &res, 8489 .rpc_cred = cred, 8490 }; 8491 int status; 8492 8493 res.fh = nfs_alloc_fhandle(); 8494 if (res.fh == NULL) 8495 return -ENOMEM; 8496 8497 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8498 status = nfs4_call_sync_sequence(clnt, server, &msg, 8499 &args.seq_args, &res.seq_res); 8500 nfs_free_fhandle(res.fh); 8501 if (status == NFS4_OK && 8502 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8503 status = -NFS4ERR_LEASE_MOVED; 8504 return status; 8505 } 8506 8507 #endif /* CONFIG_NFS_V4_1 */ 8508 8509 /** 8510 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 8511 * @inode: inode on FSID to check 8512 * @cred: credential to use for this operation 8513 * 8514 * Server indicates whether the FSID is present, moved, or not 8515 * recognized. This operation is necessary to clear a LEASE_MOVED 8516 * condition for this client ID. 8517 * 8518 * Returns NFS4_OK if the FSID is present on this server, 8519 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 8520 * NFS4ERR code if some error occurred on the server, or a 8521 * negative errno if a local failure occurred. 8522 */ 8523 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred) 8524 { 8525 struct nfs_server *server = NFS_SERVER(inode); 8526 struct nfs_client *clp = server->nfs_client; 8527 const struct nfs4_mig_recovery_ops *ops = 8528 clp->cl_mvops->mig_recovery_ops; 8529 struct nfs4_exception exception = { 8530 .interruptible = true, 8531 }; 8532 int status; 8533 8534 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8535 (unsigned long long)server->fsid.major, 8536 (unsigned long long)server->fsid.minor, 8537 clp->cl_hostname); 8538 nfs_display_fhandle(NFS_FH(inode), __func__); 8539 8540 do { 8541 status = ops->fsid_present(inode, cred); 8542 if (status != -NFS4ERR_DELAY) 8543 break; 8544 nfs4_handle_exception(server, status, &exception); 8545 } while (exception.retry); 8546 return status; 8547 } 8548 8549 /* 8550 * If 'use_integrity' is true and the state management nfs_client 8551 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 8552 * and the machine credential as per RFC3530bis and RFC5661 Security 8553 * Considerations sections. Otherwise, just use the user cred with the 8554 * filesystem's rpc_client.
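 *
 * Independently of 'use_integrity', nfs4_state_protect() below may still
 * switch the call to the machine credential when the server granted
 * NFS_SP4_MACH_CRED_SECINFO at EXCHANGE_ID time (see
 * nfs4_sp4_select_mode()).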
8555 */ 8556 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8557 { 8558 int status; 8559 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8560 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8561 struct nfs4_secinfo_arg args = { 8562 .dir_fh = NFS_FH(dir), 8563 .name = name, 8564 }; 8565 struct nfs4_secinfo_res res = { 8566 .flavors = flavors, 8567 }; 8568 struct rpc_message msg = { 8569 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8570 .rpc_argp = &args, 8571 .rpc_resp = &res, 8572 }; 8573 struct nfs4_call_sync_data data = { 8574 .seq_server = NFS_SERVER(dir), 8575 .seq_args = &args.seq_args, 8576 .seq_res = &res.seq_res, 8577 }; 8578 struct rpc_task_setup task_setup = { 8579 .rpc_client = clnt, 8580 .rpc_message = &msg, 8581 .callback_ops = clp->cl_mvops->call_sync_ops, 8582 .callback_data = &data, 8583 .flags = RPC_TASK_NO_ROUND_ROBIN, 8584 }; 8585 const struct cred *cred = NULL; 8586 8587 if (use_integrity) { 8588 clnt = clp->cl_rpcclient; 8589 task_setup.rpc_client = clnt; 8590 8591 cred = nfs4_get_clid_cred(clp); 8592 msg.rpc_cred = cred; 8593 } 8594 8595 dprintk("NFS call secinfo %s\n", name->name); 8596 8597 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8598 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8599 status = nfs4_call_sync_custom(&task_setup); 8600 8601 dprintk("NFS reply secinfo: %d\n", status); 8602 8603 put_cred(cred); 8604 return status; 8605 } 8606 8607 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8608 struct nfs4_secinfo_flavors *flavors) 8609 { 8610 struct nfs4_exception exception = { 8611 .interruptible = true, 8612 }; 8613 int err; 8614 do { 8615 err = -NFS4ERR_WRONGSEC; 8616 8617 /* try to use integrity protection with machine cred */ 8618 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8619 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8620 8621 /* 8622 * if unable to use integrity protection, or SECINFO with 8623 * integrity protection returns NFS4ERR_WRONGSEC (which is 8624 * disallowed by spec, but exists in deployed servers) use 8625 * the current filesystem's rpc_client and the user cred. 8626 */ 8627 if (err == -NFS4ERR_WRONGSEC) 8628 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8629 8630 trace_nfs4_secinfo(dir, name, err); 8631 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8632 &exception); 8633 } while (exception.retry); 8634 return err; 8635 } 8636 8637 #ifdef CONFIG_NFS_V4_1 8638 /* 8639 * Check the exchange flags returned by the server for invalid flags, having 8640 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8641 * DS flags set. 
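 *
 * As a worked example: a reply carrying only EXCHGID4_FLAG_USE_PNFS_MDS is
 * accepted, EXCHGID4_FLAG_USE_PNFS_MDS | EXCHGID4_FLAG_USE_NON_PNFS is
 * rejected, and a reply setting none of USE_NON_PNFS, USE_PNFS_MDS or
 * USE_PNFS_DS is rejected as well.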
8642 */ 8643 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8644 { 8645 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8646 goto out_inval; 8647 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8648 goto out_inval; 8649 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8650 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8651 goto out_inval; 8652 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8653 goto out_inval; 8654 return NFS_OK; 8655 out_inval: 8656 return -NFS4ERR_INVAL; 8657 } 8658 8659 static bool 8660 nfs41_same_server_scope(struct nfs41_server_scope *a, 8661 struct nfs41_server_scope *b) 8662 { 8663 if (a->server_scope_sz != b->server_scope_sz) 8664 return false; 8665 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8666 } 8667 8668 static void 8669 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8670 { 8671 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8672 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8673 struct nfs_client *clp = args->client; 8674 8675 switch (task->tk_status) { 8676 case -NFS4ERR_BADSESSION: 8677 case -NFS4ERR_DEADSESSION: 8678 nfs4_schedule_session_recovery(clp->cl_session, 8679 task->tk_status); 8680 return; 8681 } 8682 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8683 res->dir != NFS4_CDFS4_BOTH) { 8684 rpc_task_close_connection(task); 8685 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8686 rpc_restart_call(task); 8687 } 8688 } 8689 8690 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8691 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8692 }; 8693 8694 /* 8695 * nfs4_proc_bind_one_conn_to_session() 8696 * 8697 * The 4.1 client currently uses the same TCP connection for the 8698 * fore and backchannel. 
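 *
 * The connection that carried CREATE_SESSION is bound to the session
 * implicitly; BIND_CONN_TO_SESSION exists so that additional connections,
 * such as transports added for session trunking, can be attached to the
 * fore and/or back channel of an existing session (RFC 5661).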
8699 */ 8700 static 8701 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8702 struct rpc_xprt *xprt, 8703 struct nfs_client *clp, 8704 const struct cred *cred) 8705 { 8706 int status; 8707 struct nfs41_bind_conn_to_session_args args = { 8708 .client = clp, 8709 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8710 .retries = 0, 8711 }; 8712 struct nfs41_bind_conn_to_session_res res; 8713 struct rpc_message msg = { 8714 .rpc_proc = 8715 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8716 .rpc_argp = &args, 8717 .rpc_resp = &res, 8718 .rpc_cred = cred, 8719 }; 8720 struct rpc_task_setup task_setup_data = { 8721 .rpc_client = clnt, 8722 .rpc_xprt = xprt, 8723 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8724 .rpc_message = &msg, 8725 .flags = RPC_TASK_TIMEOUT, 8726 }; 8727 struct rpc_task *task; 8728 8729 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8730 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8731 args.dir = NFS4_CDFC4_FORE; 8732 8733 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8734 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8735 args.dir = NFS4_CDFC4_FORE; 8736 8737 task = rpc_run_task(&task_setup_data); 8738 if (!IS_ERR(task)) { 8739 status = task->tk_status; 8740 rpc_put_task(task); 8741 } else 8742 status = PTR_ERR(task); 8743 trace_nfs4_bind_conn_to_session(clp, status); 8744 if (status == 0) { 8745 if (memcmp(res.sessionid.data, 8746 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8747 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8748 return -EIO; 8749 } 8750 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8751 dprintk("NFS: %s: Unexpected direction from server\n", 8752 __func__); 8753 return -EIO; 8754 } 8755 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8756 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8757 __func__); 8758 return -EIO; 8759 } 8760 } 8761 8762 return status; 8763 } 8764 8765 struct rpc_bind_conn_calldata { 8766 struct nfs_client *clp; 8767 const struct cred *cred; 8768 }; 8769 8770 static int 8771 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8772 struct rpc_xprt *xprt, 8773 void *calldata) 8774 { 8775 struct rpc_bind_conn_calldata *p = calldata; 8776 8777 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8778 } 8779 8780 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8781 { 8782 struct rpc_bind_conn_calldata data = { 8783 .clp = clp, 8784 .cred = cred, 8785 }; 8786 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8787 nfs4_proc_bind_conn_to_session_callback, &data); 8788 } 8789 8790 /* 8791 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8792 * and operations we'd like to see to enable certain features in the allow map 8793 */ 8794 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8795 .how = SP4_MACH_CRED, 8796 .enforce.u.words = { 8797 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8798 1 << (OP_EXCHANGE_ID - 32) | 8799 1 << (OP_CREATE_SESSION - 32) | 8800 1 << (OP_DESTROY_SESSION - 32) | 8801 1 << (OP_DESTROY_CLIENTID - 32) 8802 }, 8803 .allow.u.words = { 8804 [0] = 1 << (OP_CLOSE) | 8805 1 << (OP_OPEN_DOWNGRADE) | 8806 1 << (OP_LOCKU) | 8807 1 << (OP_DELEGRETURN) | 8808 1 << (OP_COMMIT), 8809 [1] = 1 << (OP_SECINFO - 32) | 8810 1 << (OP_SECINFO_NO_NAME - 32) | 8811 1 << (OP_LAYOUTRETURN - 32) | 8812 1 << (OP_TEST_STATEID - 32) | 8813 1 << (OP_FREE_STATEID - 32) | 8814 1 << (OP_WRITE - 32) 8815 } 8816 }; 8817 8818 
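/*
 * The op maps above pack one bit per NFSv4.1 operation number, 32 ops per
 * word: operation n lands in word n / 32 at bit n % 32. As a worked
 * example, OP_CLOSE (4) is bit 4 of word [0], while OP_EXCHANGE_ID (42) is
 * bit 42 - 32 = 10 of word [1], hence the "- 32" adjustments in the
 * initializers above and the test_bit() checks on u.longs in
 * nfs4_sp4_select_mode() below.
 */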
/* 8819 * Select the state protection mode for client `clp' given the server results 8820 * from exchange_id in `sp'. 8821 * 8822 * Returns 0 on success, negative errno otherwise. 8823 */ 8824 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8825 struct nfs41_state_protection *sp) 8826 { 8827 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8828 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8829 1 << (OP_EXCHANGE_ID - 32) | 8830 1 << (OP_CREATE_SESSION - 32) | 8831 1 << (OP_DESTROY_SESSION - 32) | 8832 1 << (OP_DESTROY_CLIENTID - 32) 8833 }; 8834 unsigned long flags = 0; 8835 unsigned int i; 8836 int ret = 0; 8837 8838 if (sp->how == SP4_MACH_CRED) { 8839 /* Print state protect result */ 8840 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8841 for (i = 0; i <= LAST_NFS4_OP; i++) { 8842 if (test_bit(i, sp->enforce.u.longs)) 8843 dfprintk(MOUNT, " enforce op %d\n", i); 8844 if (test_bit(i, sp->allow.u.longs)) 8845 dfprintk(MOUNT, " allow op %d\n", i); 8846 } 8847 8848 /* make sure nothing is on enforce list that isn't supported */ 8849 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8850 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8851 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8852 ret = -EINVAL; 8853 goto out; 8854 } 8855 } 8856 8857 /* 8858 * Minimal mode - state operations are allowed to use machine 8859 * credential. Note this already happens by default, so the 8860 * client doesn't have to do anything more than the negotiation. 8861 * 8862 * NOTE: we don't care if EXCHANGE_ID is in the list - 8863 * we're already using the machine cred for exchange_id 8864 * and will never use a different cred. 8865 */ 8866 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8867 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8868 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8869 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8870 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8871 dfprintk(MOUNT, " minimal mode enabled\n"); 8872 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8873 } else { 8874 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8875 ret = -EINVAL; 8876 goto out; 8877 } 8878 8879 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8880 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8881 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8882 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8883 dfprintk(MOUNT, " cleanup mode enabled\n"); 8884 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8885 } 8886 8887 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8888 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8889 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8890 } 8891 8892 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8893 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8894 dfprintk(MOUNT, " secinfo mode enabled\n"); 8895 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8896 } 8897 8898 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8899 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8900 dfprintk(MOUNT, " stateid mode enabled\n"); 8901 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8902 } 8903 8904 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8905 dfprintk(MOUNT, " write mode enabled\n"); 8906 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8907 } 8908 8909 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8910 dfprintk(MOUNT, " commit mode enabled\n"); 8911 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8912 } 8913 } 8914 out: 8915 clp->cl_sp4_flags = flags; 8916 return ret; 8917 } 8918 8919 struct nfs41_exchange_id_data { 8920 struct 
nfs41_exchange_id_res res; 8921 struct nfs41_exchange_id_args args; 8922 }; 8923 8924 static void nfs4_exchange_id_release(void *data) 8925 { 8926 struct nfs41_exchange_id_data *cdata = 8927 (struct nfs41_exchange_id_data *)data; 8928 8929 nfs_put_client(cdata->args.client); 8930 kfree(cdata->res.impl_id); 8931 kfree(cdata->res.server_scope); 8932 kfree(cdata->res.server_owner); 8933 kfree(cdata); 8934 } 8935 8936 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8937 .rpc_release = nfs4_exchange_id_release, 8938 }; 8939 8940 /* 8941 * _nfs4_proc_exchange_id() 8942 * 8943 * Wrapper for EXCHANGE_ID operation. 8944 */ 8945 static struct rpc_task * 8946 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8947 u32 sp4_how, struct rpc_xprt *xprt) 8948 { 8949 struct rpc_message msg = { 8950 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 8951 .rpc_cred = cred, 8952 }; 8953 struct rpc_task_setup task_setup_data = { 8954 .rpc_client = clp->cl_rpcclient, 8955 .callback_ops = &nfs4_exchange_id_call_ops, 8956 .rpc_message = &msg, 8957 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 8958 }; 8959 struct nfs41_exchange_id_data *calldata; 8960 int status; 8961 8962 if (!refcount_inc_not_zero(&clp->cl_count)) 8963 return ERR_PTR(-EIO); 8964 8965 status = -ENOMEM; 8966 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 8967 if (!calldata) 8968 goto out; 8969 8970 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 8971 8972 status = nfs4_init_uniform_client_string(clp); 8973 if (status) 8974 goto out_calldata; 8975 8976 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 8977 GFP_NOFS); 8978 status = -ENOMEM; 8979 if (unlikely(calldata->res.server_owner == NULL)) 8980 goto out_calldata; 8981 8982 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 8983 GFP_NOFS); 8984 if (unlikely(calldata->res.server_scope == NULL)) 8985 goto out_server_owner; 8986 8987 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 8988 if (unlikely(calldata->res.impl_id == NULL)) 8989 goto out_server_scope; 8990 8991 switch (sp4_how) { 8992 case SP4_NONE: 8993 calldata->args.state_protect.how = SP4_NONE; 8994 break; 8995 8996 case SP4_MACH_CRED: 8997 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 8998 break; 8999 9000 default: 9001 /* unsupported! 
*/ 9002 WARN_ON_ONCE(1); 9003 status = -EINVAL; 9004 goto out_impl_id; 9005 } 9006 if (xprt) { 9007 task_setup_data.rpc_xprt = xprt; 9008 task_setup_data.flags |= RPC_TASK_SOFTCONN; 9009 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 9010 sizeof(calldata->args.verifier.data)); 9011 } 9012 calldata->args.client = clp; 9013 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 9014 EXCHGID4_FLAG_BIND_PRINC_STATEID; 9015 #ifdef CONFIG_NFS_V4_1_MIGRATION 9016 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 9017 #endif 9018 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 9019 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 9020 msg.rpc_argp = &calldata->args; 9021 msg.rpc_resp = &calldata->res; 9022 task_setup_data.callback_data = calldata; 9023 9024 return rpc_run_task(&task_setup_data); 9025 9026 out_impl_id: 9027 kfree(calldata->res.impl_id); 9028 out_server_scope: 9029 kfree(calldata->res.server_scope); 9030 out_server_owner: 9031 kfree(calldata->res.server_owner); 9032 out_calldata: 9033 kfree(calldata); 9034 out: 9035 nfs_put_client(clp); 9036 return ERR_PTR(status); 9037 } 9038 9039 /* 9040 * _nfs4_proc_exchange_id() 9041 * 9042 * Wrapper for EXCHANGE_ID operation. 9043 */ 9044 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9045 u32 sp4_how) 9046 { 9047 struct rpc_task *task; 9048 struct nfs41_exchange_id_args *argp; 9049 struct nfs41_exchange_id_res *resp; 9050 unsigned long now = jiffies; 9051 int status; 9052 9053 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9054 if (IS_ERR(task)) 9055 return PTR_ERR(task); 9056 9057 argp = task->tk_msg.rpc_argp; 9058 resp = task->tk_msg.rpc_resp; 9059 status = task->tk_status; 9060 if (status != 0) 9061 goto out; 9062 9063 status = nfs4_check_cl_exchange_flags(resp->flags, 9064 clp->cl_mvops->minor_version); 9065 if (status != 0) 9066 goto out; 9067 9068 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9069 if (status != 0) 9070 goto out; 9071 9072 do_renew_lease(clp, now); 9073 9074 clp->cl_clientid = resp->clientid; 9075 clp->cl_exchange_flags = resp->flags; 9076 clp->cl_seqid = resp->seqid; 9077 /* Client ID is not confirmed */ 9078 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9079 clear_bit(NFS4_SESSION_ESTABLISHED, 9080 &clp->cl_session->session_state); 9081 9082 if (clp->cl_serverscope != NULL && 9083 !nfs41_same_server_scope(clp->cl_serverscope, 9084 resp->server_scope)) { 9085 dprintk("%s: server_scope mismatch detected\n", 9086 __func__); 9087 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9088 } 9089 9090 swap(clp->cl_serverowner, resp->server_owner); 9091 swap(clp->cl_serverscope, resp->server_scope); 9092 swap(clp->cl_implid, resp->impl_id); 9093 9094 /* Save the EXCHANGE_ID verifier session trunk tests */ 9095 memcpy(clp->cl_confirm.data, argp->verifier.data, 9096 sizeof(clp->cl_confirm.data)); 9097 out: 9098 trace_nfs4_exchange_id(clp, status); 9099 rpc_put_task(task); 9100 return status; 9101 } 9102 9103 /* 9104 * nfs4_proc_exchange_id() 9105 * 9106 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9107 * 9108 * Since the clientid has expired, all compounds using sessions 9109 * associated with the stale clientid will be returning 9110 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9111 * be in some phase of session reset. 9112 * 9113 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
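 *
 * If the SP4_MACH_CRED attempt fails for any reason, including the server
 * demanding an enforce list we cannot honour (nfs4_sp4_select_mode()
 * returns -EINVAL for that), the code falls back to a second EXCHANGE_ID
 * with SP4_NONE.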
9114 */ 9115 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) 9116 { 9117 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 9118 int status; 9119 9120 /* try SP4_MACH_CRED if krb5i/p */ 9121 if (authflavor == RPC_AUTH_GSS_KRB5I || 9122 authflavor == RPC_AUTH_GSS_KRB5P) { 9123 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 9124 if (!status) 9125 return 0; 9126 } 9127 9128 /* try SP4_NONE */ 9129 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 9130 } 9131 9132 /** 9133 * nfs4_test_session_trunk 9134 * 9135 * This is an add_xprt_test() test function called from 9136 * rpc_clnt_setup_test_and_add_xprt. 9137 * 9138 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt 9139 * and is dereferrenced in nfs4_exchange_id_release 9140 * 9141 * Upon success, add the new transport to the rpc_clnt 9142 * 9143 * @clnt: struct rpc_clnt to get new transport 9144 * @xprt: the rpc_xprt to test 9145 * @data: call data for _nfs4_proc_exchange_id. 9146 */ 9147 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 9148 void *data) 9149 { 9150 struct nfs4_add_xprt_data *adata = data; 9151 struct rpc_task *task; 9152 int status; 9153 9154 u32 sp4_how; 9155 9156 dprintk("--> %s try %s\n", __func__, 9157 xprt->address_strings[RPC_DISPLAY_ADDR]); 9158 9159 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 9160 9161 try_again: 9162 /* Test connection for session trunking. Async exchange_id call */ 9163 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 9164 if (IS_ERR(task)) 9165 return; 9166 9167 status = task->tk_status; 9168 if (status == 0) { 9169 status = nfs4_detect_session_trunking(adata->clp, 9170 task->tk_msg.rpc_resp, xprt); 9171 trace_nfs4_trunked_exchange_id(adata->clp, 9172 xprt->address_strings[RPC_DISPLAY_ADDR], status); 9173 } 9174 if (status == 0) 9175 rpc_clnt_xprt_switch_add_xprt(clnt, xprt); 9176 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt, 9177 (struct sockaddr *)&xprt->addr)) 9178 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt); 9179 9180 rpc_put_task(task); 9181 if (status == -NFS4ERR_DELAY) { 9182 ssleep(1); 9183 goto try_again; 9184 } 9185 } 9186 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 9187 9188 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 9189 const struct cred *cred) 9190 { 9191 struct rpc_message msg = { 9192 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 9193 .rpc_argp = clp, 9194 .rpc_cred = cred, 9195 }; 9196 int status; 9197 9198 status = rpc_call_sync(clp->cl_rpcclient, &msg, 9199 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9200 trace_nfs4_destroy_clientid(clp, status); 9201 if (status) 9202 dprintk("NFS: Got error %d from the server %s on " 9203 "DESTROY_CLIENTID.", status, clp->cl_hostname); 9204 return status; 9205 } 9206 9207 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 9208 const struct cred *cred) 9209 { 9210 unsigned int loop; 9211 int ret; 9212 9213 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 9214 ret = _nfs4_proc_destroy_clientid(clp, cred); 9215 switch (ret) { 9216 case -NFS4ERR_DELAY: 9217 case -NFS4ERR_CLIENTID_BUSY: 9218 ssleep(1); 9219 break; 9220 default: 9221 return ret; 9222 } 9223 } 9224 return 0; 9225 } 9226 9227 int nfs4_destroy_clientid(struct nfs_client *clp) 9228 { 9229 const struct cred *cred; 9230 int ret = 0; 9231 9232 if (clp->cl_mvops->minor_version < 1) 9233 goto out; 9234 if (clp->cl_exchange_flags == 0) 9235 goto out; 9236 if 
(clp->cl_preserve_clid) 9237 goto out; 9238 cred = nfs4_get_clid_cred(clp); 9239 ret = nfs4_proc_destroy_clientid(clp, cred); 9240 put_cred(cred); 9241 switch (ret) { 9242 case 0: 9243 case -NFS4ERR_STALE_CLIENTID: 9244 clp->cl_exchange_flags = 0; 9245 } 9246 out: 9247 return ret; 9248 } 9249 9250 #endif /* CONFIG_NFS_V4_1 */ 9251 9252 struct nfs4_get_lease_time_data { 9253 struct nfs4_get_lease_time_args *args; 9254 struct nfs4_get_lease_time_res *res; 9255 struct nfs_client *clp; 9256 }; 9257 9258 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9259 void *calldata) 9260 { 9261 struct nfs4_get_lease_time_data *data = 9262 (struct nfs4_get_lease_time_data *)calldata; 9263 9264 /* just setup sequence, do not trigger session recovery 9265 since we're invoked within one */ 9266 nfs4_setup_sequence(data->clp, 9267 &data->args->la_seq_args, 9268 &data->res->lr_seq_res, 9269 task); 9270 } 9271 9272 /* 9273 * Called from nfs4_state_manager thread for session setup, so don't recover 9274 * from sequence operation or clientid errors. 9275 */ 9276 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9277 { 9278 struct nfs4_get_lease_time_data *data = 9279 (struct nfs4_get_lease_time_data *)calldata; 9280 9281 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9282 return; 9283 switch (task->tk_status) { 9284 case -NFS4ERR_DELAY: 9285 case -NFS4ERR_GRACE: 9286 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9287 task->tk_status = 0; 9288 fallthrough; 9289 case -NFS4ERR_RETRY_UNCACHED_REP: 9290 rpc_restart_call_prepare(task); 9291 return; 9292 } 9293 } 9294 9295 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9296 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9297 .rpc_call_done = nfs4_get_lease_time_done, 9298 }; 9299 9300 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9301 { 9302 struct nfs4_get_lease_time_args args; 9303 struct nfs4_get_lease_time_res res = { 9304 .lr_fsinfo = fsinfo, 9305 }; 9306 struct nfs4_get_lease_time_data data = { 9307 .args = &args, 9308 .res = &res, 9309 .clp = clp, 9310 }; 9311 struct rpc_message msg = { 9312 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9313 .rpc_argp = &args, 9314 .rpc_resp = &res, 9315 }; 9316 struct rpc_task_setup task_setup = { 9317 .rpc_client = clp->cl_rpcclient, 9318 .rpc_message = &msg, 9319 .callback_ops = &nfs4_get_lease_time_ops, 9320 .callback_data = &data, 9321 .flags = RPC_TASK_TIMEOUT, 9322 }; 9323 9324 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9325 return nfs4_call_sync_custom(&task_setup); 9326 } 9327 9328 #ifdef CONFIG_NFS_V4_1 9329 9330 /* 9331 * Initialize the values to be used by the client in CREATE_SESSION 9332 * If nfs4_init_session set the fore channel request and response sizes, 9333 * use them. 9334 * 9335 * Set the back channel max_resp_sz_cached to zero to force the client to 9336 * always set csa_cachethis to FALSE because the current implementation 9337 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
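 *
 * These values become the fore and back channel attributes
 * (csa_fore_chan_attrs / csa_back_chan_attrs) of the CREATE_SESSION
 * arguments; the server may negotiate them down, and the reply is
 * validated by nfs4_verify_channel_attrs() below.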
9338 */ 9339 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9340 struct rpc_clnt *clnt) 9341 { 9342 unsigned int max_rqst_sz, max_resp_sz; 9343 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9344 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9345 9346 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9347 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9348 9349 /* Fore channel attributes */ 9350 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9351 args->fc_attrs.max_resp_sz = max_resp_sz; 9352 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9353 args->fc_attrs.max_reqs = max_session_slots; 9354 9355 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9356 "max_ops=%u max_reqs=%u\n", 9357 __func__, 9358 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9359 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9360 9361 /* Back channel attributes */ 9362 args->bc_attrs.max_rqst_sz = max_bc_payload; 9363 args->bc_attrs.max_resp_sz = max_bc_payload; 9364 args->bc_attrs.max_resp_sz_cached = 0; 9365 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9366 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9367 if (args->bc_attrs.max_reqs > max_bc_slots) 9368 args->bc_attrs.max_reqs = max_bc_slots; 9369 9370 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9371 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9372 __func__, 9373 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9374 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9375 args->bc_attrs.max_reqs); 9376 } 9377 9378 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9379 struct nfs41_create_session_res *res) 9380 { 9381 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9382 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9383 9384 if (rcvd->max_resp_sz > sent->max_resp_sz) 9385 return -EINVAL; 9386 /* 9387 * Our requested max_ops is the minimum we need; we're not 9388 * prepared to break up compounds into smaller pieces than that. 
9389 * So, no point even trying to continue if the server won't 9390 * cooperate: 9391 */ 9392 if (rcvd->max_ops < sent->max_ops) 9393 return -EINVAL; 9394 if (rcvd->max_reqs == 0) 9395 return -EINVAL; 9396 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9397 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9398 return 0; 9399 } 9400 9401 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9402 struct nfs41_create_session_res *res) 9403 { 9404 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9405 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9406 9407 if (!(res->flags & SESSION4_BACK_CHAN)) 9408 goto out; 9409 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9410 return -EINVAL; 9411 if (rcvd->max_resp_sz < sent->max_resp_sz) 9412 return -EINVAL; 9413 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9414 return -EINVAL; 9415 if (rcvd->max_ops > sent->max_ops) 9416 return -EINVAL; 9417 if (rcvd->max_reqs > sent->max_reqs) 9418 return -EINVAL; 9419 out: 9420 return 0; 9421 } 9422 9423 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9424 struct nfs41_create_session_res *res) 9425 { 9426 int ret; 9427 9428 ret = nfs4_verify_fore_channel_attrs(args, res); 9429 if (ret) 9430 return ret; 9431 return nfs4_verify_back_channel_attrs(args, res); 9432 } 9433 9434 static void nfs4_update_session(struct nfs4_session *session, 9435 struct nfs41_create_session_res *res) 9436 { 9437 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9438 /* Mark client id and session as being confirmed */ 9439 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9440 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9441 session->flags = res->flags; 9442 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9443 if (res->flags & SESSION4_BACK_CHAN) 9444 memcpy(&session->bc_attrs, &res->bc_attrs, 9445 sizeof(session->bc_attrs)); 9446 } 9447 9448 static int _nfs4_proc_create_session(struct nfs_client *clp, 9449 const struct cred *cred) 9450 { 9451 struct nfs4_session *session = clp->cl_session; 9452 struct nfs41_create_session_args args = { 9453 .client = clp, 9454 .clientid = clp->cl_clientid, 9455 .seqid = clp->cl_seqid, 9456 .cb_program = NFS4_CALLBACK, 9457 }; 9458 struct nfs41_create_session_res res; 9459 9460 struct rpc_message msg = { 9461 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9462 .rpc_argp = &args, 9463 .rpc_resp = &res, 9464 .rpc_cred = cred, 9465 }; 9466 int status; 9467 9468 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9469 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9470 9471 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9472 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9473 trace_nfs4_create_session(clp, status); 9474 9475 switch (status) { 9476 case -NFS4ERR_STALE_CLIENTID: 9477 case -NFS4ERR_DELAY: 9478 case -ETIMEDOUT: 9479 case -EACCES: 9480 case -EAGAIN: 9481 goto out; 9482 } 9483 9484 clp->cl_seqid++; 9485 if (!status) { 9486 /* Verify the session's negotiated channel_attrs values */ 9487 status = nfs4_verify_channel_attrs(&args, &res); 9488 /* Increment the clientid slot sequence id */ 9489 if (status) 9490 goto out; 9491 nfs4_update_session(session, &res); 9492 } 9493 out: 9494 return status; 9495 } 9496 9497 /* 9498 * Issues a CREATE_SESSION operation to the server. 9499 * It is the responsibility of the caller to verify the session is 9500 * expired before calling this routine. 
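 *
 * A rough sketch of the usual caller, the state manager re-establishing a
 * lease:
 *
 *	nfs4_proc_exchange_id(clp, cred);	(clientid and seqid)
 *	nfs4_proc_create_session(clp, cred);	(session, slot tables, and
 *						 the trunking probe below)
 *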
9501 */ 9502 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9503 { 9504 int status; 9505 unsigned *ptr; 9506 struct nfs4_session *session = clp->cl_session; 9507 struct nfs4_add_xprt_data xprtdata = { 9508 .clp = clp, 9509 }; 9510 struct rpc_add_xprt_test rpcdata = { 9511 .add_xprt_test = clp->cl_mvops->session_trunk, 9512 .data = &xprtdata, 9513 }; 9514 9515 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9516 9517 status = _nfs4_proc_create_session(clp, cred); 9518 if (status) 9519 goto out; 9520 9521 /* Init or reset the session slot tables */ 9522 status = nfs4_setup_session_slot_tables(session); 9523 dprintk("slot table setup returned %d\n", status); 9524 if (status) 9525 goto out; 9526 9527 ptr = (unsigned *)&session->sess_id.data[0]; 9528 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9529 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9530 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9531 out: 9532 return status; 9533 } 9534 9535 /* 9536 * Issue the over-the-wire RPC DESTROY_SESSION. 9537 * The caller must serialize access to this routine. 9538 */ 9539 int nfs4_proc_destroy_session(struct nfs4_session *session, 9540 const struct cred *cred) 9541 { 9542 struct rpc_message msg = { 9543 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9544 .rpc_argp = session, 9545 .rpc_cred = cred, 9546 }; 9547 int status = 0; 9548 9549 /* session is still being setup */ 9550 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9551 return 0; 9552 9553 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9554 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9555 trace_nfs4_destroy_session(session->clp, status); 9556 9557 if (status) 9558 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9559 "Session has been destroyed regardless...\n", status); 9560 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9561 return status; 9562 } 9563 9564 /* 9565 * Renew the cl_session lease. 
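 * For NFSv4.1 the lease is renewed by issuing a SEQUENCE operation; the nfs4_sequence_data structure and the rpc_call_ops below drive that call asynchronously.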
9566 */ 9567 struct nfs4_sequence_data { 9568 struct nfs_client *clp; 9569 struct nfs4_sequence_args args; 9570 struct nfs4_sequence_res res; 9571 }; 9572 9573 static void nfs41_sequence_release(void *data) 9574 { 9575 struct nfs4_sequence_data *calldata = data; 9576 struct nfs_client *clp = calldata->clp; 9577 9578 if (refcount_read(&clp->cl_count) > 1) 9579 nfs4_schedule_state_renewal(clp); 9580 nfs_put_client(clp); 9581 kfree(calldata); 9582 } 9583 9584 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9585 { 9586 switch(task->tk_status) { 9587 case -NFS4ERR_DELAY: 9588 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9589 return -EAGAIN; 9590 default: 9591 nfs4_schedule_lease_recovery(clp); 9592 } 9593 return 0; 9594 } 9595 9596 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9597 { 9598 struct nfs4_sequence_data *calldata = data; 9599 struct nfs_client *clp = calldata->clp; 9600 9601 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9602 return; 9603 9604 trace_nfs4_sequence(clp, task->tk_status); 9605 if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9606 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9607 if (refcount_read(&clp->cl_count) == 1) 9608 return; 9609 9610 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9611 rpc_restart_call_prepare(task); 9612 return; 9613 } 9614 } 9615 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9616 } 9617 9618 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9619 { 9620 struct nfs4_sequence_data *calldata = data; 9621 struct nfs_client *clp = calldata->clp; 9622 struct nfs4_sequence_args *args; 9623 struct nfs4_sequence_res *res; 9624 9625 args = task->tk_msg.rpc_argp; 9626 res = task->tk_msg.rpc_resp; 9627 9628 nfs4_setup_sequence(clp, args, res, task); 9629 } 9630 9631 static const struct rpc_call_ops nfs41_sequence_ops = { 9632 .rpc_call_done = nfs41_sequence_call_done, 9633 .rpc_call_prepare = nfs41_sequence_prepare, 9634 .rpc_release = nfs41_sequence_release, 9635 }; 9636 9637 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9638 const struct cred *cred, 9639 struct nfs4_slot *slot, 9640 bool is_privileged) 9641 { 9642 struct nfs4_sequence_data *calldata; 9643 struct rpc_message msg = { 9644 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9645 .rpc_cred = cred, 9646 }; 9647 struct rpc_task_setup task_setup_data = { 9648 .rpc_client = clp->cl_rpcclient, 9649 .rpc_message = &msg, 9650 .callback_ops = &nfs41_sequence_ops, 9651 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9652 }; 9653 struct rpc_task *ret; 9654 9655 ret = ERR_PTR(-EIO); 9656 if (!refcount_inc_not_zero(&clp->cl_count)) 9657 goto out_err; 9658 9659 ret = ERR_PTR(-ENOMEM); 9660 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9661 if (calldata == NULL) 9662 goto out_put_clp; 9663 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9664 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9665 msg.rpc_argp = &calldata->args; 9666 msg.rpc_resp = &calldata->res; 9667 calldata->clp = clp; 9668 task_setup_data.callback_data = calldata; 9669 9670 ret = rpc_run_task(&task_setup_data); 9671 if (IS_ERR(ret)) 9672 goto out_err; 9673 return ret; 9674 out_put_clp: 9675 nfs_put_client(clp); 9676 out_err: 9677 nfs41_release_slot(slot); 9678 return ret; 9679 } 9680 9681 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9682 { 9683 struct rpc_task *task; 
9684 int ret = 0; 9685 9686 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9687 return -EAGAIN; 9688 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9689 if (IS_ERR(task)) 9690 ret = PTR_ERR(task); 9691 else 9692 rpc_put_task_async(task); 9693 dprintk("<-- %s status=%d\n", __func__, ret); 9694 return ret; 9695 } 9696 9697 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9698 { 9699 struct rpc_task *task; 9700 int ret; 9701 9702 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9703 if (IS_ERR(task)) { 9704 ret = PTR_ERR(task); 9705 goto out; 9706 } 9707 ret = rpc_wait_for_completion_task(task); 9708 if (!ret) 9709 ret = task->tk_status; 9710 rpc_put_task(task); 9711 out: 9712 dprintk("<-- %s status=%d\n", __func__, ret); 9713 return ret; 9714 } 9715 9716 struct nfs4_reclaim_complete_data { 9717 struct nfs_client *clp; 9718 struct nfs41_reclaim_complete_args arg; 9719 struct nfs41_reclaim_complete_res res; 9720 }; 9721 9722 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9723 { 9724 struct nfs4_reclaim_complete_data *calldata = data; 9725 9726 nfs4_setup_sequence(calldata->clp, 9727 &calldata->arg.seq_args, 9728 &calldata->res.seq_res, 9729 task); 9730 } 9731 9732 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9733 { 9734 switch(task->tk_status) { 9735 case 0: 9736 wake_up_all(&clp->cl_lock_waitq); 9737 fallthrough; 9738 case -NFS4ERR_COMPLETE_ALREADY: 9739 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9740 break; 9741 case -NFS4ERR_DELAY: 9742 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9743 fallthrough; 9744 case -NFS4ERR_RETRY_UNCACHED_REP: 9745 case -EACCES: 9746 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9747 __func__, task->tk_status, clp->cl_hostname); 9748 return -EAGAIN; 9749 case -NFS4ERR_BADSESSION: 9750 case -NFS4ERR_DEADSESSION: 9751 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9752 break; 9753 default: 9754 nfs4_schedule_lease_recovery(clp); 9755 } 9756 return 0; 9757 } 9758 9759 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9760 { 9761 struct nfs4_reclaim_complete_data *calldata = data; 9762 struct nfs_client *clp = calldata->clp; 9763 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9764 9765 if (!nfs41_sequence_done(task, res)) 9766 return; 9767 9768 trace_nfs4_reclaim_complete(clp, task->tk_status); 9769 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9770 rpc_restart_call_prepare(task); 9771 return; 9772 } 9773 } 9774 9775 static void nfs4_free_reclaim_complete_data(void *data) 9776 { 9777 struct nfs4_reclaim_complete_data *calldata = data; 9778 9779 kfree(calldata); 9780 } 9781 9782 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9783 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9784 .rpc_call_done = nfs4_reclaim_complete_done, 9785 .rpc_release = nfs4_free_reclaim_complete_data, 9786 }; 9787 9788 /* 9789 * Issue a global reclaim complete. 
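 * The one_fs argument is left at zero, so this signals completion of state reclaim for the whole client rather than for a single filesystem.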
9790 */ 9791 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9792 const struct cred *cred) 9793 { 9794 struct nfs4_reclaim_complete_data *calldata; 9795 struct rpc_message msg = { 9796 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9797 .rpc_cred = cred, 9798 }; 9799 struct rpc_task_setup task_setup_data = { 9800 .rpc_client = clp->cl_rpcclient, 9801 .rpc_message = &msg, 9802 .callback_ops = &nfs4_reclaim_complete_call_ops, 9803 .flags = RPC_TASK_NO_ROUND_ROBIN, 9804 }; 9805 int status = -ENOMEM; 9806 9807 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9808 if (calldata == NULL) 9809 goto out; 9810 calldata->clp = clp; 9811 calldata->arg.one_fs = 0; 9812 9813 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9814 msg.rpc_argp = &calldata->arg; 9815 msg.rpc_resp = &calldata->res; 9816 task_setup_data.callback_data = calldata; 9817 status = nfs4_call_sync_custom(&task_setup_data); 9818 out: 9819 dprintk("<-- %s status=%d\n", __func__, status); 9820 return status; 9821 } 9822 9823 static void 9824 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9825 { 9826 struct nfs4_layoutget *lgp = calldata; 9827 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9828 9829 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9830 &lgp->res.seq_res, task); 9831 } 9832 9833 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9834 { 9835 struct nfs4_layoutget *lgp = calldata; 9836 9837 nfs41_sequence_process(task, &lgp->res.seq_res); 9838 } 9839 9840 static int 9841 nfs4_layoutget_handle_exception(struct rpc_task *task, 9842 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9843 { 9844 struct inode *inode = lgp->args.inode; 9845 struct nfs_server *server = NFS_SERVER(inode); 9846 struct pnfs_layout_hdr *lo = lgp->lo; 9847 int nfs4err = task->tk_status; 9848 int err, status = 0; 9849 LIST_HEAD(head); 9850 9851 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9852 9853 nfs4_sequence_free_slot(&lgp->res.seq_res); 9854 9855 exception->state = NULL; 9856 exception->stateid = NULL; 9857 9858 switch (nfs4err) { 9859 case 0: 9860 goto out; 9861 9862 /* 9863 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9864 * on the file. set tk_status to -ENODATA to tell upper layer to 9865 * retry go inband. 9866 */ 9867 case -NFS4ERR_LAYOUTUNAVAILABLE: 9868 status = -ENODATA; 9869 goto out; 9870 /* 9871 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9872 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9873 */ 9874 case -NFS4ERR_BADLAYOUT: 9875 status = -EOVERFLOW; 9876 goto out; 9877 /* 9878 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 9879 * (or clients) writing to the same RAID stripe except when 9880 * the minlength argument is 0 (see RFC5661 section 18.43.3). 9881 * 9882 * Treat it like we would RECALLCONFLICT -- we retry for a little 9883 * while, and then eventually give up. 9884 */ 9885 case -NFS4ERR_LAYOUTTRYLATER: 9886 if (lgp->args.minlength == 0) { 9887 status = -EOVERFLOW; 9888 goto out; 9889 } 9890 status = -EBUSY; 9891 break; 9892 case -NFS4ERR_RECALLCONFLICT: 9893 case -NFS4ERR_RETURNCONFLICT: 9894 status = -ERECALLCONFLICT; 9895 break; 9896 case -NFS4ERR_DELEG_REVOKED: 9897 case -NFS4ERR_ADMIN_REVOKED: 9898 case -NFS4ERR_EXPIRED: 9899 case -NFS4ERR_BAD_STATEID: 9900 exception->timeout = 0; 9901 spin_lock(&inode->i_lock); 9902 /* If the open stateid was bad, then recover it. 
*/ 9903 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9904 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9905 spin_unlock(&inode->i_lock); 9906 exception->state = lgp->args.ctx->state; 9907 exception->stateid = &lgp->args.stateid; 9908 break; 9909 } 9910 9911 /* 9912 * Mark the bad layout state as invalid, then retry 9913 */ 9914 pnfs_mark_layout_stateid_invalid(lo, &head); 9915 spin_unlock(&inode->i_lock); 9916 nfs_commit_inode(inode, 0); 9917 pnfs_free_lseg_list(&head); 9918 status = -EAGAIN; 9919 goto out; 9920 } 9921 9922 err = nfs4_handle_exception(server, nfs4err, exception); 9923 if (!status) { 9924 if (exception->retry) 9925 status = -EAGAIN; 9926 else 9927 status = err; 9928 } 9929 out: 9930 return status; 9931 } 9932 9933 size_t max_response_pages(struct nfs_server *server) 9934 { 9935 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9936 return nfs_page_array_len(0, max_resp_sz); 9937 } 9938 9939 static void nfs4_layoutget_release(void *calldata) 9940 { 9941 struct nfs4_layoutget *lgp = calldata; 9942 9943 nfs4_sequence_free_slot(&lgp->res.seq_res); 9944 pnfs_layoutget_free(lgp); 9945 } 9946 9947 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9948 .rpc_call_prepare = nfs4_layoutget_prepare, 9949 .rpc_call_done = nfs4_layoutget_done, 9950 .rpc_release = nfs4_layoutget_release, 9951 }; 9952 9953 struct pnfs_layout_segment * 9954 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 9955 struct nfs4_exception *exception) 9956 { 9957 struct inode *inode = lgp->args.inode; 9958 struct nfs_server *server = NFS_SERVER(inode); 9959 struct rpc_task *task; 9960 struct rpc_message msg = { 9961 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 9962 .rpc_argp = &lgp->args, 9963 .rpc_resp = &lgp->res, 9964 .rpc_cred = lgp->cred, 9965 }; 9966 struct rpc_task_setup task_setup_data = { 9967 .rpc_client = server->client, 9968 .rpc_message = &msg, 9969 .callback_ops = &nfs4_layoutget_call_ops, 9970 .callback_data = lgp, 9971 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 9972 RPC_TASK_MOVEABLE, 9973 }; 9974 struct pnfs_layout_segment *lseg = NULL; 9975 int status = 0; 9976 9977 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 9978 exception->retry = 0; 9979 9980 task = rpc_run_task(&task_setup_data); 9981 if (IS_ERR(task)) 9982 return ERR_CAST(task); 9983 9984 status = rpc_wait_for_completion_task(task); 9985 if (status != 0) 9986 goto out; 9987 9988 if (task->tk_status < 0) { 9989 exception->retry = 1; 9990 status = nfs4_layoutget_handle_exception(task, lgp, exception); 9991 } else if (lgp->res.layoutp->len == 0) { 9992 exception->retry = 1; 9993 status = -EAGAIN; 9994 nfs4_update_delay(&exception->timeout); 9995 } else 9996 lseg = pnfs_layout_process(lgp); 9997 out: 9998 trace_nfs4_layoutget(lgp->args.ctx, 9999 &lgp->args.range, 10000 &lgp->res.range, 10001 &lgp->res.stateid, 10002 status); 10003 10004 rpc_put_task(task); 10005 dprintk("<-- %s status=%d\n", __func__, status); 10006 if (status) 10007 return ERR_PTR(status); 10008 return lseg; 10009 } 10010 10011 static void 10012 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 10013 { 10014 struct nfs4_layoutreturn *lrp = calldata; 10015 10016 nfs4_setup_sequence(lrp->clp, 10017 &lrp->args.seq_args, 10018 &lrp->res.seq_res, 10019 task); 10020 if (!pnfs_layout_is_valid(lrp->args.layout)) 10021 rpc_exit(task, 0); 10022 } 10023 10024 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 10025 { 10026 struct 
nfs4_layoutreturn *lrp = calldata; 10027 struct nfs_server *server; 10028 10029 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10030 return; 10031 10032 if (task->tk_rpc_status == -ETIMEDOUT) { 10033 lrp->rpc_status = -EAGAIN; 10034 lrp->res.lrs_present = 0; 10035 return; 10036 } 10037 /* 10038 * Was there an RPC level error? Assume the call succeeded, 10039 * and that we need to release the layout 10040 */ 10041 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10042 lrp->res.lrs_present = 0; 10043 return; 10044 } 10045 10046 server = NFS_SERVER(lrp->args.inode); 10047 switch (task->tk_status) { 10048 case -NFS4ERR_OLD_STATEID: 10049 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10050 &lrp->args.range, 10051 lrp->args.inode)) 10052 goto out_restart; 10053 fallthrough; 10054 default: 10055 task->tk_status = 0; 10056 lrp->res.lrs_present = 0; 10057 fallthrough; 10058 case 0: 10059 break; 10060 case -NFS4ERR_BADSESSION: 10061 case -NFS4ERR_DEADSESSION: 10062 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10063 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10064 task->tk_status); 10065 lrp->res.lrs_present = 0; 10066 lrp->rpc_status = -EAGAIN; 10067 task->tk_status = 0; 10068 break; 10069 case -NFS4ERR_DELAY: 10070 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10071 -EAGAIN) 10072 goto out_restart; 10073 lrp->res.lrs_present = 0; 10074 break; 10075 } 10076 return; 10077 out_restart: 10078 task->tk_status = 0; 10079 nfs4_sequence_free_slot(&lrp->res.seq_res); 10080 rpc_restart_call_prepare(task); 10081 } 10082 10083 static void nfs4_layoutreturn_release(void *calldata) 10084 { 10085 struct nfs4_layoutreturn *lrp = calldata; 10086 struct pnfs_layout_hdr *lo = lrp->args.layout; 10087 10088 if (lrp->rpc_status == 0 || !lrp->inode) 10089 pnfs_layoutreturn_free_lsegs( 10090 lo, &lrp->args.stateid, &lrp->args.range, 10091 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 10092 else 10093 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10094 &lrp->args.range); 10095 nfs4_sequence_free_slot(&lrp->res.seq_res); 10096 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10097 lrp->ld_private.ops->free(&lrp->ld_private); 10098 pnfs_put_layout_hdr(lrp->args.layout); 10099 nfs_iput_and_deactive(lrp->inode); 10100 put_cred(lrp->cred); 10101 kfree(calldata); 10102 } 10103 10104 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10105 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10106 .rpc_call_done = nfs4_layoutreturn_done, 10107 .rpc_release = nfs4_layoutreturn_release, 10108 }; 10109 10110 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10111 { 10112 struct rpc_task *task; 10113 struct rpc_message msg = { 10114 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10115 .rpc_argp = &lrp->args, 10116 .rpc_resp = &lrp->res, 10117 .rpc_cred = lrp->cred, 10118 }; 10119 struct rpc_task_setup task_setup_data = { 10120 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10121 .rpc_message = &msg, 10122 .callback_ops = &nfs4_layoutreturn_call_ops, 10123 .callback_data = lrp, 10124 .flags = RPC_TASK_MOVEABLE, 10125 }; 10126 int status = 0; 10127 10128 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10129 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10130 &task_setup_data.rpc_client, &msg); 10131 10132 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10133 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10134 if (!lrp->inode) { 10135 nfs4_layoutreturn_release(lrp); 10136 return -EAGAIN; 10137 } 10138 task_setup_data.flags |= RPC_TASK_ASYNC; 10139 } 10140 if (!lrp->inode) 10141 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10142 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10143 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10144 1); 10145 else 10146 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10147 0); 10148 task = rpc_run_task(&task_setup_data); 10149 if (IS_ERR(task)) 10150 return PTR_ERR(task); 10151 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10152 status = task->tk_status; 10153 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10154 dprintk("<-- %s status=%d\n", __func__, status); 10155 rpc_put_task(task); 10156 return status; 10157 } 10158 10159 static int 10160 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10161 struct pnfs_device *pdev, 10162 const struct cred *cred) 10163 { 10164 struct nfs4_getdeviceinfo_args args = { 10165 .pdev = pdev, 10166 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10167 NOTIFY_DEVICEID4_DELETE, 10168 }; 10169 struct nfs4_getdeviceinfo_res res = { 10170 .pdev = pdev, 10171 }; 10172 struct rpc_message msg = { 10173 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10174 .rpc_argp = &args, 10175 .rpc_resp = &res, 10176 .rpc_cred = cred, 10177 }; 10178 int status; 10179 10180 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10181 if (res.notification & ~args.notify_types) 10182 dprintk("%s: unsupported notification\n", __func__); 10183 if (res.notification != args.notify_types) 10184 pdev->nocache = 1; 10185 10186 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10187 10188 dprintk("<-- %s status=%d\n", __func__, status); 10189 10190 return status; 10191 } 10192 10193 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10194 struct pnfs_device *pdev, 10195 const struct cred *cred) 10196 { 10197 struct nfs4_exception exception = { }; 10198 int err; 10199 
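/* Retry the call for as long as nfs4_handle_exception() requests it, e.g. after backing off on NFS4ERR_DELAY. */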
10200 do { 10201 err = nfs4_handle_exception(server, 10202 _nfs4_proc_getdeviceinfo(server, pdev, cred), 10203 &exception); 10204 } while (exception.retry); 10205 return err; 10206 } 10207 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 10208 10209 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 10210 { 10211 struct nfs4_layoutcommit_data *data = calldata; 10212 struct nfs_server *server = NFS_SERVER(data->args.inode); 10213 10214 nfs4_setup_sequence(server->nfs_client, 10215 &data->args.seq_args, 10216 &data->res.seq_res, 10217 task); 10218 } 10219 10220 static void 10221 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 10222 { 10223 struct nfs4_layoutcommit_data *data = calldata; 10224 struct nfs_server *server = NFS_SERVER(data->args.inode); 10225 10226 if (!nfs41_sequence_done(task, &data->res.seq_res)) 10227 return; 10228 10229 switch (task->tk_status) { /* Just ignore these failures */ 10230 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 10231 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 10232 case -NFS4ERR_BADLAYOUT: /* no layout */ 10233 case -NFS4ERR_GRACE: /* loca_reclaim always false */ 10234 task->tk_status = 0; 10235 break; 10236 case 0: 10237 break; 10238 default: 10239 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 10240 rpc_restart_call_prepare(task); 10241 return; 10242 } 10243 } 10244 } 10245 10246 static void nfs4_layoutcommit_release(void *calldata) 10247 { 10248 struct nfs4_layoutcommit_data *data = calldata; 10249 10250 pnfs_cleanup_layoutcommit(data); 10251 nfs_post_op_update_inode_force_wcc(data->args.inode, 10252 data->res.fattr); 10253 put_cred(data->cred); 10254 nfs_iput_and_deactive(data->inode); 10255 kfree(data); 10256 } 10257 10258 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 10259 .rpc_call_prepare = nfs4_layoutcommit_prepare, 10260 .rpc_call_done = nfs4_layoutcommit_done, 10261 .rpc_release = nfs4_layoutcommit_release, 10262 }; 10263 10264 int 10265 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 10266 { 10267 struct rpc_message msg = { 10268 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10269 .rpc_argp = &data->args, 10270 .rpc_resp = &data->res, 10271 .rpc_cred = data->cred, 10272 }; 10273 struct rpc_task_setup task_setup_data = { 10274 .task = &data->task, 10275 .rpc_client = NFS_CLIENT(data->args.inode), 10276 .rpc_message = &msg, 10277 .callback_ops = &nfs4_layoutcommit_ops, 10278 .callback_data = data, 10279 .flags = RPC_TASK_MOVEABLE, 10280 }; 10281 struct rpc_task *task; 10282 int status = 0; 10283 10284 dprintk("NFS: initiating layoutcommit call.
sync %d " 10285 "lbw: %llu inode %lu\n", sync, 10286 data->args.lastbytewritten, 10287 data->args.inode->i_ino); 10288 10289 if (!sync) { 10290 data->inode = nfs_igrab_and_active(data->args.inode); 10291 if (data->inode == NULL) { 10292 nfs4_layoutcommit_release(data); 10293 return -EAGAIN; 10294 } 10295 task_setup_data.flags = RPC_TASK_ASYNC; 10296 } 10297 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 10298 task = rpc_run_task(&task_setup_data); 10299 if (IS_ERR(task)) 10300 return PTR_ERR(task); 10301 if (sync) 10302 status = task->tk_status; 10303 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10304 dprintk("%s: status %d\n", __func__, status); 10305 rpc_put_task(task); 10306 return status; 10307 } 10308 10309 /* 10310 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if 10311 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10312 */ 10313 static int 10314 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 10315 struct nfs_fsinfo *info, 10316 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 10317 { 10318 struct nfs41_secinfo_no_name_args args = { 10319 .style = SECINFO_STYLE_CURRENT_FH, 10320 }; 10321 struct nfs4_secinfo_res res = { 10322 .flavors = flavors, 10323 }; 10324 struct rpc_message msg = { 10325 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10326 .rpc_argp = &args, 10327 .rpc_resp = &res, 10328 }; 10329 struct nfs4_call_sync_data data = { 10330 .seq_server = server, 10331 .seq_args = &args.seq_args, 10332 .seq_res = &res.seq_res, 10333 }; 10334 struct rpc_task_setup task_setup = { 10335 .rpc_client = server->client, 10336 .rpc_message = &msg, 10337 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 10338 .callback_data = &data, 10339 .flags = RPC_TASK_NO_ROUND_ROBIN, 10340 }; 10341 const struct cred *cred = NULL; 10342 int status; 10343 10344 if (use_integrity) { 10345 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 10346 10347 cred = nfs4_get_clid_cred(server->nfs_client); 10348 msg.rpc_cred = cred; 10349 } 10350 10351 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 10352 status = nfs4_call_sync_custom(&task_setup); 10353 dprintk("<-- %s status=%d\n", __func__, status); 10354 10355 put_cred(cred); 10356 10357 return status; 10358 } 10359 10360 static int 10361 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 10362 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 10363 { 10364 struct nfs4_exception exception = { 10365 .interruptible = true, 10366 }; 10367 int err; 10368 do { 10369 /* first try using integrity protection */ 10370 err = -NFS4ERR_WRONGSEC; 10371 10372 /* try to use integrity protection with machine cred */ 10373 if (_nfs4_is_integrity_protected(server->nfs_client)) 10374 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 10375 flavors, true); 10376 10377 /* 10378 * if unable to use integrity protection, or SECINFO with 10379 * integrity protection returns NFS4ERR_WRONGSEC (which is 10380 * disallowed by spec, but exists in deployed servers) use 10381 * the current filesystem's rpc_client and the user cred.
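 * (i.e. retry the SECINFO_NO_NAME call with use_integrity set to false, as done below).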
10382 */ 10383 if (err == -NFS4ERR_WRONGSEC) 10384 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 10385 flavors, false); 10386 10387 switch (err) { 10388 case 0: 10389 case -NFS4ERR_WRONGSEC: 10390 case -ENOTSUPP: 10391 goto out; 10392 default: 10393 err = nfs4_handle_exception(server, err, &exception); 10394 } 10395 } while (exception.retry); 10396 out: 10397 return err; 10398 } 10399 10400 static int 10401 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 10402 struct nfs_fsinfo *info) 10403 { 10404 int err; 10405 struct page *page; 10406 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10407 struct nfs4_secinfo_flavors *flavors; 10408 struct nfs4_secinfo4 *secinfo; 10409 int i; 10410 10411 page = alloc_page(GFP_KERNEL); 10412 if (!page) { 10413 err = -ENOMEM; 10414 goto out; 10415 } 10416 10417 flavors = page_address(page); 10418 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 10419 10420 /* 10421 * Fall back on "guess and check" method if 10422 * the server doesn't support SECINFO_NO_NAME 10423 */ 10424 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10425 err = nfs4_find_root_sec(server, fhandle, info); 10426 goto out_freepage; 10427 } 10428 if (err) 10429 goto out_freepage; 10430 10431 for (i = 0; i < flavors->num_flavors; i++) { 10432 secinfo = &flavors->flavors[i]; 10433 10434 switch (secinfo->flavor) { 10435 case RPC_AUTH_NULL: 10436 case RPC_AUTH_UNIX: 10437 case RPC_AUTH_GSS: 10438 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10439 &secinfo->flavor_info); 10440 break; 10441 default: 10442 flavor = RPC_AUTH_MAXFLAVOR; 10443 break; 10444 } 10445 10446 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10447 flavor = RPC_AUTH_MAXFLAVOR; 10448 10449 if (flavor != RPC_AUTH_MAXFLAVOR) { 10450 err = nfs4_lookup_root_sec(server, fhandle, 10451 info, flavor); 10452 if (!err) 10453 break; 10454 } 10455 } 10456 10457 if (flavor == RPC_AUTH_MAXFLAVOR) 10458 err = -EPERM; 10459 10460 out_freepage: 10461 put_page(page); 10462 if (err == -EACCES) 10463 return -EPERM; 10464 out: 10465 return err; 10466 } 10467 10468 static int _nfs41_test_stateid(struct nfs_server *server, 10469 const nfs4_stateid *stateid, 10470 const struct cred *cred) 10471 { 10472 int status; 10473 struct nfs41_test_stateid_args args = { 10474 .stateid = *stateid, 10475 }; 10476 struct nfs41_test_stateid_res res; 10477 struct rpc_message msg = { 10478 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10479 .rpc_argp = &args, 10480 .rpc_resp = &res, 10481 .rpc_cred = cred, 10482 }; 10483 struct rpc_clnt *rpc_client = server->client; 10484 10485 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10486 &rpc_client, &msg); 10487 10488 dprintk("NFS call test_stateid %p\n", stateid); 10489 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10490 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10491 &args.seq_args, &res.seq_res); 10492 if (status != NFS_OK) { 10493 dprintk("NFS reply test_stateid: failed, %d\n", status); 10494 return status; 10495 } 10496 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10497 return -res.status; 10498 } 10499 10500 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10501 int err, struct nfs4_exception *exception) 10502 { 10503 exception->retry = 0; 10504 switch(err) { 10505 case -NFS4ERR_DELAY: 10506 case -NFS4ERR_RETRY_UNCACHED_REP: 10507 nfs4_handle_exception(server, err, exception); 10508 break; 10509 case -NFS4ERR_BADSESSION: 10510 case -NFS4ERR_BADSLOT: 
10511 case -NFS4ERR_BAD_HIGH_SLOT: 10512 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10513 case -NFS4ERR_DEADSESSION: 10514 nfs4_do_handle_exception(server, err, exception); 10515 } 10516 } 10517 10518 /** 10519 * nfs41_test_stateid - perform a TEST_STATEID operation 10520 * 10521 * @server: server / transport on which to perform the operation 10522 * @stateid: state ID to test 10523 * @cred: credential 10524 * 10525 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10526 * Otherwise a negative NFS4ERR value is returned if the operation 10527 * failed or the state ID is not currently valid. 10528 */ 10529 static int nfs41_test_stateid(struct nfs_server *server, 10530 const nfs4_stateid *stateid, 10531 const struct cred *cred) 10532 { 10533 struct nfs4_exception exception = { 10534 .interruptible = true, 10535 }; 10536 int err; 10537 do { 10538 err = _nfs41_test_stateid(server, stateid, cred); 10539 nfs4_handle_delay_or_session_error(server, err, &exception); 10540 } while (exception.retry); 10541 return err; 10542 } 10543 10544 struct nfs_free_stateid_data { 10545 struct nfs_server *server; 10546 struct nfs41_free_stateid_args args; 10547 struct nfs41_free_stateid_res res; 10548 }; 10549 10550 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10551 { 10552 struct nfs_free_stateid_data *data = calldata; 10553 nfs4_setup_sequence(data->server->nfs_client, 10554 &data->args.seq_args, 10555 &data->res.seq_res, 10556 task); 10557 } 10558 10559 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10560 { 10561 struct nfs_free_stateid_data *data = calldata; 10562 10563 nfs41_sequence_done(task, &data->res.seq_res); 10564 10565 switch (task->tk_status) { 10566 case -NFS4ERR_DELAY: 10567 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10568 rpc_restart_call_prepare(task); 10569 } 10570 } 10571 10572 static void nfs41_free_stateid_release(void *calldata) 10573 { 10574 struct nfs_free_stateid_data *data = calldata; 10575 struct nfs_client *clp = data->server->nfs_client; 10576 10577 nfs_put_client(clp); 10578 kfree(calldata); 10579 } 10580 10581 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10582 .rpc_call_prepare = nfs41_free_stateid_prepare, 10583 .rpc_call_done = nfs41_free_stateid_done, 10584 .rpc_release = nfs41_free_stateid_release, 10585 }; 10586 10587 /** 10588 * nfs41_free_stateid - perform a FREE_STATEID operation 10589 * 10590 * @server: server / transport on which to perform the operation 10591 * @stateid: state ID to release 10592 * @cred: credential 10593 * @privileged: set to true if this call needs to be privileged 10594 * 10595 * Note: this function is always asynchronous. 
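 * Callers only see errors in setting up or launching the RPC; the result of the FREE_STATEID operation itself is handled by the rpc_call_ops above.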
10596 */ 10597 static int nfs41_free_stateid(struct nfs_server *server, 10598 const nfs4_stateid *stateid, 10599 const struct cred *cred, 10600 bool privileged) 10601 { 10602 struct rpc_message msg = { 10603 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10604 .rpc_cred = cred, 10605 }; 10606 struct rpc_task_setup task_setup = { 10607 .rpc_client = server->client, 10608 .rpc_message = &msg, 10609 .callback_ops = &nfs41_free_stateid_ops, 10610 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10611 }; 10612 struct nfs_free_stateid_data *data; 10613 struct rpc_task *task; 10614 struct nfs_client *clp = server->nfs_client; 10615 10616 if (!refcount_inc_not_zero(&clp->cl_count)) 10617 return -EIO; 10618 10619 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10620 &task_setup.rpc_client, &msg); 10621 10622 dprintk("NFS call free_stateid %p\n", stateid); 10623 data = kmalloc(sizeof(*data), GFP_KERNEL); 10624 if (!data) 10625 return -ENOMEM; 10626 data->server = server; 10627 nfs4_stateid_copy(&data->args.stateid, stateid); 10628 10629 task_setup.callback_data = data; 10630 10631 msg.rpc_argp = &data->args; 10632 msg.rpc_resp = &data->res; 10633 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10634 task = rpc_run_task(&task_setup); 10635 if (IS_ERR(task)) 10636 return PTR_ERR(task); 10637 rpc_put_task(task); 10638 return 0; 10639 } 10640 10641 static void 10642 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10643 { 10644 const struct cred *cred = lsp->ls_state->owner->so_cred; 10645 10646 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10647 nfs4_free_lock_state(server, lsp); 10648 } 10649 10650 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10651 const nfs4_stateid *s2) 10652 { 10653 if (s1->type != s2->type) 10654 return false; 10655 10656 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10657 return false; 10658 10659 if (s1->seqid == s2->seqid) 10660 return true; 10661 10662 return s1->seqid == 0 || s2->seqid == 0; 10663 } 10664 10665 #endif /* CONFIG_NFS_V4_1 */ 10666 10667 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10668 const nfs4_stateid *s2) 10669 { 10670 return nfs4_stateid_match(s1, s2); 10671 } 10672 10673 10674 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10675 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10676 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10677 .recover_open = nfs4_open_reclaim, 10678 .recover_lock = nfs4_lock_reclaim, 10679 .establish_clid = nfs4_init_clientid, 10680 .detect_trunking = nfs40_discover_server_trunking, 10681 }; 10682 10683 #if defined(CONFIG_NFS_V4_1) 10684 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10685 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10686 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10687 .recover_open = nfs4_open_reclaim, 10688 .recover_lock = nfs4_lock_reclaim, 10689 .establish_clid = nfs41_init_clientid, 10690 .reclaim_complete = nfs41_proc_reclaim_complete, 10691 .detect_trunking = nfs41_discover_server_trunking, 10692 }; 10693 #endif /* CONFIG_NFS_V4_1 */ 10694 10695 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10696 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10697 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10698 .recover_open = nfs40_open_expired, 10699 .recover_lock = nfs4_lock_expired, 10700 .establish_clid = nfs4_init_clientid, 10701 }; 10702 10703 #if defined(CONFIG_NFS_V4_1) 10704 static const struct 
nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10705 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10706 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10707 .recover_open = nfs41_open_expired, 10708 .recover_lock = nfs41_lock_expired, 10709 .establish_clid = nfs41_init_clientid, 10710 }; 10711 #endif /* CONFIG_NFS_V4_1 */ 10712 10713 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10714 .sched_state_renewal = nfs4_proc_async_renew, 10715 .get_state_renewal_cred = nfs4_get_renew_cred, 10716 .renew_lease = nfs4_proc_renew, 10717 }; 10718 10719 #if defined(CONFIG_NFS_V4_1) 10720 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10721 .sched_state_renewal = nfs41_proc_async_sequence, 10722 .get_state_renewal_cred = nfs4_get_machine_cred, 10723 .renew_lease = nfs4_proc_sequence, 10724 }; 10725 #endif 10726 10727 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10728 .get_locations = _nfs40_proc_get_locations, 10729 .fsid_present = _nfs40_proc_fsid_present, 10730 }; 10731 10732 #if defined(CONFIG_NFS_V4_1) 10733 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10734 .get_locations = _nfs41_proc_get_locations, 10735 .fsid_present = _nfs41_proc_fsid_present, 10736 }; 10737 #endif /* CONFIG_NFS_V4_1 */ 10738 10739 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10740 .minor_version = 0, 10741 .init_caps = NFS_CAP_READDIRPLUS 10742 | NFS_CAP_ATOMIC_OPEN 10743 | NFS_CAP_POSIX_LOCK, 10744 .init_client = nfs40_init_client, 10745 .shutdown_client = nfs40_shutdown_client, 10746 .match_stateid = nfs4_match_stateid, 10747 .find_root_sec = nfs4_find_root_sec, 10748 .free_lock_state = nfs4_release_lockowner, 10749 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10750 .alloc_seqid = nfs_alloc_seqid, 10751 .call_sync_ops = &nfs40_call_sync_ops, 10752 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10753 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10754 .state_renewal_ops = &nfs40_state_renewal_ops, 10755 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10756 }; 10757 10758 #if defined(CONFIG_NFS_V4_1) 10759 static struct nfs_seqid * 10760 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10761 { 10762 return NULL; 10763 } 10764 10765 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10766 .minor_version = 1, 10767 .init_caps = NFS_CAP_READDIRPLUS 10768 | NFS_CAP_ATOMIC_OPEN 10769 | NFS_CAP_POSIX_LOCK 10770 | NFS_CAP_STATEID_NFSV41 10771 | NFS_CAP_ATOMIC_OPEN_V1 10772 | NFS_CAP_LGOPEN 10773 | NFS_CAP_MOVEABLE, 10774 .init_client = nfs41_init_client, 10775 .shutdown_client = nfs41_shutdown_client, 10776 .match_stateid = nfs41_match_stateid, 10777 .find_root_sec = nfs41_find_root_sec, 10778 .free_lock_state = nfs41_free_lock_state, 10779 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10780 .alloc_seqid = nfs_alloc_no_seqid, 10781 .session_trunk = nfs4_test_session_trunk, 10782 .call_sync_ops = &nfs41_call_sync_ops, 10783 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10784 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10785 .state_renewal_ops = &nfs41_state_renewal_ops, 10786 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10787 }; 10788 #endif 10789 10790 #if defined(CONFIG_NFS_V4_2) 10791 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10792 .minor_version = 2, 10793 .init_caps = NFS_CAP_READDIRPLUS 10794 | NFS_CAP_ATOMIC_OPEN 10795 | NFS_CAP_POSIX_LOCK 10796 | NFS_CAP_STATEID_NFSV41 10797 | 
NFS_CAP_ATOMIC_OPEN_V1 10798 | NFS_CAP_LGOPEN 10799 | NFS_CAP_ALLOCATE 10800 | NFS_CAP_COPY 10801 | NFS_CAP_OFFLOAD_CANCEL 10802 | NFS_CAP_COPY_NOTIFY 10803 | NFS_CAP_DEALLOCATE 10804 | NFS_CAP_SEEK 10805 | NFS_CAP_LAYOUTSTATS 10806 | NFS_CAP_CLONE 10807 | NFS_CAP_LAYOUTERROR 10808 | NFS_CAP_READ_PLUS 10809 | NFS_CAP_MOVEABLE 10810 | NFS_CAP_OFFLOAD_STATUS, 10811 .init_client = nfs41_init_client, 10812 .shutdown_client = nfs41_shutdown_client, 10813 .match_stateid = nfs41_match_stateid, 10814 .find_root_sec = nfs41_find_root_sec, 10815 .free_lock_state = nfs41_free_lock_state, 10816 .call_sync_ops = &nfs41_call_sync_ops, 10817 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10818 .alloc_seqid = nfs_alloc_no_seqid, 10819 .session_trunk = nfs4_test_session_trunk, 10820 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10821 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10822 .state_renewal_ops = &nfs41_state_renewal_ops, 10823 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10824 }; 10825 #endif 10826 10827 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10828 [0] = &nfs_v4_0_minor_ops, 10829 #if defined(CONFIG_NFS_V4_1) 10830 [1] = &nfs_v4_1_minor_ops, 10831 #endif 10832 #if defined(CONFIG_NFS_V4_2) 10833 [2] = &nfs_v4_2_minor_ops, 10834 #endif 10835 }; 10836 10837 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10838 { 10839 ssize_t error, error2, error3; 10840 size_t left = size; 10841 10842 error = generic_listxattr(dentry, list, left); 10843 if (error < 0) 10844 return error; 10845 if (list) { 10846 list += error; 10847 left -= error; 10848 } 10849 10850 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); 10851 if (error2 < 0) 10852 return error2; 10853 10854 if (list) { 10855 list += error2; 10856 left -= error2; 10857 } 10858 10859 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10860 if (error3 < 0) 10861 return error3; 10862 10863 error += error2 + error3; 10864 if (size && error > size) 10865 return -ERANGE; 10866 return error; 10867 } 10868 10869 static void nfs4_enable_swap(struct inode *inode) 10870 { 10871 /* The state manager thread must always be running. 10872 * It will notice the client is a swapper, and stay put. 10873 */ 10874 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10875 10876 nfs4_schedule_state_manager(clp); 10877 } 10878 10879 static void nfs4_disable_swap(struct inode *inode) 10880 { 10881 /* The state manager thread will now exit once it is 10882 * woken. 
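 * This is done below by setting NFS4CLNT_RUN_MANAGER, clearing NFS4CLNT_MANAGER_AVAILABLE, and waking up the thread.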
10883 */ 10884 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10885 10886 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 10887 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 10888 wake_up_var(&clp->cl_state); 10889 } 10890 10891 static const struct inode_operations nfs4_dir_inode_operations = { 10892 .create = nfs_create, 10893 .lookup = nfs_lookup, 10894 .atomic_open = nfs_atomic_open, 10895 .link = nfs_link, 10896 .unlink = nfs_unlink, 10897 .symlink = nfs_symlink, 10898 .mkdir = nfs_mkdir, 10899 .rmdir = nfs_rmdir, 10900 .mknod = nfs_mknod, 10901 .rename = nfs_rename, 10902 .permission = nfs_permission, 10903 .getattr = nfs_getattr, 10904 .setattr = nfs_setattr, 10905 .listxattr = nfs4_listxattr, 10906 }; 10907 10908 static const struct inode_operations nfs4_file_inode_operations = { 10909 .permission = nfs_permission, 10910 .getattr = nfs_getattr, 10911 .setattr = nfs_setattr, 10912 .listxattr = nfs4_listxattr, 10913 }; 10914 10915 const struct nfs_rpc_ops nfs_v4_clientops = { 10916 .version = 4, /* protocol version */ 10917 .dentry_ops = &nfs4_dentry_operations, 10918 .dir_inode_ops = &nfs4_dir_inode_operations, 10919 .file_inode_ops = &nfs4_file_inode_operations, 10920 .file_ops = &nfs4_file_operations, 10921 .getroot = nfs4_proc_get_root, 10922 .submount = nfs4_submount, 10923 .try_get_tree = nfs4_try_get_tree, 10924 .getattr = nfs4_proc_getattr, 10925 .setattr = nfs4_proc_setattr, 10926 .lookup = nfs4_proc_lookup, 10927 .lookupp = nfs4_proc_lookupp, 10928 .access = nfs4_proc_access, 10929 .readlink = nfs4_proc_readlink, 10930 .create = nfs4_proc_create, 10931 .remove = nfs4_proc_remove, 10932 .unlink_setup = nfs4_proc_unlink_setup, 10933 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 10934 .unlink_done = nfs4_proc_unlink_done, 10935 .rename_setup = nfs4_proc_rename_setup, 10936 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 10937 .rename_done = nfs4_proc_rename_done, 10938 .link = nfs4_proc_link, 10939 .symlink = nfs4_proc_symlink, 10940 .mkdir = nfs4_proc_mkdir, 10941 .rmdir = nfs4_proc_rmdir, 10942 .readdir = nfs4_proc_readdir, 10943 .mknod = nfs4_proc_mknod, 10944 .statfs = nfs4_proc_statfs, 10945 .fsinfo = nfs4_proc_fsinfo, 10946 .pathconf = nfs4_proc_pathconf, 10947 .set_capabilities = nfs4_server_capabilities, 10948 .decode_dirent = nfs4_decode_dirent, 10949 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 10950 .read_setup = nfs4_proc_read_setup, 10951 .read_done = nfs4_read_done, 10952 .write_setup = nfs4_proc_write_setup, 10953 .write_done = nfs4_write_done, 10954 .commit_setup = nfs4_proc_commit_setup, 10955 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 10956 .commit_done = nfs4_commit_done, 10957 .lock = nfs4_proc_lock, 10958 .clear_acl_cache = nfs4_zap_acl_attr, 10959 .close_context = nfs4_close_context, 10960 .open_context = nfs4_atomic_open, 10961 .have_delegation = nfs4_have_delegation, 10962 .return_delegation = nfs4_inode_return_delegation, 10963 .alloc_client = nfs4_alloc_client, 10964 .init_client = nfs4_init_client, 10965 .free_client = nfs4_free_client, 10966 .create_server = nfs4_create_server, 10967 .clone_server = nfs_clone_server, 10968 .discover_trunking = nfs4_discover_trunking, 10969 .enable_swap = nfs4_enable_swap, 10970 .disable_swap = nfs4_disable_swap, 10971 }; 10972 10973 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 10974 .name = XATTR_NAME_NFSV4_ACL, 10975 .list = nfs4_xattr_list_nfs4_acl, 10976 .get = nfs4_xattr_get_nfs4_acl, 10977 .set = nfs4_xattr_set_nfs4_acl, 10978 }; 10979 10980 #if 
defined(CONFIG_NFS_V4_1) 10981 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 10982 .name = XATTR_NAME_NFSV4_DACL, 10983 .list = nfs4_xattr_list_nfs4_dacl, 10984 .get = nfs4_xattr_get_nfs4_dacl, 10985 .set = nfs4_xattr_set_nfs4_dacl, 10986 }; 10987 10988 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 10989 .name = XATTR_NAME_NFSV4_SACL, 10990 .list = nfs4_xattr_list_nfs4_sacl, 10991 .get = nfs4_xattr_get_nfs4_sacl, 10992 .set = nfs4_xattr_set_nfs4_sacl, 10993 }; 10994 #endif 10995 10996 #ifdef CONFIG_NFS_V4_2 10997 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 10998 .prefix = XATTR_USER_PREFIX, 10999 .get = nfs4_xattr_get_nfs4_user, 11000 .set = nfs4_xattr_set_nfs4_user, 11001 }; 11002 #endif 11003 11004 const struct xattr_handler * const nfs4_xattr_handlers[] = { 11005 &nfs4_xattr_nfs4_acl_handler, 11006 #if defined(CONFIG_NFS_V4_1) 11007 &nfs4_xattr_nfs4_dacl_handler, 11008 &nfs4_xattr_nfs4_sacl_handler, 11009 #endif 11010 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 11011 &nfs4_xattr_nfs4_label_handler, 11012 #endif 11013 #ifdef CONFIG_NFS_V4_2 11014 &nfs4_xattr_nfs4_user_handler, 11015 #endif 11016 NULL 11017 }; 11018
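/*
 * Illustrative note (not part of the original source): with the handlers above
 * registered, the NFSv4 ACL is exposed as the "system.nfs4_acl" extended
 * attribute (plus "system.nfs4_dacl"/"system.nfs4_sacl" on v4.1+ and "user."
 * xattrs on v4.2 mounts), so a userspace sketch such as
 *
 *	getfattr -n system.nfs4_acl /mnt/nfs/file
 *
 * (the mount path is purely hypothetical) would ultimately be serviced by
 * nfs4_xattr_get_nfs4_acl() above.
 */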