/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson   <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state, struct nfs4_label *ilabel,
			    struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, (void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	freezable_schedule_timeout_killable_unsafe(
		nfs4_update_delay(timeout));
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;
	switch(errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_OPENMODE:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (inode && nfs_async_inode_return_delegation(inode,
					NULL) == 0)
			goto wait_on_recovery;
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
		}
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR: %d Reset session\n", __func__,
			errorcode);
		nfs4_schedule_session_recovery(clp->cl_session, errorcode);
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
	case -NFS4ERR_GRACE:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		ret = nfs4_delay(server->client, &exception->timeout);
		goto out_retry;
	}
	if (exception->recovering) {
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;

	if (flavor == RPC_AUTH_GSS_KRB5I ||
	    flavor == RPC_AUTH_GSS_KRB5P)
		return true;

	return false;
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = 0;

	res->sr_slot = NULL;
}

static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
{
	args->sa_privileged = 1;
}

int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;
	res->sr_slot = slot;

out_start:
	rpc_call_start(task);
	return 0;

out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs40_setup_sequence);

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	if (slot == NULL)
		goto out;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
out:
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot = res->sr_slot;
	bool send_new_highest_used_slotid = false;

	tbl = slot->table;
	session = tbl->session;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	bool interrupted = false;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	session = slot->table->session;

	if (slot->interrupted) {
		slot->interrupted = 0;
		interrupted = true;
	}

	trace_nfs4_sequence_done(session, res);
	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++slot->seq_nr;
		clp = session->clp;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation..
		 * Mark the slot as having hosted an interrupted RPC call.
		 */
		slot->interrupted = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		/*
		 * Was the last operation on this sequence interrupted?
		 * If so, retry after bumping the sequence number.
		 */
		if (interrupted) {
			++slot->seq_nr;
			goto retry_nowait;
		}
		/*
		 * Could this slot have been previously retired?
		 * If so, then the server may be expecting seq_nr = 1!
		 */
		if (slot->seq_nr != 1) {
			slot->seq_nr = 1;
			goto retry_nowait;
		}
		break;
	case -NFS4ERR_SEQ_FALSE_RETRY:
		++slot->seq_nr;
		goto retry_nowait;
	default:
		/* Just update the slot sequence no. */
		++slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
out_noaction:
	return ret;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

int nfs41_setup_sequence(struct nfs4_session *session,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_success;

	tbl = &session->fc_slot_table;

	task->tk_timeout = 0;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
	    !args->sa_privileged) {
		/* The state manager will wait until the slot table is empty */
		dprintk("%s session is draining\n", __func__);
		goto out_sleep;
	}

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		/* If out of memory, try again in 1/4 second */
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		dprintk("<-- %s: no free slots\n", __func__);
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;

	dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
			slot->slot_nr, slot->seq_nr);

	res->sr_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	trace_nfs4_setup_sequence(session, args);
out_success:
	rpc_call_start(task);
	return 0;
out_sleep:
	/* Privileged tasks are queued with top priority */
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (!session)
		return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
					    args, res, task);

	dprintk("--> %s clp %p session %p sr_slot %u\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot->slot_nr : NFS4_NO_SLOT);

	ret = nfs41_setup_sequence(session, args, res, task);

	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	struct nfs4_session *session = nfs4_get_session(data->seq_server);

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
				    args, res, task);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server,
				data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	int ret;
	struct rpc_task *task;
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data
	};

	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfs_fscache_invalidate(dir);
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs4_label *a_label;
	struct nfs_fattr f_attr;
	struct nfs4_label *f_label;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	unsigned int file_created : 1;
	unsigned int is_recover : 1;
	int rpc_status;
	int cancelled;
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT)
		res |= NFS4_SHARE_WANT_NO_DELEG;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.f_label = p->f_label;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		struct nfs4_label *label,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.label = nfs4_label_copy(p->a_label, label);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	if (attrs != NULL && attrs->ia_valid != 0) {
		__u32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

static bool nfs_need_update_open_stateid(struct nfs4_state *state,
		nfs4_stateid *stateid)
{
	if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
		return true;
	if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs_test_and_clear_all_open_stateid(state);
		return true;
	}
	if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *arg_stateid,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle races with OPEN */
	if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
	    (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
		nfs_resync_open_stateid_locked(state);
		return;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	if (!nfs_need_update_open_stateid(state, stateid))
		return;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	spin_lock(&state->owner->so_lock);
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
	    test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);

	return ret;
}

static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	enum open_claim_type4 claim = opendata->o_arg.claim;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode, claim)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	switch (data->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				   "returning a delegation for "
				   "OPEN(CLAIM_DELEGATE_CUR)\n",
				   clp->cl_hostname);
		return;
	}
	if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
					 data->owner->so_cred,
					 &data->o_res);
	else
		nfs_inode_reclaim_delegation(state->inode,
					     data->owner->so_cred,
					     &data->o_res);
}

/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		if (data->rpc_status) {
			ret = data->rpc_status;
			goto err;
		}
		/* cached opens have already been processed */
		goto update;
	}

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		goto err;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
update:
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	atomic_inc(&state->count);

	return state;
err:
	return ERR_PTR(ret);

}

static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		trace_nfs4_cached_open(data->state);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	iput(inode);
out:
	nfs_release_seqid(data->o_arg.seqid);
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
		return _nfs4_opendata_reclaim_to_nfs4_state(data);
	return _nfs4_opendata_to_nfs4_state(data);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
		struct nfs4_state *state, enum open_claim_type4 claim)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
			NULL, NULL, claim, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

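/*
 * Re-send an OPEN for a single access mode during state recovery and
 * check that the server hands back the nfs4_state we are recovering;
 * if it resolves to a different state, the recovery fails with -ESTALE.
 */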
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
		fmode_t fmode)
{
	struct nfs4_state *newstate;
	int ret;

	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
		return 0;
	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	opendata->o_arg.share_access = nfs4_map_atomic_open_share(
			NFS_SB(opendata->dentry->d_sb),
			fmode, 0);
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	if (newstate != opendata->state)
		ret = -ESTALE;
	nfs4_close_state(newstate, fmode);
	return ret;
}

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	int ret;

	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
	smp_rmb();
	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
	if (ret != 0)
		return ret;
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		trace_nfs4_open_reclaim(ctx, 0, err);
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return -EAGAIN;
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
{
	switch (err) {
	default:
		printk(KERN_ERR "NFS: %s: unhandled error "
				"%d.\n", __func__, err);
	case 0:
	case -ENOENT:
	case -EAGAIN:
	case -ESTALE:
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
		return -EAGAIN;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	case -NFS4ERR_EXPIRED:
		/* Don't recall a delegation if it was lost */
		nfs4_schedule_lease_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_MOVED:
		nfs4_schedule_migration_recovery(server);
		return -EAGAIN;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OPENMODE:
		nfs_inode_find_state_and_recover(state->inode,
				stateid);
		nfs4_schedule_stateid_recovery(server, state);
		return -EAGAIN;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		ssleep(1);
		return -EAGAIN;
	case -ENOMEM:
	case -NFS4ERR_DENIED:
		/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		return 0;
	}
	return err;
}

int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
		struct nfs4_state *state, const nfs4_stateid *stateid,
		fmode_t type)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_opendata *opendata;
	int err = 0;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	write_sequnlock(&state->seqlock);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	switch (type & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
	case FMODE_WRITE:
		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
		if (err)
			break;
		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
		if (err)
			break;
	case FMODE_READ:
		err = nfs4_open_recover_helper(opendata, FMODE_READ);
	}
	nfs4_opendata_put(opendata);
	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
}

static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
			     &data->c_arg.seq_args, &data->c_res.seq_res, task);
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_sequence_done(task, &data->c_res.seq_res);

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
*/ 1909 if (!data->rpc_done) 1910 goto out_free; 1911 state = nfs4_opendata_to_nfs4_state(data); 1912 if (!IS_ERR(state)) 1913 nfs4_close_state(state, data->o_arg.fmode); 1914 out_free: 1915 nfs4_opendata_put(data); 1916 } 1917 1918 static const struct rpc_call_ops nfs4_open_confirm_ops = { 1919 .rpc_call_prepare = nfs4_open_confirm_prepare, 1920 .rpc_call_done = nfs4_open_confirm_done, 1921 .rpc_release = nfs4_open_confirm_release, 1922 }; 1923 1924 /* 1925 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1926 */ 1927 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1928 { 1929 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 1930 struct rpc_task *task; 1931 struct rpc_message msg = { 1932 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1933 .rpc_argp = &data->c_arg, 1934 .rpc_resp = &data->c_res, 1935 .rpc_cred = data->owner->so_cred, 1936 }; 1937 struct rpc_task_setup task_setup_data = { 1938 .rpc_client = server->client, 1939 .rpc_message = &msg, 1940 .callback_ops = &nfs4_open_confirm_ops, 1941 .callback_data = data, 1942 .workqueue = nfsiod_workqueue, 1943 .flags = RPC_TASK_ASYNC, 1944 }; 1945 int status; 1946 1947 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1); 1948 kref_get(&data->kref); 1949 data->rpc_done = 0; 1950 data->rpc_status = 0; 1951 data->timestamp = jiffies; 1952 if (data->is_recover) 1953 nfs4_set_sequence_privileged(&data->c_arg.seq_args); 1954 task = rpc_run_task(&task_setup_data); 1955 if (IS_ERR(task)) 1956 return PTR_ERR(task); 1957 status = nfs4_wait_for_completion_rpc_task(task); 1958 if (status != 0) { 1959 data->cancelled = 1; 1960 smp_wmb(); 1961 } else 1962 status = data->rpc_status; 1963 rpc_put_task(task); 1964 return status; 1965 } 1966 1967 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1968 { 1969 struct nfs4_opendata *data = calldata; 1970 struct nfs4_state_owner *sp = data->owner; 1971 struct nfs_client *clp = sp->so_server->nfs_client; 1972 enum open_claim_type4 claim = data->o_arg.claim; 1973 1974 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1975 goto out_wait; 1976 /* 1977 * Check if we still need to send an OPEN call, or if we can use 1978 * a delegation instead. 1979 */ 1980 if (data->state != NULL) { 1981 struct nfs_delegation *delegation; 1982 1983 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1984 goto out_no_action; 1985 rcu_read_lock(); 1986 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1987 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 1988 goto unlock_no_action; 1989 rcu_read_unlock(); 1990 } 1991 /* Update client id. 
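 * (read from the nfs_client at transmit time so that a clientid
 * established by any lease recovery that ran while this call was queued
 * is the one that goes out on the wire)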
*/ 1992 data->o_arg.clientid = clp->cl_clientid; 1993 switch (claim) { 1994 default: 1995 break; 1996 case NFS4_OPEN_CLAIM_PREVIOUS: 1997 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1998 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1999 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2000 case NFS4_OPEN_CLAIM_FH: 2001 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2002 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 2003 } 2004 data->timestamp = jiffies; 2005 if (nfs4_setup_sequence(data->o_arg.server, 2006 &data->o_arg.seq_args, 2007 &data->o_res.seq_res, 2008 task) != 0) 2009 nfs_release_seqid(data->o_arg.seqid); 2010 2011 /* Set the create mode (note dependency on the session type) */ 2012 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2013 if (data->o_arg.open_flags & O_EXCL) { 2014 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2015 if (nfs4_has_persistent_session(clp)) 2016 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2017 else if (clp->cl_mvops->minor_version > 0) 2018 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2019 } 2020 return; 2021 unlock_no_action: 2022 trace_nfs4_cached_open(data->state); 2023 rcu_read_unlock(); 2024 out_no_action: 2025 task->tk_action = NULL; 2026 out_wait: 2027 nfs4_sequence_done(task, &data->o_res.seq_res); 2028 } 2029 2030 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2031 { 2032 struct nfs4_opendata *data = calldata; 2033 2034 data->rpc_status = task->tk_status; 2035 2036 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 2037 return; 2038 2039 if (task->tk_status == 0) { 2040 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2041 switch (data->o_res.f_attr->mode & S_IFMT) { 2042 case S_IFREG: 2043 break; 2044 case S_IFLNK: 2045 data->rpc_status = -ELOOP; 2046 break; 2047 case S_IFDIR: 2048 data->rpc_status = -EISDIR; 2049 break; 2050 default: 2051 data->rpc_status = -ENOTDIR; 2052 } 2053 } 2054 renew_lease(data->o_res.server, data->timestamp); 2055 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2056 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2057 } 2058 data->rpc_done = 1; 2059 } 2060 2061 static void nfs4_open_release(void *calldata) 2062 { 2063 struct nfs4_opendata *data = calldata; 2064 struct nfs4_state *state = NULL; 2065 2066 /* If this request hasn't been cancelled, do nothing */ 2067 if (data->cancelled == 0) 2068 goto out_free; 2069 /* In case of error, no cleanup! */ 2070 if (data->rpc_status != 0 || !data->rpc_done) 2071 goto out_free; 2072 /* In case we need an open_confirm, no cleanup! 
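 * (the stateid has not been confirmed with OPEN_CONFIRM yet, so it is
 * not something a CLOSE could release here)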
*/ 2073 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2074 goto out_free; 2075 state = nfs4_opendata_to_nfs4_state(data); 2076 if (!IS_ERR(state)) 2077 nfs4_close_state(state, data->o_arg.fmode); 2078 out_free: 2079 nfs4_opendata_put(data); 2080 } 2081 2082 static const struct rpc_call_ops nfs4_open_ops = { 2083 .rpc_call_prepare = nfs4_open_prepare, 2084 .rpc_call_done = nfs4_open_done, 2085 .rpc_release = nfs4_open_release, 2086 }; 2087 2088 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 2089 { 2090 struct inode *dir = d_inode(data->dir); 2091 struct nfs_server *server = NFS_SERVER(dir); 2092 struct nfs_openargs *o_arg = &data->o_arg; 2093 struct nfs_openres *o_res = &data->o_res; 2094 struct rpc_task *task; 2095 struct rpc_message msg = { 2096 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2097 .rpc_argp = o_arg, 2098 .rpc_resp = o_res, 2099 .rpc_cred = data->owner->so_cred, 2100 }; 2101 struct rpc_task_setup task_setup_data = { 2102 .rpc_client = server->client, 2103 .rpc_message = &msg, 2104 .callback_ops = &nfs4_open_ops, 2105 .callback_data = data, 2106 .workqueue = nfsiod_workqueue, 2107 .flags = RPC_TASK_ASYNC, 2108 }; 2109 int status; 2110 2111 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 2112 kref_get(&data->kref); 2113 data->rpc_done = 0; 2114 data->rpc_status = 0; 2115 data->cancelled = 0; 2116 data->is_recover = 0; 2117 if (isrecover) { 2118 nfs4_set_sequence_privileged(&o_arg->seq_args); 2119 data->is_recover = 1; 2120 } 2121 task = rpc_run_task(&task_setup_data); 2122 if (IS_ERR(task)) 2123 return PTR_ERR(task); 2124 status = nfs4_wait_for_completion_rpc_task(task); 2125 if (status != 0) { 2126 data->cancelled = 1; 2127 smp_wmb(); 2128 } else 2129 status = data->rpc_status; 2130 rpc_put_task(task); 2131 2132 return status; 2133 } 2134 2135 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2136 { 2137 struct inode *dir = d_inode(data->dir); 2138 struct nfs_openres *o_res = &data->o_res; 2139 int status; 2140 2141 status = nfs4_run_open_task(data, 1); 2142 if (status != 0 || !data->rpc_done) 2143 return status; 2144 2145 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2146 2147 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2148 status = _nfs4_proc_open_confirm(data); 2149 if (status != 0) 2150 return status; 2151 } 2152 2153 return status; 2154 } 2155 2156 /* 2157 * Additional permission checks in order to distinguish between an 2158 * open for read, and an open for execute. This works around the 2159 * fact that NFSv4 OPEN treats read and execute permissions as being 2160 * the same. 2161 * Note that in the non-execute case, we want to turn off permission 2162 * checking if we just created a new file (POSIX open() semantics). 2163 */ 2164 static int nfs4_opendata_access(struct rpc_cred *cred, 2165 struct nfs4_opendata *opendata, 2166 struct nfs4_state *state, fmode_t fmode, 2167 int openflags) 2168 { 2169 struct nfs_access_entry cache; 2170 u32 mask; 2171 2172 /* access call failed or for some reason the server doesn't 2173 * support any access modes -- defer access call until later */ 2174 if (opendata->o_res.access_supported == 0) 2175 return 0; 2176 2177 mask = 0; 2178 /* 2179 * Use openflags to check for exec, because fmode won't 2180 * always have FMODE_EXEC set when file open for exec. 
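 * The VFS indicates an execve()-style open by setting __FMODE_EXEC in
 * the open flags, so test that bit here instead.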
2181 */ 2182 if (openflags & __FMODE_EXEC) { 2183 /* ONLY check for exec rights */ 2184 mask = MAY_EXEC; 2185 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2186 mask = MAY_READ; 2187 2188 cache.cred = cred; 2189 cache.jiffies = jiffies; 2190 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2191 nfs_access_add_cache(state->inode, &cache); 2192 2193 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2194 return 0; 2195 2196 /* even though OPEN succeeded, access is denied. Close the file */ 2197 nfs4_close_state(state, fmode); 2198 return -EACCES; 2199 } 2200 2201 /* 2202 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2203 */ 2204 static int _nfs4_proc_open(struct nfs4_opendata *data) 2205 { 2206 struct inode *dir = d_inode(data->dir); 2207 struct nfs_server *server = NFS_SERVER(dir); 2208 struct nfs_openargs *o_arg = &data->o_arg; 2209 struct nfs_openres *o_res = &data->o_res; 2210 int status; 2211 2212 status = nfs4_run_open_task(data, 0); 2213 if (!data->rpc_done) 2214 return status; 2215 if (status != 0) { 2216 if (status == -NFS4ERR_BADNAME && 2217 !(o_arg->open_flags & O_CREAT)) 2218 return -ENOENT; 2219 return status; 2220 } 2221 2222 nfs_fattr_map_and_free_names(server, &data->f_attr); 2223 2224 if (o_arg->open_flags & O_CREAT) { 2225 update_changeattr(dir, &o_res->cinfo); 2226 if (o_arg->open_flags & O_EXCL) 2227 data->file_created = 1; 2228 else if (o_res->cinfo.before != o_res->cinfo.after) 2229 data->file_created = 1; 2230 } 2231 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2232 server->caps &= ~NFS_CAP_POSIX_LOCK; 2233 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2234 status = _nfs4_proc_open_confirm(data); 2235 if (status != 0) 2236 return status; 2237 } 2238 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2239 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2240 return 0; 2241 } 2242 2243 static int nfs4_recover_expired_lease(struct nfs_server *server) 2244 { 2245 return nfs4_client_recover_expired_lease(server->nfs_client); 2246 } 2247 2248 /* 2249 * OPEN_EXPIRED: 2250 * reclaim state on the server after a network partition. 
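 * The open is simply replayed with a CLAIM_FH claim (see
 * _nfs4_open_expired() below), since there is no previous server state
 * left to reclaim.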
2251 * Assumes caller holds the appropriate lock 2252 */ 2253 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2254 { 2255 struct nfs4_opendata *opendata; 2256 int ret; 2257 2258 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2259 NFS4_OPEN_CLAIM_FH); 2260 if (IS_ERR(opendata)) 2261 return PTR_ERR(opendata); 2262 ret = nfs4_open_recover(opendata, state); 2263 if (ret == -ESTALE) 2264 d_drop(ctx->dentry); 2265 nfs4_opendata_put(opendata); 2266 return ret; 2267 } 2268 2269 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2270 { 2271 struct nfs_server *server = NFS_SERVER(state->inode); 2272 struct nfs4_exception exception = { }; 2273 int err; 2274 2275 do { 2276 err = _nfs4_open_expired(ctx, state); 2277 trace_nfs4_open_expired(ctx, 0, err); 2278 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2279 continue; 2280 switch (err) { 2281 default: 2282 goto out; 2283 case -NFS4ERR_GRACE: 2284 case -NFS4ERR_DELAY: 2285 nfs4_handle_exception(server, err, &exception); 2286 err = 0; 2287 } 2288 } while (exception.retry); 2289 out: 2290 return err; 2291 } 2292 2293 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2294 { 2295 struct nfs_open_context *ctx; 2296 int ret; 2297 2298 ctx = nfs4_state_find_open_context(state); 2299 if (IS_ERR(ctx)) 2300 return -EAGAIN; 2301 ret = nfs4_do_open_expired(ctx, state); 2302 put_nfs_open_context(ctx); 2303 return ret; 2304 } 2305 2306 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2307 { 2308 nfs_remove_bad_delegation(state->inode); 2309 write_seqlock(&state->seqlock); 2310 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2311 write_sequnlock(&state->seqlock); 2312 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2313 } 2314 2315 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2316 { 2317 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2318 nfs_finish_clear_delegation_stateid(state); 2319 } 2320 2321 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2322 { 2323 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2324 nfs40_clear_delegation_stateid(state); 2325 return nfs4_open_expired(sp, state); 2326 } 2327 2328 #if defined(CONFIG_NFS_V4_1) 2329 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2330 { 2331 struct nfs_server *server = NFS_SERVER(state->inode); 2332 nfs4_stateid stateid; 2333 struct nfs_delegation *delegation; 2334 struct rpc_cred *cred; 2335 int status; 2336 2337 /* Get the delegation credential for use by test/free_stateid */ 2338 rcu_read_lock(); 2339 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2340 if (delegation == NULL) { 2341 rcu_read_unlock(); 2342 return; 2343 } 2344 2345 nfs4_stateid_copy(&stateid, &delegation->stateid); 2346 cred = get_rpccred(delegation->cred); 2347 rcu_read_unlock(); 2348 status = nfs41_test_stateid(server, &stateid, cred); 2349 trace_nfs4_test_delegation_stateid(state, NULL, status); 2350 2351 if (status != NFS_OK) { 2352 /* Free the stateid unless the server explicitly 2353 * informs us the stateid is unrecognized. 
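 * (a FREE_STATEID for a stateid the server no longer recognizes would
 * only fail with BAD_STATEID again)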
*/ 2354 if (status != -NFS4ERR_BAD_STATEID) 2355 nfs41_free_stateid(server, &stateid, cred); 2356 nfs_finish_clear_delegation_stateid(state); 2357 } 2358 2359 put_rpccred(cred); 2360 } 2361 2362 /** 2363 * nfs41_check_open_stateid - possibly free an open stateid 2364 * 2365 * @state: NFSv4 state for an inode 2366 * 2367 * Returns NFS_OK if recovery for this stateid is now finished. 2368 * Otherwise a negative NFS4ERR value is returned. 2369 */ 2370 static int nfs41_check_open_stateid(struct nfs4_state *state) 2371 { 2372 struct nfs_server *server = NFS_SERVER(state->inode); 2373 nfs4_stateid *stateid = &state->open_stateid; 2374 struct rpc_cred *cred = state->owner->so_cred; 2375 int status; 2376 2377 /* If a state reset has been done, test_stateid is unneeded */ 2378 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2379 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2380 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2381 return -NFS4ERR_BAD_STATEID; 2382 2383 status = nfs41_test_stateid(server, stateid, cred); 2384 trace_nfs4_test_open_stateid(state, NULL, status); 2385 if (status != NFS_OK) { 2386 /* Free the stateid unless the server explicitly 2387 * informs us the stateid is unrecognized. */ 2388 if (status != -NFS4ERR_BAD_STATEID) 2389 nfs41_free_stateid(server, stateid, cred); 2390 2391 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2392 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2393 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2394 clear_bit(NFS_OPEN_STATE, &state->flags); 2395 } 2396 return status; 2397 } 2398 2399 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2400 { 2401 int status; 2402 2403 nfs41_check_delegation_stateid(state); 2404 status = nfs41_check_open_stateid(state); 2405 if (status != NFS_OK) 2406 status = nfs4_open_expired(sp, state); 2407 return status; 2408 } 2409 #endif 2410 2411 /* 2412 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2413 * fields corresponding to attributes that were used to store the verifier. 2414 * Make sure we clobber those fields in the later setattr call 2415 */ 2416 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 2417 struct iattr *sattr, struct nfs4_label **label) 2418 { 2419 const u32 *attrset = opendata->o_res.attrset; 2420 2421 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2422 !(sattr->ia_valid & ATTR_ATIME_SET)) 2423 sattr->ia_valid |= ATTR_ATIME; 2424 2425 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2426 !(sattr->ia_valid & ATTR_MTIME_SET)) 2427 sattr->ia_valid |= ATTR_MTIME; 2428 2429 /* Except MODE, it seems harmless of setting twice. 
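 * If the server reports that it already stored the mode, drop ATTR_MODE
 * so that the follow-up SETATTR does not set it a second time.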
*/ 2430 if ((attrset[1] & FATTR4_WORD1_MODE)) 2431 sattr->ia_valid &= ~ATTR_MODE; 2432 2433 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2434 *label = NULL; 2435 } 2436 2437 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2438 fmode_t fmode, 2439 int flags, 2440 struct nfs_open_context *ctx) 2441 { 2442 struct nfs4_state_owner *sp = opendata->owner; 2443 struct nfs_server *server = sp->so_server; 2444 struct dentry *dentry; 2445 struct nfs4_state *state; 2446 unsigned int seq; 2447 int ret; 2448 2449 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2450 2451 ret = _nfs4_proc_open(opendata); 2452 if (ret != 0) 2453 goto out; 2454 2455 state = nfs4_opendata_to_nfs4_state(opendata); 2456 ret = PTR_ERR(state); 2457 if (IS_ERR(state)) 2458 goto out; 2459 if (server->caps & NFS_CAP_POSIX_LOCK) 2460 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2461 2462 dentry = opendata->dentry; 2463 if (d_really_is_negative(dentry)) { 2464 /* FIXME: Is this d_drop() ever needed? */ 2465 d_drop(dentry); 2466 dentry = d_add_unique(dentry, igrab(state->inode)); 2467 if (dentry == NULL) { 2468 dentry = opendata->dentry; 2469 } else if (dentry != ctx->dentry) { 2470 dput(ctx->dentry); 2471 ctx->dentry = dget(dentry); 2472 } 2473 nfs_set_verifier(dentry, 2474 nfs_save_change_attribute(d_inode(opendata->dir))); 2475 } 2476 2477 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2478 if (ret != 0) 2479 goto out; 2480 2481 ctx->state = state; 2482 if (d_inode(dentry) == state->inode) { 2483 nfs_inode_attach_open_context(ctx); 2484 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2485 nfs4_schedule_stateid_recovery(server, state); 2486 } 2487 out: 2488 return ret; 2489 } 2490 2491 /* 2492 * Returns a referenced nfs4_state 2493 */ 2494 static int _nfs4_do_open(struct inode *dir, 2495 struct nfs_open_context *ctx, 2496 int flags, 2497 struct iattr *sattr, 2498 struct nfs4_label *label, 2499 int *opened) 2500 { 2501 struct nfs4_state_owner *sp; 2502 struct nfs4_state *state = NULL; 2503 struct nfs_server *server = NFS_SERVER(dir); 2504 struct nfs4_opendata *opendata; 2505 struct dentry *dentry = ctx->dentry; 2506 struct rpc_cred *cred = ctx->cred; 2507 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2508 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2509 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2510 struct nfs4_label *olabel = NULL; 2511 int status; 2512 2513 /* Protect against reboot recovery conflicts */ 2514 status = -ENOMEM; 2515 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2516 if (sp == NULL) { 2517 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2518 goto out_err; 2519 } 2520 status = nfs4_recover_expired_lease(server); 2521 if (status != 0) 2522 goto err_put_state_owner; 2523 if (d_really_is_positive(dentry)) 2524 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 2525 status = -ENOMEM; 2526 if (d_really_is_positive(dentry)) 2527 claim = NFS4_OPEN_CLAIM_FH; 2528 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2529 label, claim, GFP_KERNEL); 2530 if (opendata == NULL) 2531 goto err_put_state_owner; 2532 2533 if (label) { 2534 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2535 if (IS_ERR(olabel)) { 2536 status = PTR_ERR(olabel); 2537 goto err_opendata_put; 2538 } 2539 } 2540 2541 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2542 if (!opendata->f_attr.mdsthreshold) { 2543 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2544 if (!opendata->f_attr.mdsthreshold) 2545 
goto err_free_label; 2546 } 2547 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2548 } 2549 if (d_really_is_positive(dentry)) 2550 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 2551 2552 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2553 if (status != 0) 2554 goto err_free_label; 2555 state = ctx->state; 2556 2557 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2558 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2559 nfs4_exclusive_attrset(opendata, sattr, &label); 2560 2561 nfs_fattr_init(opendata->o_res.f_attr); 2562 status = nfs4_do_setattr(state->inode, cred, 2563 opendata->o_res.f_attr, sattr, 2564 state, label, olabel); 2565 if (status == 0) { 2566 nfs_setattr_update_inode(state->inode, sattr, 2567 opendata->o_res.f_attr); 2568 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2569 } 2570 } 2571 if (opened && opendata->file_created) 2572 *opened |= FILE_CREATED; 2573 2574 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 2575 *ctx_th = opendata->f_attr.mdsthreshold; 2576 opendata->f_attr.mdsthreshold = NULL; 2577 } 2578 2579 nfs4_label_free(olabel); 2580 2581 nfs4_opendata_put(opendata); 2582 nfs4_put_state_owner(sp); 2583 return 0; 2584 err_free_label: 2585 nfs4_label_free(olabel); 2586 err_opendata_put: 2587 nfs4_opendata_put(opendata); 2588 err_put_state_owner: 2589 nfs4_put_state_owner(sp); 2590 out_err: 2591 return status; 2592 } 2593 2594 2595 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2596 struct nfs_open_context *ctx, 2597 int flags, 2598 struct iattr *sattr, 2599 struct nfs4_label *label, 2600 int *opened) 2601 { 2602 struct nfs_server *server = NFS_SERVER(dir); 2603 struct nfs4_exception exception = { }; 2604 struct nfs4_state *res; 2605 int status; 2606 2607 do { 2608 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2609 res = ctx->state; 2610 trace_nfs4_open_file(ctx, flags, status); 2611 if (status == 0) 2612 break; 2613 /* NOTE: BAD_SEQID means the server and client disagree about the 2614 * book-keeping w.r.t. state-changing operations 2615 * (OPEN/CLOSE/LOCK/LOCKU...) 2616 * It is actually a sign of a bug on the client or on the server. 2617 * 2618 * If we receive a BAD_SEQID error in the particular case of 2619 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2620 * have unhashed the old state_owner for us, and that we can 2621 * therefore safely retry using a new one. We should still warn 2622 * the user though... 2623 */ 2624 if (status == -NFS4ERR_BAD_SEQID) { 2625 pr_warn_ratelimited("NFS: v4 server %s " 2626 " returned a bad sequence-id error!\n", 2627 NFS_SERVER(dir)->nfs_client->cl_hostname); 2628 exception.retry = 1; 2629 continue; 2630 } 2631 /* 2632 * BAD_STATEID on OPEN means that the server cancelled our 2633 * state before it received the OPEN_CONFIRM. 2634 * Recover by retrying the request as per the discussion 2635 * on Page 181 of RFC3530. 
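 * (the retry simply issues a fresh OPEN; no other recovery needs to run
 * first)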
2636 */ 2637 if (status == -NFS4ERR_BAD_STATEID) { 2638 exception.retry = 1; 2639 continue; 2640 } 2641 if (status == -EAGAIN) { 2642 /* We must have found a delegation */ 2643 exception.retry = 1; 2644 continue; 2645 } 2646 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2647 continue; 2648 res = ERR_PTR(nfs4_handle_exception(server, 2649 status, &exception)); 2650 } while (exception.retry); 2651 return res; 2652 } 2653 2654 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2655 struct nfs_fattr *fattr, struct iattr *sattr, 2656 struct nfs4_state *state, struct nfs4_label *ilabel, 2657 struct nfs4_label *olabel) 2658 { 2659 struct nfs_server *server = NFS_SERVER(inode); 2660 struct nfs_setattrargs arg = { 2661 .fh = NFS_FH(inode), 2662 .iap = sattr, 2663 .server = server, 2664 .bitmask = server->attr_bitmask, 2665 .label = ilabel, 2666 }; 2667 struct nfs_setattrres res = { 2668 .fattr = fattr, 2669 .label = olabel, 2670 .server = server, 2671 }; 2672 struct rpc_message msg = { 2673 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2674 .rpc_argp = &arg, 2675 .rpc_resp = &res, 2676 .rpc_cred = cred, 2677 }; 2678 unsigned long timestamp = jiffies; 2679 fmode_t fmode; 2680 bool truncate; 2681 int status; 2682 2683 arg.bitmask = nfs4_bitmask(server, ilabel); 2684 if (ilabel) 2685 arg.bitmask = nfs4_bitmask(server, olabel); 2686 2687 nfs_fattr_init(fattr); 2688 2689 /* Servers should only apply open mode checks for file size changes */ 2690 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false; 2691 fmode = truncate ? FMODE_WRITE : FMODE_READ; 2692 2693 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2694 /* Use that stateid */ 2695 } else if (truncate && state != NULL) { 2696 struct nfs_lockowner lockowner = { 2697 .l_owner = current->files, 2698 .l_pid = current->tgid, 2699 }; 2700 if (!nfs4_valid_open_stateid(state)) 2701 return -EBADF; 2702 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2703 &lockowner) == -EIO) 2704 return -EBADF; 2705 } else 2706 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2707 2708 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2709 if (status == 0 && state != NULL) 2710 renew_lease(server, timestamp); 2711 trace_nfs4_setattr(inode, &arg.stateid, status); 2712 return status; 2713 } 2714 2715 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2716 struct nfs_fattr *fattr, struct iattr *sattr, 2717 struct nfs4_state *state, struct nfs4_label *ilabel, 2718 struct nfs4_label *olabel) 2719 { 2720 struct nfs_server *server = NFS_SERVER(inode); 2721 struct nfs4_exception exception = { 2722 .state = state, 2723 .inode = inode, 2724 }; 2725 int err; 2726 do { 2727 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel); 2728 switch (err) { 2729 case -NFS4ERR_OPENMODE: 2730 if (!(sattr->ia_valid & ATTR_SIZE)) { 2731 pr_warn_once("NFSv4: server %s is incorrectly " 2732 "applying open mode checks to " 2733 "a SETATTR that is not " 2734 "changing file size.\n", 2735 server->nfs_client->cl_hostname); 2736 } 2737 if (state && !(state->state & FMODE_WRITE)) { 2738 err = -EBADF; 2739 if (sattr->ia_valid & ATTR_OPEN) 2740 err = -EACCES; 2741 goto out; 2742 } 2743 } 2744 err = nfs4_handle_exception(server, err, &exception); 2745 } while (exception.retry); 2746 out: 2747 return err; 2748 } 2749 2750 static bool 2751 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 2752 { 2753 if (inode == NULL || 
!nfs_have_layout(inode)) 2754 return false; 2755 2756 return pnfs_wait_on_layoutreturn(inode, task); 2757 } 2758 2759 struct nfs4_closedata { 2760 struct inode *inode; 2761 struct nfs4_state *state; 2762 struct nfs_closeargs arg; 2763 struct nfs_closeres res; 2764 struct nfs_fattr fattr; 2765 unsigned long timestamp; 2766 bool roc; 2767 u32 roc_barrier; 2768 }; 2769 2770 static void nfs4_free_closedata(void *data) 2771 { 2772 struct nfs4_closedata *calldata = data; 2773 struct nfs4_state_owner *sp = calldata->state->owner; 2774 struct super_block *sb = calldata->state->inode->i_sb; 2775 2776 if (calldata->roc) 2777 pnfs_roc_release(calldata->state->inode); 2778 nfs4_put_open_state(calldata->state); 2779 nfs_free_seqid(calldata->arg.seqid); 2780 nfs4_put_state_owner(sp); 2781 nfs_sb_deactive(sb); 2782 kfree(calldata); 2783 } 2784 2785 static void nfs4_close_done(struct rpc_task *task, void *data) 2786 { 2787 struct nfs4_closedata *calldata = data; 2788 struct nfs4_state *state = calldata->state; 2789 struct nfs_server *server = NFS_SERVER(calldata->inode); 2790 nfs4_stateid *res_stateid = NULL; 2791 2792 dprintk("%s: begin!\n", __func__); 2793 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2794 return; 2795 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2796 /* hmm. we are done with the inode, and in the process of freeing 2797 * the state_owner. we keep this around to process errors 2798 */ 2799 switch (task->tk_status) { 2800 case 0: 2801 res_stateid = &calldata->res.stateid; 2802 if (calldata->roc) 2803 pnfs_roc_set_barrier(state->inode, 2804 calldata->roc_barrier); 2805 renew_lease(server, calldata->timestamp); 2806 break; 2807 case -NFS4ERR_ADMIN_REVOKED: 2808 case -NFS4ERR_STALE_STATEID: 2809 case -NFS4ERR_OLD_STATEID: 2810 case -NFS4ERR_BAD_STATEID: 2811 case -NFS4ERR_EXPIRED: 2812 if (!nfs4_stateid_match(&calldata->arg.stateid, 2813 &state->open_stateid)) { 2814 rpc_restart_call_prepare(task); 2815 goto out_release; 2816 } 2817 if (calldata->arg.fmode == 0) 2818 break; 2819 default: 2820 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2821 rpc_restart_call_prepare(task); 2822 goto out_release; 2823 } 2824 } 2825 nfs_clear_open_stateid(state, &calldata->arg.stateid, 2826 res_stateid, calldata->arg.fmode); 2827 out_release: 2828 nfs_release_seqid(calldata->arg.seqid); 2829 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2830 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2831 } 2832 2833 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2834 { 2835 struct nfs4_closedata *calldata = data; 2836 struct nfs4_state *state = calldata->state; 2837 struct inode *inode = calldata->inode; 2838 bool is_rdonly, is_wronly, is_rdwr; 2839 int call_close = 0; 2840 2841 dprintk("%s: begin!\n", __func__); 2842 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2843 goto out_wait; 2844 2845 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2846 spin_lock(&state->owner->so_lock); 2847 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2848 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2849 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2850 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); 2851 /* Calculate the change in open mode */ 2852 calldata->arg.fmode = 0; 2853 if (state->n_rdwr == 0) { 2854 if (state->n_rdonly == 0) 2855 call_close |= is_rdonly; 2856 else if (is_rdonly) 2857 calldata->arg.fmode |= FMODE_READ; 2858 if (state->n_wronly == 
0) 2859 call_close |= is_wronly; 2860 else if (is_wronly) 2861 calldata->arg.fmode |= FMODE_WRITE; 2862 } else if (is_rdwr) 2863 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2864 2865 if (calldata->arg.fmode == 0) 2866 call_close |= is_rdwr; 2867 2868 if (!nfs4_valid_open_stateid(state)) 2869 call_close = 0; 2870 spin_unlock(&state->owner->so_lock); 2871 2872 if (!call_close) { 2873 /* Note: exit _without_ calling nfs4_close_done */ 2874 goto out_no_action; 2875 } 2876 2877 if (nfs4_wait_on_layoutreturn(inode, task)) { 2878 nfs_release_seqid(calldata->arg.seqid); 2879 goto out_wait; 2880 } 2881 2882 if (calldata->arg.fmode == 0) 2883 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2884 if (calldata->roc) 2885 pnfs_roc_get_barrier(inode, &calldata->roc_barrier); 2886 2887 calldata->arg.share_access = 2888 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2889 calldata->arg.fmode, 0); 2890 2891 nfs_fattr_init(calldata->res.fattr); 2892 calldata->timestamp = jiffies; 2893 if (nfs4_setup_sequence(NFS_SERVER(inode), 2894 &calldata->arg.seq_args, 2895 &calldata->res.seq_res, 2896 task) != 0) 2897 nfs_release_seqid(calldata->arg.seqid); 2898 dprintk("%s: done!\n", __func__); 2899 return; 2900 out_no_action: 2901 task->tk_action = NULL; 2902 out_wait: 2903 nfs4_sequence_done(task, &calldata->res.seq_res); 2904 } 2905 2906 static const struct rpc_call_ops nfs4_close_ops = { 2907 .rpc_call_prepare = nfs4_close_prepare, 2908 .rpc_call_done = nfs4_close_done, 2909 .rpc_release = nfs4_free_closedata, 2910 }; 2911 2912 static bool nfs4_roc(struct inode *inode) 2913 { 2914 if (!nfs_have_layout(inode)) 2915 return false; 2916 return pnfs_roc(inode); 2917 } 2918 2919 /* 2920 * It is possible for data to be read/written from a mem-mapped file 2921 * after the sys_close call (which hits the vfs layer as a flush). 2922 * This means that we can't safely call nfsv4 close on a file until 2923 * the inode is cleared. This in turn means that we are not good 2924 * NFSv4 citizens - we do not indicate to the server to update the file's 2925 * share state even when we are done with one of the three share 2926 * stateid's in the inode. 2927 * 2928 * NOTE: Caller must be holding the sp->so_owner semaphore! 
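 *
 * nfs4_do_close() issues the CLOSE (or an OPEN_DOWNGRADE when some share
 * modes remain in use) asynchronously; with @wait set it also waits for
 * the RPC to complete.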
2929 */ 2930 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2931 { 2932 struct nfs_server *server = NFS_SERVER(state->inode); 2933 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 2934 struct nfs4_closedata *calldata; 2935 struct nfs4_state_owner *sp = state->owner; 2936 struct rpc_task *task; 2937 struct rpc_message msg = { 2938 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2939 .rpc_cred = state->owner->so_cred, 2940 }; 2941 struct rpc_task_setup task_setup_data = { 2942 .rpc_client = server->client, 2943 .rpc_message = &msg, 2944 .callback_ops = &nfs4_close_ops, 2945 .workqueue = nfsiod_workqueue, 2946 .flags = RPC_TASK_ASYNC, 2947 }; 2948 int status = -ENOMEM; 2949 2950 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 2951 &task_setup_data.rpc_client, &msg); 2952 2953 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2954 if (calldata == NULL) 2955 goto out; 2956 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2957 calldata->inode = state->inode; 2958 calldata->state = state; 2959 calldata->arg.fh = NFS_FH(state->inode); 2960 /* Serialization for the sequence id */ 2961 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 2962 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 2963 if (IS_ERR(calldata->arg.seqid)) 2964 goto out_free_calldata; 2965 calldata->arg.fmode = 0; 2966 calldata->arg.bitmask = server->cache_consistency_bitmask; 2967 calldata->res.fattr = &calldata->fattr; 2968 calldata->res.seqid = calldata->arg.seqid; 2969 calldata->res.server = server; 2970 calldata->roc = nfs4_roc(state->inode); 2971 nfs_sb_active(calldata->inode->i_sb); 2972 2973 msg.rpc_argp = &calldata->arg; 2974 msg.rpc_resp = &calldata->res; 2975 task_setup_data.callback_data = calldata; 2976 task = rpc_run_task(&task_setup_data); 2977 if (IS_ERR(task)) 2978 return PTR_ERR(task); 2979 status = 0; 2980 if (wait) 2981 status = rpc_wait_for_completion_task(task); 2982 rpc_put_task(task); 2983 return status; 2984 out_free_calldata: 2985 kfree(calldata); 2986 out: 2987 nfs4_put_open_state(state); 2988 nfs4_put_state_owner(sp); 2989 return status; 2990 } 2991 2992 static struct inode * 2993 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 2994 int open_flags, struct iattr *attr, int *opened) 2995 { 2996 struct nfs4_state *state; 2997 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 2998 2999 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3000 3001 /* Protect against concurrent sillydeletes */ 3002 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3003 3004 nfs4_label_release_security(label); 3005 3006 if (IS_ERR(state)) 3007 return ERR_CAST(state); 3008 return state->inode; 3009 } 3010 3011 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3012 { 3013 if (ctx->state == NULL) 3014 return; 3015 if (is_sync) 3016 nfs4_close_sync(ctx->state, ctx->mode); 3017 else 3018 nfs4_close_state(ctx->state, ctx->mode); 3019 } 3020 3021 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3022 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3023 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 3024 3025 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3026 { 3027 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 3028 struct nfs4_server_caps_arg args = { 3029 .fhandle = fhandle, 3030 .bitmask = bitmask, 3031 }; 3032 
struct nfs4_server_caps_res res = {}; 3033 struct rpc_message msg = { 3034 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3035 .rpc_argp = &args, 3036 .rpc_resp = &res, 3037 }; 3038 int status; 3039 3040 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3041 FATTR4_WORD0_FH_EXPIRE_TYPE | 3042 FATTR4_WORD0_LINK_SUPPORT | 3043 FATTR4_WORD0_SYMLINK_SUPPORT | 3044 FATTR4_WORD0_ACLSUPPORT; 3045 if (minorversion) 3046 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3047 3048 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3049 if (status == 0) { 3050 /* Sanity check the server answers */ 3051 switch (minorversion) { 3052 case 0: 3053 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3054 res.attr_bitmask[2] = 0; 3055 break; 3056 case 1: 3057 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3058 break; 3059 case 2: 3060 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3061 } 3062 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3063 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 3064 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 3065 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 3066 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 3067 NFS_CAP_CTIME|NFS_CAP_MTIME| 3068 NFS_CAP_SECURITY_LABEL); 3069 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3070 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3071 server->caps |= NFS_CAP_ACLS; 3072 if (res.has_links != 0) 3073 server->caps |= NFS_CAP_HARDLINKS; 3074 if (res.has_symlinks != 0) 3075 server->caps |= NFS_CAP_SYMLINKS; 3076 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 3077 server->caps |= NFS_CAP_FILEID; 3078 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 3079 server->caps |= NFS_CAP_MODE; 3080 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 3081 server->caps |= NFS_CAP_NLINK; 3082 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 3083 server->caps |= NFS_CAP_OWNER; 3084 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 3085 server->caps |= NFS_CAP_OWNER_GROUP; 3086 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 3087 server->caps |= NFS_CAP_ATIME; 3088 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 3089 server->caps |= NFS_CAP_CTIME; 3090 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 3091 server->caps |= NFS_CAP_MTIME; 3092 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 3093 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 3094 server->caps |= NFS_CAP_SECURITY_LABEL; 3095 #endif 3096 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 3097 sizeof(server->attr_bitmask)); 3098 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 3099 3100 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 3101 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 3102 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 3103 server->cache_consistency_bitmask[2] = 0; 3104 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 3105 sizeof(server->exclcreat_bitmask)); 3106 server->acl_bitmask = res.acl_bitmask; 3107 server->fh_expire_type = res.fh_expire_type; 3108 } 3109 3110 return status; 3111 } 3112 3113 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3114 { 3115 struct nfs4_exception exception = { }; 3116 int err; 3117 do { 3118 err = nfs4_handle_exception(server, 3119 _nfs4_server_capabilities(server, fhandle), 3120 &exception); 3121 } while (exception.retry); 3122 return err; 3123 } 3124 3125 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh 
*fhandle, 3126 struct nfs_fsinfo *info) 3127 { 3128 u32 bitmask[3]; 3129 struct nfs4_lookup_root_arg args = { 3130 .bitmask = bitmask, 3131 }; 3132 struct nfs4_lookup_res res = { 3133 .server = server, 3134 .fattr = info->fattr, 3135 .fh = fhandle, 3136 }; 3137 struct rpc_message msg = { 3138 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 3139 .rpc_argp = &args, 3140 .rpc_resp = &res, 3141 }; 3142 3143 bitmask[0] = nfs4_fattr_bitmap[0]; 3144 bitmask[1] = nfs4_fattr_bitmap[1]; 3145 /* 3146 * Process the label in the upcoming getfattr 3147 */ 3148 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 3149 3150 nfs_fattr_init(info->fattr); 3151 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3152 } 3153 3154 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3155 struct nfs_fsinfo *info) 3156 { 3157 struct nfs4_exception exception = { }; 3158 int err; 3159 do { 3160 err = _nfs4_lookup_root(server, fhandle, info); 3161 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3162 switch (err) { 3163 case 0: 3164 case -NFS4ERR_WRONGSEC: 3165 goto out; 3166 default: 3167 err = nfs4_handle_exception(server, err, &exception); 3168 } 3169 } while (exception.retry); 3170 out: 3171 return err; 3172 } 3173 3174 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3175 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3176 { 3177 struct rpc_auth_create_args auth_args = { 3178 .pseudoflavor = flavor, 3179 }; 3180 struct rpc_auth *auth; 3181 int ret; 3182 3183 auth = rpcauth_create(&auth_args, server->client); 3184 if (IS_ERR(auth)) { 3185 ret = -EACCES; 3186 goto out; 3187 } 3188 ret = nfs4_lookup_root(server, fhandle, info); 3189 out: 3190 return ret; 3191 } 3192 3193 /* 3194 * Retry pseudoroot lookup with various security flavors. We do this when: 3195 * 3196 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3197 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3198 * 3199 * Returns zero on success, or a negative NFS4ERR value, or a 3200 * negative errno value. 3201 */ 3202 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3203 struct nfs_fsinfo *info) 3204 { 3205 /* Per 3530bis 15.33.5 */ 3206 static const rpc_authflavor_t flav_array[] = { 3207 RPC_AUTH_GSS_KRB5P, 3208 RPC_AUTH_GSS_KRB5I, 3209 RPC_AUTH_GSS_KRB5, 3210 RPC_AUTH_UNIX, /* courtesy */ 3211 RPC_AUTH_NULL, 3212 }; 3213 int status = -EPERM; 3214 size_t i; 3215 3216 if (server->auth_info.flavor_len > 0) { 3217 /* try each flavor specified by user */ 3218 for (i = 0; i < server->auth_info.flavor_len; i++) { 3219 status = nfs4_lookup_root_sec(server, fhandle, info, 3220 server->auth_info.flavors[i]); 3221 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3222 continue; 3223 break; 3224 } 3225 } else { 3226 /* no flavors specified by user, try default list */ 3227 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 3228 status = nfs4_lookup_root_sec(server, fhandle, info, 3229 flav_array[i]); 3230 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3231 continue; 3232 break; 3233 } 3234 } 3235 3236 /* 3237 * -EACCESS could mean that the user doesn't have correct permissions 3238 * to access the mount. It could also mean that we tried to mount 3239 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 3240 * existing mount programs don't handle -EACCES very well so it should 3241 * be mapped to -EPERM instead. 
3242 */ 3243 if (status == -EACCES) 3244 status = -EPERM; 3245 return status; 3246 } 3247 3248 static int nfs4_do_find_root_sec(struct nfs_server *server, 3249 struct nfs_fh *fhandle, struct nfs_fsinfo *info) 3250 { 3251 int mv = server->nfs_client->cl_minorversion; 3252 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info); 3253 } 3254 3255 /** 3256 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 3257 * @server: initialized nfs_server handle 3258 * @fhandle: we fill in the pseudo-fs root file handle 3259 * @info: we fill in an FSINFO struct 3260 * @auth_probe: probe the auth flavours 3261 * 3262 * Returns zero on success, or a negative errno. 3263 */ 3264 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 3265 struct nfs_fsinfo *info, 3266 bool auth_probe) 3267 { 3268 int status = 0; 3269 3270 if (!auth_probe) 3271 status = nfs4_lookup_root(server, fhandle, info); 3272 3273 if (auth_probe || status == NFS4ERR_WRONGSEC) 3274 status = nfs4_do_find_root_sec(server, fhandle, info); 3275 3276 if (status == 0) 3277 status = nfs4_server_capabilities(server, fhandle); 3278 if (status == 0) 3279 status = nfs4_do_fsinfo(server, fhandle, info); 3280 3281 return nfs4_map_errors(status); 3282 } 3283 3284 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 3285 struct nfs_fsinfo *info) 3286 { 3287 int error; 3288 struct nfs_fattr *fattr = info->fattr; 3289 struct nfs4_label *label = NULL; 3290 3291 error = nfs4_server_capabilities(server, mntfh); 3292 if (error < 0) { 3293 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 3294 return error; 3295 } 3296 3297 label = nfs4_label_alloc(server, GFP_KERNEL); 3298 if (IS_ERR(label)) 3299 return PTR_ERR(label); 3300 3301 error = nfs4_proc_getattr(server, mntfh, fattr, label); 3302 if (error < 0) { 3303 dprintk("nfs4_get_root: getattr error = %d\n", -error); 3304 goto err_free_label; 3305 } 3306 3307 if (fattr->valid & NFS_ATTR_FATTR_FSID && 3308 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 3309 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 3310 3311 err_free_label: 3312 nfs4_label_free(label); 3313 3314 return error; 3315 } 3316 3317 /* 3318 * Get locations and (maybe) other attributes of a referral. 3319 * Note that we'll actually follow the referral later when 3320 * we detect fsid mismatch in inode revalidation 3321 */ 3322 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 3323 const struct qstr *name, struct nfs_fattr *fattr, 3324 struct nfs_fh *fhandle) 3325 { 3326 int status = -ENOMEM; 3327 struct page *page = NULL; 3328 struct nfs4_fs_locations *locations = NULL; 3329 3330 page = alloc_page(GFP_KERNEL); 3331 if (page == NULL) 3332 goto out; 3333 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 3334 if (locations == NULL) 3335 goto out; 3336 3337 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 3338 if (status != 0) 3339 goto out; 3340 3341 /* 3342 * If the fsid didn't change, this is a migration event, not a 3343 * referral. Cause us to drop into the exception handler, which 3344 * will kick off migration recovery. 
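 * (returning -NFS4ERR_MOVED lets the caller's nfs4_handle_exception()
 * see the error and schedule migration recovery)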
3345 */ 3346 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 3347 dprintk("%s: server did not return a different fsid for" 3348 " a referral at %s\n", __func__, name->name); 3349 status = -NFS4ERR_MOVED; 3350 goto out; 3351 } 3352 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 3353 nfs_fixup_referral_attributes(&locations->fattr); 3354 3355 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 3356 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 3357 memset(fhandle, 0, sizeof(struct nfs_fh)); 3358 out: 3359 if (page) 3360 __free_page(page); 3361 kfree(locations); 3362 return status; 3363 } 3364 3365 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3366 struct nfs_fattr *fattr, struct nfs4_label *label) 3367 { 3368 struct nfs4_getattr_arg args = { 3369 .fh = fhandle, 3370 .bitmask = server->attr_bitmask, 3371 }; 3372 struct nfs4_getattr_res res = { 3373 .fattr = fattr, 3374 .label = label, 3375 .server = server, 3376 }; 3377 struct rpc_message msg = { 3378 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 3379 .rpc_argp = &args, 3380 .rpc_resp = &res, 3381 }; 3382 3383 args.bitmask = nfs4_bitmask(server, label); 3384 3385 nfs_fattr_init(fattr); 3386 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3387 } 3388 3389 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3390 struct nfs_fattr *fattr, struct nfs4_label *label) 3391 { 3392 struct nfs4_exception exception = { }; 3393 int err; 3394 do { 3395 err = _nfs4_proc_getattr(server, fhandle, fattr, label); 3396 trace_nfs4_getattr(server, fhandle, fattr, err); 3397 err = nfs4_handle_exception(server, err, 3398 &exception); 3399 } while (exception.retry); 3400 return err; 3401 } 3402 3403 /* 3404 * The file is not closed if it is opened due to a request to change 3405 * the size of the file. The open call will not be needed once the 3406 * VFS layer lookup-intents are implemented. 3407 * 3408 * Close is called when the inode is destroyed. 3409 * If we haven't opened the file for O_WRONLY, we 3410 * need to do so in the size_change case to obtain a stateid. 3411 * 3412 * Got race? 3413 * Because OPEN is always done by name in NFSv4, it is 3414 * possible that we opened a different file by the same 3415 * name. We can recognize this race condition, but we 3416 * can't do anything about it besides returning an error. 3417 * 3418 * This will be fixed with VFS changes (lookup-intent). 
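 *
 * In the meantime, when the SETATTR originates from an open file
 * (ATTR_FILE is set), borrow the credential and open state from that
 * file's open context so that a size-changing SETATTR carries a usable
 * stateid.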
3419 */ 3420 static int 3421 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3422 struct iattr *sattr) 3423 { 3424 struct inode *inode = d_inode(dentry); 3425 struct rpc_cred *cred = NULL; 3426 struct nfs4_state *state = NULL; 3427 struct nfs4_label *label = NULL; 3428 int status; 3429 3430 if (pnfs_ld_layoutret_on_setattr(inode) && 3431 sattr->ia_valid & ATTR_SIZE && 3432 sattr->ia_size < i_size_read(inode)) 3433 pnfs_commit_and_return_layout(inode); 3434 3435 nfs_fattr_init(fattr); 3436 3437 /* Deal with open(O_TRUNC) */ 3438 if (sattr->ia_valid & ATTR_OPEN) 3439 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3440 3441 /* Optimization: if the end result is no change, don't RPC */ 3442 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3443 return 0; 3444 3445 /* Search for an existing open(O_WRITE) file */ 3446 if (sattr->ia_valid & ATTR_FILE) { 3447 struct nfs_open_context *ctx; 3448 3449 ctx = nfs_file_open_context(sattr->ia_file); 3450 if (ctx) { 3451 cred = ctx->cred; 3452 state = ctx->state; 3453 } 3454 } 3455 3456 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3457 if (IS_ERR(label)) 3458 return PTR_ERR(label); 3459 3460 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3461 if (status == 0) { 3462 nfs_setattr_update_inode(inode, sattr, fattr); 3463 nfs_setsecurity(inode, fattr, label); 3464 } 3465 nfs4_label_free(label); 3466 return status; 3467 } 3468 3469 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3470 const struct qstr *name, struct nfs_fh *fhandle, 3471 struct nfs_fattr *fattr, struct nfs4_label *label) 3472 { 3473 struct nfs_server *server = NFS_SERVER(dir); 3474 int status; 3475 struct nfs4_lookup_arg args = { 3476 .bitmask = server->attr_bitmask, 3477 .dir_fh = NFS_FH(dir), 3478 .name = name, 3479 }; 3480 struct nfs4_lookup_res res = { 3481 .server = server, 3482 .fattr = fattr, 3483 .label = label, 3484 .fh = fhandle, 3485 }; 3486 struct rpc_message msg = { 3487 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3488 .rpc_argp = &args, 3489 .rpc_resp = &res, 3490 }; 3491 3492 args.bitmask = nfs4_bitmask(server, label); 3493 3494 nfs_fattr_init(fattr); 3495 3496 dprintk("NFS call lookup %s\n", name->name); 3497 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3498 dprintk("NFS reply lookup: %d\n", status); 3499 return status; 3500 } 3501 3502 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3503 { 3504 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3505 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3506 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3507 fattr->nlink = 2; 3508 } 3509 3510 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3511 struct qstr *name, struct nfs_fh *fhandle, 3512 struct nfs_fattr *fattr, struct nfs4_label *label) 3513 { 3514 struct nfs4_exception exception = { }; 3515 struct rpc_clnt *client = *clnt; 3516 int err; 3517 do { 3518 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3519 trace_nfs4_lookup(dir, name, err); 3520 switch (err) { 3521 case -NFS4ERR_BADNAME: 3522 err = -ENOENT; 3523 goto out; 3524 case -NFS4ERR_MOVED: 3525 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3526 if (err == -NFS4ERR_MOVED) 3527 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3528 goto out; 3529 case -NFS4ERR_WRONGSEC: 3530 err = -EPERM; 3531 if (client != *clnt) 3532 goto out; 3533 client = nfs4_negotiate_security(client, dir, name); 3534 if 
(IS_ERR(client)) 3535 return PTR_ERR(client); 3536 3537 exception.retry = 1; 3538 break; 3539 default: 3540 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3541 } 3542 } while (exception.retry); 3543 3544 out: 3545 if (err == 0) 3546 *clnt = client; 3547 else if (client != *clnt) 3548 rpc_shutdown_client(client); 3549 3550 return err; 3551 } 3552 3553 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 3554 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3555 struct nfs4_label *label) 3556 { 3557 int status; 3558 struct rpc_clnt *client = NFS_CLIENT(dir); 3559 3560 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3561 if (client != NFS_CLIENT(dir)) { 3562 rpc_shutdown_client(client); 3563 nfs_fixup_secinfo_attributes(fattr); 3564 } 3565 return status; 3566 } 3567 3568 struct rpc_clnt * 3569 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 3570 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3571 { 3572 struct rpc_clnt *client = NFS_CLIENT(dir); 3573 int status; 3574 3575 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3576 if (status < 0) 3577 return ERR_PTR(status); 3578 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3579 } 3580 3581 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3582 { 3583 struct nfs_server *server = NFS_SERVER(inode); 3584 struct nfs4_accessargs args = { 3585 .fh = NFS_FH(inode), 3586 .bitmask = server->cache_consistency_bitmask, 3587 }; 3588 struct nfs4_accessres res = { 3589 .server = server, 3590 }; 3591 struct rpc_message msg = { 3592 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3593 .rpc_argp = &args, 3594 .rpc_resp = &res, 3595 .rpc_cred = entry->cred, 3596 }; 3597 int mode = entry->mask; 3598 int status = 0; 3599 3600 /* 3601 * Determine which access bits we want to ask for... 3602 */ 3603 if (mode & MAY_READ) 3604 args.access |= NFS4_ACCESS_READ; 3605 if (S_ISDIR(inode->i_mode)) { 3606 if (mode & MAY_WRITE) 3607 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3608 if (mode & MAY_EXEC) 3609 args.access |= NFS4_ACCESS_LOOKUP; 3610 } else { 3611 if (mode & MAY_WRITE) 3612 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3613 if (mode & MAY_EXEC) 3614 args.access |= NFS4_ACCESS_EXECUTE; 3615 } 3616 3617 res.fattr = nfs_alloc_fattr(); 3618 if (res.fattr == NULL) 3619 return -ENOMEM; 3620 3621 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3622 if (!status) { 3623 nfs_access_set_mask(entry, res.access); 3624 nfs_refresh_inode(inode, res.fattr); 3625 } 3626 nfs_free_fattr(res.fattr); 3627 return status; 3628 } 3629 3630 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3631 { 3632 struct nfs4_exception exception = { }; 3633 int err; 3634 do { 3635 err = _nfs4_proc_access(inode, entry); 3636 trace_nfs4_access(inode, err); 3637 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3638 &exception); 3639 } while (exception.retry); 3640 return err; 3641 } 3642 3643 /* 3644 * TODO: For the time being, we don't try to get any attributes 3645 * along with any of the zero-copy operations READ, READDIR, 3646 * READLINK, WRITE. 3647 * 3648 * In the case of the first three, we want to put the GETATTR 3649 * after the read-type operation -- this is because it is hard 3650 * to predict the length of a GETATTR response in v4, and thus 3651 * align the READ data correctly. 
This means that the GETATTR 3652 * may end up partially falling into the page cache, and we should 3653 * shift it into the 'tail' of the xdr_buf before processing. 3654 * To do this efficiently, we need to know the total length 3655 * of data received, which doesn't seem to be available outside 3656 * of the RPC layer. 3657 * 3658 * In the case of WRITE, we also want to put the GETATTR after 3659 * the operation -- in this case because we want to make sure 3660 * we get the post-operation mtime and size. 3661 * 3662 * Both of these changes to the XDR layer would in fact be quite 3663 * minor, but I decided to leave them for a subsequent patch. 3664 */ 3665 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3666 unsigned int pgbase, unsigned int pglen) 3667 { 3668 struct nfs4_readlink args = { 3669 .fh = NFS_FH(inode), 3670 .pgbase = pgbase, 3671 .pglen = pglen, 3672 .pages = &page, 3673 }; 3674 struct nfs4_readlink_res res; 3675 struct rpc_message msg = { 3676 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3677 .rpc_argp = &args, 3678 .rpc_resp = &res, 3679 }; 3680 3681 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3682 } 3683 3684 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3685 unsigned int pgbase, unsigned int pglen) 3686 { 3687 struct nfs4_exception exception = { }; 3688 int err; 3689 do { 3690 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3691 trace_nfs4_readlink(inode, err); 3692 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3693 &exception); 3694 } while (exception.retry); 3695 return err; 3696 } 3697 3698 /* 3699 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3700 */ 3701 static int 3702 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3703 int flags) 3704 { 3705 struct nfs4_label l, *ilabel = NULL; 3706 struct nfs_open_context *ctx; 3707 struct nfs4_state *state; 3708 int status = 0; 3709 3710 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3711 if (IS_ERR(ctx)) 3712 return PTR_ERR(ctx); 3713 3714 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3715 3716 sattr->ia_mode &= ~current_umask(); 3717 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 3718 if (IS_ERR(state)) { 3719 status = PTR_ERR(state); 3720 goto out; 3721 } 3722 out: 3723 nfs4_label_release_security(ilabel); 3724 put_nfs_open_context(ctx); 3725 return status; 3726 } 3727 3728 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3729 { 3730 struct nfs_server *server = NFS_SERVER(dir); 3731 struct nfs_removeargs args = { 3732 .fh = NFS_FH(dir), 3733 .name = *name, 3734 }; 3735 struct nfs_removeres res = { 3736 .server = server, 3737 }; 3738 struct rpc_message msg = { 3739 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3740 .rpc_argp = &args, 3741 .rpc_resp = &res, 3742 }; 3743 int status; 3744 3745 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3746 if (status == 0) 3747 update_changeattr(dir, &res.cinfo); 3748 return status; 3749 } 3750 3751 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3752 { 3753 struct nfs4_exception exception = { }; 3754 int err; 3755 do { 3756 err = _nfs4_proc_remove(dir, name); 3757 trace_nfs4_remove(dir, name, err); 3758 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3759 &exception); 3760 } while (exception.retry); 3761 return err; 3762 } 3763 3764 static void nfs4_proc_unlink_setup(struct rpc_message 
*msg, struct inode *dir) 3765 { 3766 struct nfs_server *server = NFS_SERVER(dir); 3767 struct nfs_removeargs *args = msg->rpc_argp; 3768 struct nfs_removeres *res = msg->rpc_resp; 3769 3770 res->server = server; 3771 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3772 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3773 3774 nfs_fattr_init(res->dir_attr); 3775 } 3776 3777 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3778 { 3779 nfs4_setup_sequence(NFS_SERVER(data->dir), 3780 &data->args.seq_args, 3781 &data->res.seq_res, 3782 task); 3783 } 3784 3785 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3786 { 3787 struct nfs_unlinkdata *data = task->tk_calldata; 3788 struct nfs_removeres *res = &data->res; 3789 3790 if (!nfs4_sequence_done(task, &res->seq_res)) 3791 return 0; 3792 if (nfs4_async_handle_error(task, res->server, NULL, 3793 &data->timeout) == -EAGAIN) 3794 return 0; 3795 update_changeattr(dir, &res->cinfo); 3796 return 1; 3797 } 3798 3799 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3800 { 3801 struct nfs_server *server = NFS_SERVER(dir); 3802 struct nfs_renameargs *arg = msg->rpc_argp; 3803 struct nfs_renameres *res = msg->rpc_resp; 3804 3805 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3806 res->server = server; 3807 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3808 } 3809 3810 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3811 { 3812 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3813 &data->args.seq_args, 3814 &data->res.seq_res, 3815 task); 3816 } 3817 3818 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3819 struct inode *new_dir) 3820 { 3821 struct nfs_renamedata *data = task->tk_calldata; 3822 struct nfs_renameres *res = &data->res; 3823 3824 if (!nfs4_sequence_done(task, &res->seq_res)) 3825 return 0; 3826 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3827 return 0; 3828 3829 update_changeattr(old_dir, &res->old_cinfo); 3830 update_changeattr(new_dir, &res->new_cinfo); 3831 return 1; 3832 } 3833 3834 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3835 { 3836 struct nfs_server *server = NFS_SERVER(inode); 3837 struct nfs4_link_arg arg = { 3838 .fh = NFS_FH(inode), 3839 .dir_fh = NFS_FH(dir), 3840 .name = name, 3841 .bitmask = server->attr_bitmask, 3842 }; 3843 struct nfs4_link_res res = { 3844 .server = server, 3845 .label = NULL, 3846 }; 3847 struct rpc_message msg = { 3848 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3849 .rpc_argp = &arg, 3850 .rpc_resp = &res, 3851 }; 3852 int status = -ENOMEM; 3853 3854 res.fattr = nfs_alloc_fattr(); 3855 if (res.fattr == NULL) 3856 goto out; 3857 3858 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3859 if (IS_ERR(res.label)) { 3860 status = PTR_ERR(res.label); 3861 goto out; 3862 } 3863 arg.bitmask = nfs4_bitmask(server, res.label); 3864 3865 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3866 if (!status) { 3867 update_changeattr(dir, &res.cinfo); 3868 status = nfs_post_op_update_inode(inode, res.fattr); 3869 if (!status) 3870 nfs_setsecurity(inode, res.fattr, res.label); 3871 } 3872 3873 3874 nfs4_label_free(res.label); 3875 3876 out: 3877 nfs_free_fattr(res.fattr); 3878 return status; 3879 } 3880 3881 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3882 { 
3883 struct nfs4_exception exception = { }; 3884 int err; 3885 do { 3886 err = nfs4_handle_exception(NFS_SERVER(inode), 3887 _nfs4_proc_link(inode, dir, name), 3888 &exception); 3889 } while (exception.retry); 3890 return err; 3891 } 3892 3893 struct nfs4_createdata { 3894 struct rpc_message msg; 3895 struct nfs4_create_arg arg; 3896 struct nfs4_create_res res; 3897 struct nfs_fh fh; 3898 struct nfs_fattr fattr; 3899 struct nfs4_label *label; 3900 }; 3901 3902 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3903 struct qstr *name, struct iattr *sattr, u32 ftype) 3904 { 3905 struct nfs4_createdata *data; 3906 3907 data = kzalloc(sizeof(*data), GFP_KERNEL); 3908 if (data != NULL) { 3909 struct nfs_server *server = NFS_SERVER(dir); 3910 3911 data->label = nfs4_label_alloc(server, GFP_KERNEL); 3912 if (IS_ERR(data->label)) 3913 goto out_free; 3914 3915 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3916 data->msg.rpc_argp = &data->arg; 3917 data->msg.rpc_resp = &data->res; 3918 data->arg.dir_fh = NFS_FH(dir); 3919 data->arg.server = server; 3920 data->arg.name = name; 3921 data->arg.attrs = sattr; 3922 data->arg.ftype = ftype; 3923 data->arg.bitmask = nfs4_bitmask(server, data->label); 3924 data->res.server = server; 3925 data->res.fh = &data->fh; 3926 data->res.fattr = &data->fattr; 3927 data->res.label = data->label; 3928 nfs_fattr_init(data->res.fattr); 3929 } 3930 return data; 3931 out_free: 3932 kfree(data); 3933 return NULL; 3934 } 3935 3936 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3937 { 3938 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3939 &data->arg.seq_args, &data->res.seq_res, 1); 3940 if (status == 0) { 3941 update_changeattr(dir, &data->res.dir_cinfo); 3942 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 3943 } 3944 return status; 3945 } 3946 3947 static void nfs4_free_createdata(struct nfs4_createdata *data) 3948 { 3949 nfs4_label_free(data->label); 3950 kfree(data); 3951 } 3952 3953 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3954 struct page *page, unsigned int len, struct iattr *sattr, 3955 struct nfs4_label *label) 3956 { 3957 struct nfs4_createdata *data; 3958 int status = -ENAMETOOLONG; 3959 3960 if (len > NFS4_MAXPATHLEN) 3961 goto out; 3962 3963 status = -ENOMEM; 3964 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3965 if (data == NULL) 3966 goto out; 3967 3968 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3969 data->arg.u.symlink.pages = &page; 3970 data->arg.u.symlink.len = len; 3971 data->arg.label = label; 3972 3973 status = nfs4_do_create(dir, dentry, data); 3974 3975 nfs4_free_createdata(data); 3976 out: 3977 return status; 3978 } 3979 3980 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3981 struct page *page, unsigned int len, struct iattr *sattr) 3982 { 3983 struct nfs4_exception exception = { }; 3984 struct nfs4_label l, *label = NULL; 3985 int err; 3986 3987 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3988 3989 do { 3990 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 3991 trace_nfs4_symlink(dir, &dentry->d_name, err); 3992 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3993 &exception); 3994 } while (exception.retry); 3995 3996 nfs4_label_release_security(label); 3997 return err; 3998 } 3999 4000 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4001 struct 
iattr *sattr, struct nfs4_label *label) 4002 { 4003 struct nfs4_createdata *data; 4004 int status = -ENOMEM; 4005 4006 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 4007 if (data == NULL) 4008 goto out; 4009 4010 data->arg.label = label; 4011 status = nfs4_do_create(dir, dentry, data); 4012 4013 nfs4_free_createdata(data); 4014 out: 4015 return status; 4016 } 4017 4018 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4019 struct iattr *sattr) 4020 { 4021 struct nfs4_exception exception = { }; 4022 struct nfs4_label l, *label = NULL; 4023 int err; 4024 4025 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4026 4027 sattr->ia_mode &= ~current_umask(); 4028 do { 4029 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 4030 trace_nfs4_mkdir(dir, &dentry->d_name, err); 4031 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4032 &exception); 4033 } while (exception.retry); 4034 nfs4_label_release_security(label); 4035 4036 return err; 4037 } 4038 4039 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4040 u64 cookie, struct page **pages, unsigned int count, int plus) 4041 { 4042 struct inode *dir = d_inode(dentry); 4043 struct nfs4_readdir_arg args = { 4044 .fh = NFS_FH(dir), 4045 .pages = pages, 4046 .pgbase = 0, 4047 .count = count, 4048 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, 4049 .plus = plus, 4050 }; 4051 struct nfs4_readdir_res res; 4052 struct rpc_message msg = { 4053 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 4054 .rpc_argp = &args, 4055 .rpc_resp = &res, 4056 .rpc_cred = cred, 4057 }; 4058 int status; 4059 4060 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 4061 dentry, 4062 (unsigned long long)cookie); 4063 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 4064 res.pgbase = args.pgbase; 4065 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 4066 if (status >= 0) { 4067 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 4068 status += args.pgbase; 4069 } 4070 4071 nfs_invalidate_atime(dir); 4072 4073 dprintk("%s: returns %d\n", __func__, status); 4074 return status; 4075 } 4076 4077 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4078 u64 cookie, struct page **pages, unsigned int count, int plus) 4079 { 4080 struct nfs4_exception exception = { }; 4081 int err; 4082 do { 4083 err = _nfs4_proc_readdir(dentry, cred, cookie, 4084 pages, count, plus); 4085 trace_nfs4_readdir(d_inode(dentry), err); 4086 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, 4087 &exception); 4088 } while (exception.retry); 4089 return err; 4090 } 4091 4092 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4093 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 4094 { 4095 struct nfs4_createdata *data; 4096 int mode = sattr->ia_mode; 4097 int status = -ENOMEM; 4098 4099 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 4100 if (data == NULL) 4101 goto out; 4102 4103 if (S_ISFIFO(mode)) 4104 data->arg.ftype = NF4FIFO; 4105 else if (S_ISBLK(mode)) { 4106 data->arg.ftype = NF4BLK; 4107 data->arg.u.device.specdata1 = MAJOR(rdev); 4108 data->arg.u.device.specdata2 = MINOR(rdev); 4109 } 4110 else if (S_ISCHR(mode)) { 4111 data->arg.ftype = NF4CHR; 4112 data->arg.u.device.specdata1 = MAJOR(rdev); 4113 data->arg.u.device.specdata2 = MINOR(rdev); 4114 } else if (!S_ISSOCK(mode)) { 4115 status = -EINVAL; 4116 goto out_free; 4117 } 4118 4119 
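	/* For block and character nodes the device numbers travel in the
	 * specdata fields filled in above; FIFOs and sockets carry no
	 * device data. */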
data->arg.label = label; 4120 status = nfs4_do_create(dir, dentry, data); 4121 out_free: 4122 nfs4_free_createdata(data); 4123 out: 4124 return status; 4125 } 4126 4127 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4128 struct iattr *sattr, dev_t rdev) 4129 { 4130 struct nfs4_exception exception = { }; 4131 struct nfs4_label l, *label = NULL; 4132 int err; 4133 4134 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4135 4136 sattr->ia_mode &= ~current_umask(); 4137 do { 4138 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 4139 trace_nfs4_mknod(dir, &dentry->d_name, err); 4140 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4141 &exception); 4142 } while (exception.retry); 4143 4144 nfs4_label_release_security(label); 4145 4146 return err; 4147 } 4148 4149 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 4150 struct nfs_fsstat *fsstat) 4151 { 4152 struct nfs4_statfs_arg args = { 4153 .fh = fhandle, 4154 .bitmask = server->attr_bitmask, 4155 }; 4156 struct nfs4_statfs_res res = { 4157 .fsstat = fsstat, 4158 }; 4159 struct rpc_message msg = { 4160 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 4161 .rpc_argp = &args, 4162 .rpc_resp = &res, 4163 }; 4164 4165 nfs_fattr_init(fsstat->fattr); 4166 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4167 } 4168 4169 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 4170 { 4171 struct nfs4_exception exception = { }; 4172 int err; 4173 do { 4174 err = nfs4_handle_exception(server, 4175 _nfs4_proc_statfs(server, fhandle, fsstat), 4176 &exception); 4177 } while (exception.retry); 4178 return err; 4179 } 4180 4181 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4182 struct nfs_fsinfo *fsinfo) 4183 { 4184 struct nfs4_fsinfo_arg args = { 4185 .fh = fhandle, 4186 .bitmask = server->attr_bitmask, 4187 }; 4188 struct nfs4_fsinfo_res res = { 4189 .fsinfo = fsinfo, 4190 }; 4191 struct rpc_message msg = { 4192 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4193 .rpc_argp = &args, 4194 .rpc_resp = &res, 4195 }; 4196 4197 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4198 } 4199 4200 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4201 { 4202 struct nfs4_exception exception = { }; 4203 unsigned long now = jiffies; 4204 int err; 4205 4206 do { 4207 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4208 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4209 if (err == 0) { 4210 struct nfs_client *clp = server->nfs_client; 4211 4212 spin_lock(&clp->cl_lock); 4213 clp->cl_lease_time = fsinfo->lease_time * HZ; 4214 clp->cl_last_renewal = now; 4215 spin_unlock(&clp->cl_lock); 4216 break; 4217 } 4218 err = nfs4_handle_exception(server, err, &exception); 4219 } while (exception.retry); 4220 return err; 4221 } 4222 4223 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4224 { 4225 int error; 4226 4227 nfs_fattr_init(fsinfo->fattr); 4228 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4229 if (error == 0) { 4230 /* block layout checks this! 
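 * (pnfs_blksize has to be recorded before set_pnfs_layoutdriver() runs,
 * since the blocklayout driver checks the server-provided block size
 * when it is attached)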
*/ 4231 server->pnfs_blksize = fsinfo->blksize; 4232 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 4233 } 4234 4235 return error; 4236 } 4237 4238 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4239 struct nfs_pathconf *pathconf) 4240 { 4241 struct nfs4_pathconf_arg args = { 4242 .fh = fhandle, 4243 .bitmask = server->attr_bitmask, 4244 }; 4245 struct nfs4_pathconf_res res = { 4246 .pathconf = pathconf, 4247 }; 4248 struct rpc_message msg = { 4249 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 4250 .rpc_argp = &args, 4251 .rpc_resp = &res, 4252 }; 4253 4254 /* None of the pathconf attributes are mandatory to implement */ 4255 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 4256 memset(pathconf, 0, sizeof(*pathconf)); 4257 return 0; 4258 } 4259 4260 nfs_fattr_init(pathconf->fattr); 4261 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4262 } 4263 4264 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4265 struct nfs_pathconf *pathconf) 4266 { 4267 struct nfs4_exception exception = { }; 4268 int err; 4269 4270 do { 4271 err = nfs4_handle_exception(server, 4272 _nfs4_proc_pathconf(server, fhandle, pathconf), 4273 &exception); 4274 } while (exception.retry); 4275 return err; 4276 } 4277 4278 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 4279 const struct nfs_open_context *ctx, 4280 const struct nfs_lock_context *l_ctx, 4281 fmode_t fmode) 4282 { 4283 const struct nfs_lockowner *lockowner = NULL; 4284 4285 if (l_ctx != NULL) 4286 lockowner = &l_ctx->lockowner; 4287 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner); 4288 } 4289 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 4290 4291 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 4292 const struct nfs_open_context *ctx, 4293 const struct nfs_lock_context *l_ctx, 4294 fmode_t fmode) 4295 { 4296 nfs4_stateid current_stateid; 4297 4298 /* If the current stateid represents a lost lock, then exit */ 4299 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4300 return true; 4301 return nfs4_stateid_match(stateid, &current_stateid); 4302 } 4303 4304 static bool nfs4_error_stateid_expired(int err) 4305 { 4306 switch (err) { 4307 case -NFS4ERR_DELEG_REVOKED: 4308 case -NFS4ERR_ADMIN_REVOKED: 4309 case -NFS4ERR_BAD_STATEID: 4310 case -NFS4ERR_STALE_STATEID: 4311 case -NFS4ERR_OLD_STATEID: 4312 case -NFS4ERR_OPENMODE: 4313 case -NFS4ERR_EXPIRED: 4314 return true; 4315 } 4316 return false; 4317 } 4318 4319 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4320 { 4321 nfs_invalidate_atime(hdr->inode); 4322 } 4323 4324 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4325 { 4326 struct nfs_server *server = NFS_SERVER(hdr->inode); 4327 4328 trace_nfs4_read(hdr, task->tk_status); 4329 if (nfs4_async_handle_error(task, server, 4330 hdr->args.context->state, 4331 NULL) == -EAGAIN) { 4332 rpc_restart_call_prepare(task); 4333 return -EAGAIN; 4334 } 4335 4336 __nfs4_read_done_cb(hdr); 4337 if (task->tk_status > 0) 4338 renew_lease(server, hdr->timestamp); 4339 return 0; 4340 } 4341 4342 static bool nfs4_read_stateid_changed(struct rpc_task *task, 4343 struct nfs_pgio_args *args) 4344 { 4345 4346 if (!nfs4_error_stateid_expired(task->tk_status) || 4347 nfs4_stateid_is_current(&args->stateid, 4348 args->context, 4349 args->lock_context, 4350 FMODE_READ)) 4351 return false; 4352 rpc_restart_call_prepare(task); 4353 return true; 4354 } 4355 4356 static int
nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4357 { 4358 4359 dprintk("--> %s\n", __func__); 4360 4361 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4362 return -EAGAIN; 4363 if (nfs4_read_stateid_changed(task, &hdr->args)) 4364 return -EAGAIN; 4365 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4366 nfs4_read_done_cb(task, hdr); 4367 } 4368 4369 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4370 struct rpc_message *msg) 4371 { 4372 hdr->timestamp = jiffies; 4373 hdr->pgio_done_cb = nfs4_read_done_cb; 4374 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4375 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4376 } 4377 4378 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4379 struct nfs_pgio_header *hdr) 4380 { 4381 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4382 &hdr->args.seq_args, 4383 &hdr->res.seq_res, 4384 task)) 4385 return 0; 4386 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4387 hdr->args.lock_context, 4388 hdr->rw_ops->rw_mode) == -EIO) 4389 return -EIO; 4390 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4391 return -EIO; 4392 return 0; 4393 } 4394 4395 static int nfs4_write_done_cb(struct rpc_task *task, 4396 struct nfs_pgio_header *hdr) 4397 { 4398 struct inode *inode = hdr->inode; 4399 4400 trace_nfs4_write(hdr, task->tk_status); 4401 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4402 hdr->args.context->state, 4403 NULL) == -EAGAIN) { 4404 rpc_restart_call_prepare(task); 4405 return -EAGAIN; 4406 } 4407 if (task->tk_status >= 0) { 4408 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4409 nfs_writeback_update_inode(hdr); 4410 } 4411 return 0; 4412 } 4413 4414 static bool nfs4_write_stateid_changed(struct rpc_task *task, 4415 struct nfs_pgio_args *args) 4416 { 4417 4418 if (!nfs4_error_stateid_expired(task->tk_status) || 4419 nfs4_stateid_is_current(&args->stateid, 4420 args->context, 4421 args->lock_context, 4422 FMODE_WRITE)) 4423 return false; 4424 rpc_restart_call_prepare(task); 4425 return true; 4426 } 4427 4428 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4429 { 4430 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4431 return -EAGAIN; 4432 if (nfs4_write_stateid_changed(task, &hdr->args)) 4433 return -EAGAIN; 4434 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4435 nfs4_write_done_cb(task, hdr); 4436 } 4437 4438 static 4439 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4440 { 4441 /* Don't request attributes for pNFS or O_DIRECT writes */ 4442 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4443 return false; 4444 /* Otherwise, request attributes if and only if we don't hold 4445 * a delegation 4446 */ 4447 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4448 } 4449 4450 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4451 struct rpc_message *msg) 4452 { 4453 struct nfs_server *server = NFS_SERVER(hdr->inode); 4454 4455 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4456 hdr->args.bitmask = NULL; 4457 hdr->res.fattr = NULL; 4458 } else 4459 hdr->args.bitmask = server->cache_consistency_bitmask; 4460 4461 if (!hdr->pgio_done_cb) 4462 hdr->pgio_done_cb = nfs4_write_done_cb; 4463 hdr->res.server = server; 4464 hdr->timestamp = jiffies; 4465 4466 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4467 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4468 } 4469 4470 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4471 { 4472 nfs4_setup_sequence(NFS_SERVER(data->inode), 4473 &data->args.seq_args, 4474 &data->res.seq_res, 4475 task); 4476 } 4477 4478 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4479 { 4480 struct inode *inode = data->inode; 4481 4482 trace_nfs4_commit(data, task->tk_status); 4483 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4484 NULL, NULL) == -EAGAIN) { 4485 rpc_restart_call_prepare(task); 4486 return -EAGAIN; 4487 } 4488 return 0; 4489 } 4490 4491 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4492 { 4493 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4494 return -EAGAIN; 4495 return data->commit_done_cb(task, data); 4496 } 4497 4498 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4499 { 4500 struct nfs_server *server = NFS_SERVER(data->inode); 4501 4502 if (data->commit_done_cb == NULL) 4503 data->commit_done_cb = nfs4_commit_done_cb; 4504 data->res.server = server; 4505 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4506 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4507 } 4508 4509 struct nfs4_renewdata { 4510 struct nfs_client *client; 4511 unsigned long timestamp; 4512 }; 4513 4514 /* 4515 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4516 * standalone procedure for queueing an asynchronous RENEW. 4517 */ 4518 static void nfs4_renew_release(void *calldata) 4519 { 4520 struct nfs4_renewdata *data = calldata; 4521 struct nfs_client *clp = data->client; 4522 4523 if (atomic_read(&clp->cl_count) > 1) 4524 nfs4_schedule_state_renewal(clp); 4525 nfs_put_client(clp); 4526 kfree(data); 4527 } 4528 4529 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4530 { 4531 struct nfs4_renewdata *data = calldata; 4532 struct nfs_client *clp = data->client; 4533 unsigned long timestamp = data->timestamp; 4534 4535 trace_nfs4_renew_async(clp, task->tk_status); 4536 switch (task->tk_status) { 4537 case 0: 4538 break; 4539 case -NFS4ERR_LEASE_MOVED: 4540 nfs4_schedule_lease_moved_recovery(clp); 4541 break; 4542 default: 4543 /* Unless we're shutting down, schedule state recovery! 
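 * Any other RENEW failure generally means the lease could not be
 * renewed (for instance it already expired or the server rebooted), so
 * full lease recovery is scheduled; NFS4ERR_CB_PATH_DOWN only requires
 * the callback path to be re-established.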
*/ 4544 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4545 return; 4546 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4547 nfs4_schedule_lease_recovery(clp); 4548 return; 4549 } 4550 nfs4_schedule_path_down_recovery(clp); 4551 } 4552 do_renew_lease(clp, timestamp); 4553 } 4554 4555 static const struct rpc_call_ops nfs4_renew_ops = { 4556 .rpc_call_done = nfs4_renew_done, 4557 .rpc_release = nfs4_renew_release, 4558 }; 4559 4560 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4561 { 4562 struct rpc_message msg = { 4563 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4564 .rpc_argp = clp, 4565 .rpc_cred = cred, 4566 }; 4567 struct nfs4_renewdata *data; 4568 4569 if (renew_flags == 0) 4570 return 0; 4571 if (!atomic_inc_not_zero(&clp->cl_count)) 4572 return -EIO; 4573 data = kmalloc(sizeof(*data), GFP_NOFS); 4574 if (data == NULL) 4575 return -ENOMEM; 4576 data->client = clp; 4577 data->timestamp = jiffies; 4578 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4579 &nfs4_renew_ops, data); 4580 } 4581 4582 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4583 { 4584 struct rpc_message msg = { 4585 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4586 .rpc_argp = clp, 4587 .rpc_cred = cred, 4588 }; 4589 unsigned long now = jiffies; 4590 int status; 4591 4592 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4593 if (status < 0) 4594 return status; 4595 do_renew_lease(clp, now); 4596 return 0; 4597 } 4598 4599 static inline int nfs4_server_supports_acls(struct nfs_server *server) 4600 { 4601 return server->caps & NFS_CAP_ACLS; 4602 } 4603 4604 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4605 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4606 * the stack. 
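 * (Concretely the on-stack array holds struct page pointers: with the
 * usual XATTR_SIZE_MAX of 65536 and a 4096-byte PAGE_SIZE that is 16
 * pointers, i.e. roughly 128 bytes on a 64-bit build.)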
4607 */ 4608 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4609 4610 static int buf_to_pages_noslab(const void *buf, size_t buflen, 4611 struct page **pages) 4612 { 4613 struct page *newpage, **spages; 4614 int rc = 0; 4615 size_t len; 4616 spages = pages; 4617 4618 do { 4619 len = min_t(size_t, PAGE_SIZE, buflen); 4620 newpage = alloc_page(GFP_KERNEL); 4621 4622 if (newpage == NULL) 4623 goto unwind; 4624 memcpy(page_address(newpage), buf, len); 4625 buf += len; 4626 buflen -= len; 4627 *pages++ = newpage; 4628 rc++; 4629 } while (buflen != 0); 4630 4631 return rc; 4632 4633 unwind: 4634 for(; rc > 0; rc--) 4635 __free_page(spages[rc-1]); 4636 return -ENOMEM; 4637 } 4638 4639 struct nfs4_cached_acl { 4640 int cached; 4641 size_t len; 4642 char data[0]; 4643 }; 4644 4645 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4646 { 4647 struct nfs_inode *nfsi = NFS_I(inode); 4648 4649 spin_lock(&inode->i_lock); 4650 kfree(nfsi->nfs4_acl); 4651 nfsi->nfs4_acl = acl; 4652 spin_unlock(&inode->i_lock); 4653 } 4654 4655 static void nfs4_zap_acl_attr(struct inode *inode) 4656 { 4657 nfs4_set_cached_acl(inode, NULL); 4658 } 4659 4660 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4661 { 4662 struct nfs_inode *nfsi = NFS_I(inode); 4663 struct nfs4_cached_acl *acl; 4664 int ret = -ENOENT; 4665 4666 spin_lock(&inode->i_lock); 4667 acl = nfsi->nfs4_acl; 4668 if (acl == NULL) 4669 goto out; 4670 if (buf == NULL) /* user is just asking for length */ 4671 goto out_len; 4672 if (acl->cached == 0) 4673 goto out; 4674 ret = -ERANGE; /* see getxattr(2) man page */ 4675 if (acl->len > buflen) 4676 goto out; 4677 memcpy(buf, acl->data, acl->len); 4678 out_len: 4679 ret = acl->len; 4680 out: 4681 spin_unlock(&inode->i_lock); 4682 return ret; 4683 } 4684 4685 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4686 { 4687 struct nfs4_cached_acl *acl; 4688 size_t buflen = sizeof(*acl) + acl_len; 4689 4690 if (buflen <= PAGE_SIZE) { 4691 acl = kmalloc(buflen, GFP_KERNEL); 4692 if (acl == NULL) 4693 goto out; 4694 acl->cached = 1; 4695 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4696 } else { 4697 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4698 if (acl == NULL) 4699 goto out; 4700 acl->cached = 0; 4701 } 4702 acl->len = acl_len; 4703 out: 4704 nfs4_set_cached_acl(inode, acl); 4705 } 4706 4707 /* 4708 * The getxattr API returns the required buffer length when called with a 4709 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4710 * the required buf. On a NULL buf, we send a page of data to the server 4711 * guessing that the ACL request can be serviced by a page. If so, we cache 4712 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4713 * the cache. If not so, we throw away the page, and cache the required 4714 * length. The next getxattr call will then produce another round trip to 4715 * the server, this time with the input buf of the required size. 
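 *
 * Seen from user space the handshake typically looks like this (purely
 * illustrative, error handling omitted):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	len = getxattr(path, "system.nfs4_acl", buf, len);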
4716 */ 4717 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4718 { 4719 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4720 struct nfs_getaclargs args = { 4721 .fh = NFS_FH(inode), 4722 .acl_pages = pages, 4723 .acl_len = buflen, 4724 }; 4725 struct nfs_getaclres res = { 4726 .acl_len = buflen, 4727 }; 4728 struct rpc_message msg = { 4729 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4730 .rpc_argp = &args, 4731 .rpc_resp = &res, 4732 }; 4733 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4734 int ret = -ENOMEM, i; 4735 4736 /* As long as we're doing a round trip to the server anyway, 4737 * let's be prepared for a page of acl data. */ 4738 if (npages == 0) 4739 npages = 1; 4740 if (npages > ARRAY_SIZE(pages)) 4741 return -ERANGE; 4742 4743 for (i = 0; i < npages; i++) { 4744 pages[i] = alloc_page(GFP_KERNEL); 4745 if (!pages[i]) 4746 goto out_free; 4747 } 4748 4749 /* for decoding across pages */ 4750 res.acl_scratch = alloc_page(GFP_KERNEL); 4751 if (!res.acl_scratch) 4752 goto out_free; 4753 4754 args.acl_len = npages * PAGE_SIZE; 4755 4756 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4757 __func__, buf, buflen, npages, args.acl_len); 4758 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4759 &msg, &args.seq_args, &res.seq_res, 0); 4760 if (ret) 4761 goto out_free; 4762 4763 /* Handle the case where the passed-in buffer is too short */ 4764 if (res.acl_flags & NFS4_ACL_TRUNC) { 4765 /* Did the user only issue a request for the acl length? */ 4766 if (buf == NULL) 4767 goto out_ok; 4768 ret = -ERANGE; 4769 goto out_free; 4770 } 4771 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4772 if (buf) { 4773 if (res.acl_len > buflen) { 4774 ret = -ERANGE; 4775 goto out_free; 4776 } 4777 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4778 } 4779 out_ok: 4780 ret = res.acl_len; 4781 out_free: 4782 for (i = 0; i < npages; i++) 4783 if (pages[i]) 4784 __free_page(pages[i]); 4785 if (res.acl_scratch) 4786 __free_page(res.acl_scratch); 4787 return ret; 4788 } 4789 4790 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4791 { 4792 struct nfs4_exception exception = { }; 4793 ssize_t ret; 4794 do { 4795 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4796 trace_nfs4_get_acl(inode, ret); 4797 if (ret >= 0) 4798 break; 4799 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4800 } while (exception.retry); 4801 return ret; 4802 } 4803 4804 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4805 { 4806 struct nfs_server *server = NFS_SERVER(inode); 4807 int ret; 4808 4809 if (!nfs4_server_supports_acls(server)) 4810 return -EOPNOTSUPP; 4811 ret = nfs_revalidate_inode(server, inode); 4812 if (ret < 0) 4813 return ret; 4814 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4815 nfs_zap_acl_cache(inode); 4816 ret = nfs4_read_cached_acl(inode, buf, buflen); 4817 if (ret != -ENOENT) 4818 /* -ENOENT is returned if there is no ACL or if there is an ACL 4819 * but no cached acl data, just the acl length */ 4820 return ret; 4821 return nfs4_get_acl_uncached(inode, buf, buflen); 4822 } 4823 4824 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4825 { 4826 struct nfs_server *server = NFS_SERVER(inode); 4827 struct page *pages[NFS4ACL_MAXPAGES]; 4828 struct nfs_setaclargs arg = { 4829 .fh = NFS_FH(inode), 4830 .acl_pages = pages, 4831 .acl_len = buflen, 4832 }; 4833 struct 
nfs_setaclres res; 4834 struct rpc_message msg = { 4835 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4836 .rpc_argp = &arg, 4837 .rpc_resp = &res, 4838 }; 4839 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4840 int ret, i; 4841 4842 if (!nfs4_server_supports_acls(server)) 4843 return -EOPNOTSUPP; 4844 if (npages > ARRAY_SIZE(pages)) 4845 return -ERANGE; 4846 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages); 4847 if (i < 0) 4848 return i; 4849 nfs4_inode_return_delegation(inode); 4850 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4851 4852 /* 4853 * Free each page after tx, so the only ref left is 4854 * held by the network stack 4855 */ 4856 for (; i > 0; i--) 4857 put_page(pages[i-1]); 4858 4859 /* 4860 * Acl update can result in inode attribute update. 4861 * so mark the attribute cache invalid. 4862 */ 4863 spin_lock(&inode->i_lock); 4864 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4865 spin_unlock(&inode->i_lock); 4866 nfs_access_zap_cache(inode); 4867 nfs_zap_acl_cache(inode); 4868 return ret; 4869 } 4870 4871 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4872 { 4873 struct nfs4_exception exception = { }; 4874 int err; 4875 do { 4876 err = __nfs4_proc_set_acl(inode, buf, buflen); 4877 trace_nfs4_set_acl(inode, err); 4878 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4879 &exception); 4880 } while (exception.retry); 4881 return err; 4882 } 4883 4884 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4885 static int _nfs4_get_security_label(struct inode *inode, void *buf, 4886 size_t buflen) 4887 { 4888 struct nfs_server *server = NFS_SERVER(inode); 4889 struct nfs_fattr fattr; 4890 struct nfs4_label label = {0, 0, buflen, buf}; 4891 4892 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4893 struct nfs4_getattr_arg arg = { 4894 .fh = NFS_FH(inode), 4895 .bitmask = bitmask, 4896 }; 4897 struct nfs4_getattr_res res = { 4898 .fattr = &fattr, 4899 .label = &label, 4900 .server = server, 4901 }; 4902 struct rpc_message msg = { 4903 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4904 .rpc_argp = &arg, 4905 .rpc_resp = &res, 4906 }; 4907 int ret; 4908 4909 nfs_fattr_init(&fattr); 4910 4911 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 4912 if (ret) 4913 return ret; 4914 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 4915 return -ENOENT; 4916 if (buflen < label.len) 4917 return -ERANGE; 4918 return 0; 4919 } 4920 4921 static int nfs4_get_security_label(struct inode *inode, void *buf, 4922 size_t buflen) 4923 { 4924 struct nfs4_exception exception = { }; 4925 int err; 4926 4927 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4928 return -EOPNOTSUPP; 4929 4930 do { 4931 err = _nfs4_get_security_label(inode, buf, buflen); 4932 trace_nfs4_get_security_label(inode, err); 4933 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4934 &exception); 4935 } while (exception.retry); 4936 return err; 4937 } 4938 4939 static int _nfs4_do_set_security_label(struct inode *inode, 4940 struct nfs4_label *ilabel, 4941 struct nfs_fattr *fattr, 4942 struct nfs4_label *olabel) 4943 { 4944 4945 struct iattr sattr = {0}; 4946 struct nfs_server *server = NFS_SERVER(inode); 4947 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4948 struct nfs_setattrargs arg = { 4949 .fh = NFS_FH(inode), 4950 .iap = &sattr, 4951 .server = server, 4952 .bitmask = bitmask, 4953 .label = ilabel, 4954 }; 4955 struct nfs_setattrres res = { 4956 .fattr = fattr, 4957 
.label = olabel, 4958 .server = server, 4959 }; 4960 struct rpc_message msg = { 4961 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 4962 .rpc_argp = &arg, 4963 .rpc_resp = &res, 4964 }; 4965 int status; 4966 4967 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 4968 4969 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4970 if (status) 4971 dprintk("%s failed: %d\n", __func__, status); 4972 4973 return status; 4974 } 4975 4976 static int nfs4_do_set_security_label(struct inode *inode, 4977 struct nfs4_label *ilabel, 4978 struct nfs_fattr *fattr, 4979 struct nfs4_label *olabel) 4980 { 4981 struct nfs4_exception exception = { }; 4982 int err; 4983 4984 do { 4985 err = _nfs4_do_set_security_label(inode, ilabel, 4986 fattr, olabel); 4987 trace_nfs4_set_security_label(inode, err); 4988 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4989 &exception); 4990 } while (exception.retry); 4991 return err; 4992 } 4993 4994 static int 4995 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen) 4996 { 4997 struct nfs4_label ilabel, *olabel = NULL; 4998 struct nfs_fattr fattr; 4999 struct rpc_cred *cred; 5000 struct inode *inode = d_inode(dentry); 5001 int status; 5002 5003 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 5004 return -EOPNOTSUPP; 5005 5006 nfs_fattr_init(&fattr); 5007 5008 ilabel.pi = 0; 5009 ilabel.lfs = 0; 5010 ilabel.label = (char *)buf; 5011 ilabel.len = buflen; 5012 5013 cred = rpc_lookup_cred(); 5014 if (IS_ERR(cred)) 5015 return PTR_ERR(cred); 5016 5017 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 5018 if (IS_ERR(olabel)) { 5019 status = -PTR_ERR(olabel); 5020 goto out; 5021 } 5022 5023 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 5024 if (status == 0) 5025 nfs_setsecurity(inode, &fattr, olabel); 5026 5027 nfs4_label_free(olabel); 5028 out: 5029 put_rpccred(cred); 5030 return status; 5031 } 5032 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 5033 5034 5035 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 5036 nfs4_verifier *bootverf) 5037 { 5038 __be32 verf[2]; 5039 5040 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 5041 /* An impossible timestamp guarantees this value 5042 * will never match a generated boot time. */ 5043 verf[0] = 0; 5044 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 5045 } else { 5046 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 5047 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 5048 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 5049 } 5050 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 5051 } 5052 5053 static int 5054 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 5055 { 5056 size_t len; 5057 char *str; 5058 5059 if (clp->cl_owner_id != NULL) 5060 return 0; 5061 5062 rcu_read_lock(); 5063 len = 14 + strlen(clp->cl_ipaddr) + 1 + 5064 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 5065 1 + 5066 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + 5067 1; 5068 rcu_read_unlock(); 5069 5070 if (len > NFS4_OPAQUE_LIMIT + 1) 5071 return -EINVAL; 5072 5073 /* 5074 * Since this string is allocated at mount time, and held until the 5075 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5076 * about a memory-reclaim deadlock. 
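 *
 * The resulting identifier has the form
 * "Linux NFSv4.0 <clientaddr>/<serveraddr> <transport>", for example
 * "Linux NFSv4.0 192.168.1.10/192.168.1.1 tcp" (addresses purely
 * illustrative).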
5077 */ 5078 str = kmalloc(len, GFP_KERNEL); 5079 if (!str) 5080 return -ENOMEM; 5081 5082 rcu_read_lock(); 5083 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s", 5084 clp->cl_ipaddr, 5085 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 5086 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); 5087 rcu_read_unlock(); 5088 5089 clp->cl_owner_id = str; 5090 return 0; 5091 } 5092 5093 static int 5094 nfs4_init_uniquifier_client_string(struct nfs_client *clp) 5095 { 5096 size_t len; 5097 char *str; 5098 5099 len = 10 + 10 + 1 + 10 + 1 + 5100 strlen(nfs4_client_id_uniquifier) + 1 + 5101 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5102 5103 if (len > NFS4_OPAQUE_LIMIT + 1) 5104 return -EINVAL; 5105 5106 /* 5107 * Since this string is allocated at mount time, and held until the 5108 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5109 * about a memory-reclaim deadlock. 5110 */ 5111 str = kmalloc(len, GFP_KERNEL); 5112 if (!str) 5113 return -ENOMEM; 5114 5115 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 5116 clp->rpc_ops->version, clp->cl_minorversion, 5117 nfs4_client_id_uniquifier, 5118 clp->cl_rpcclient->cl_nodename); 5119 clp->cl_owner_id = str; 5120 return 0; 5121 } 5122 5123 static int 5124 nfs4_init_uniform_client_string(struct nfs_client *clp) 5125 { 5126 size_t len; 5127 char *str; 5128 5129 if (clp->cl_owner_id != NULL) 5130 return 0; 5131 5132 if (nfs4_client_id_uniquifier[0] != '\0') 5133 return nfs4_init_uniquifier_client_string(clp); 5134 5135 len = 10 + 10 + 1 + 10 + 1 + 5136 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5137 5138 if (len > NFS4_OPAQUE_LIMIT + 1) 5139 return -EINVAL; 5140 5141 /* 5142 * Since this string is allocated at mount time, and held until the 5143 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5144 * about a memory-reclaim deadlock. 5145 */ 5146 str = kmalloc(len, GFP_KERNEL); 5147 if (!str) 5148 return -ENOMEM; 5149 5150 scnprintf(str, len, "Linux NFSv%u.%u %s", 5151 clp->rpc_ops->version, clp->cl_minorversion, 5152 clp->cl_rpcclient->cl_nodename); 5153 clp->cl_owner_id = str; 5154 return 0; 5155 } 5156 5157 /* 5158 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5159 * services. Advertise one based on the address family of the 5160 * clientaddr. 5161 */ 5162 static unsigned int 5163 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5164 { 5165 if (strchr(clp->cl_ipaddr, ':') != NULL) 5166 return scnprintf(buf, len, "tcp6"); 5167 else 5168 return scnprintf(buf, len, "tcp"); 5169 } 5170 5171 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5172 { 5173 struct nfs4_setclientid *sc = calldata; 5174 5175 if (task->tk_status == 0) 5176 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5177 } 5178 5179 static const struct rpc_call_ops nfs4_setclientid_ops = { 5180 .rpc_call_done = nfs4_setclientid_done, 5181 }; 5182 5183 /** 5184 * nfs4_proc_setclientid - Negotiate client ID 5185 * @clp: state data structure 5186 * @program: RPC program for NFSv4 callback service 5187 * @port: IP port number for NFS4 callback service 5188 * @cred: RPC credential to use for this call 5189 * @res: where to place the result 5190 * 5191 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
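 *
 * The callback address placed in cb_client4 uses the universal address
 * form "<ipaddr>.<port-hi>.<port-lo>"; for example (illustrative values
 * only) a client at 192.168.1.10 with its callback service on port 45327
 * advertises "192.168.1.10.177.15".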
5192 */ 5193 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5194 unsigned short port, struct rpc_cred *cred, 5195 struct nfs4_setclientid_res *res) 5196 { 5197 nfs4_verifier sc_verifier; 5198 struct nfs4_setclientid setclientid = { 5199 .sc_verifier = &sc_verifier, 5200 .sc_prog = program, 5201 .sc_clnt = clp, 5202 }; 5203 struct rpc_message msg = { 5204 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5205 .rpc_argp = &setclientid, 5206 .rpc_resp = res, 5207 .rpc_cred = cred, 5208 }; 5209 struct rpc_task *task; 5210 struct rpc_task_setup task_setup_data = { 5211 .rpc_client = clp->cl_rpcclient, 5212 .rpc_message = &msg, 5213 .callback_ops = &nfs4_setclientid_ops, 5214 .callback_data = &setclientid, 5215 .flags = RPC_TASK_TIMEOUT, 5216 }; 5217 int status; 5218 5219 /* nfs_client_id4 */ 5220 nfs4_init_boot_verifier(clp, &sc_verifier); 5221 5222 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5223 status = nfs4_init_uniform_client_string(clp); 5224 else 5225 status = nfs4_init_nonuniform_client_string(clp); 5226 5227 if (status) 5228 goto out; 5229 5230 /* cb_client4 */ 5231 setclientid.sc_netid_len = 5232 nfs4_init_callback_netid(clp, 5233 setclientid.sc_netid, 5234 sizeof(setclientid.sc_netid)); 5235 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5236 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5237 clp->cl_ipaddr, port >> 8, port & 255); 5238 5239 dprintk("NFS call setclientid auth=%s, '%s'\n", 5240 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5241 clp->cl_owner_id); 5242 task = rpc_run_task(&task_setup_data); 5243 if (IS_ERR(task)) { 5244 status = PTR_ERR(task); 5245 goto out; 5246 } 5247 status = task->tk_status; 5248 if (setclientid.sc_cred) { 5249 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5250 put_rpccred(setclientid.sc_cred); 5251 } 5252 rpc_put_task(task); 5253 out: 5254 trace_nfs4_setclientid(clp, status); 5255 dprintk("NFS reply setclientid: %d\n", status); 5256 return status; 5257 } 5258 5259 /** 5260 * nfs4_proc_setclientid_confirm - Confirm client ID 5261 * @clp: state data structure 5262 * @res: result of a previous SETCLIENTID 5263 * @cred: RPC credential to use for this call 5264 * 5265 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
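 *
 * This is the second leg of the SETCLIENTID handshake: the client ID and
 * confirmation verifier returned by the server for SETCLIENTID are echoed
 * back, and only then does the server activate the new client ID.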
5266 */ 5267 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5268 struct nfs4_setclientid_res *arg, 5269 struct rpc_cred *cred) 5270 { 5271 struct rpc_message msg = { 5272 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5273 .rpc_argp = arg, 5274 .rpc_cred = cred, 5275 }; 5276 int status; 5277 5278 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5279 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5280 clp->cl_clientid); 5281 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5282 trace_nfs4_setclientid_confirm(clp, status); 5283 dprintk("NFS reply setclientid_confirm: %d\n", status); 5284 return status; 5285 } 5286 5287 struct nfs4_delegreturndata { 5288 struct nfs4_delegreturnargs args; 5289 struct nfs4_delegreturnres res; 5290 struct nfs_fh fh; 5291 nfs4_stateid stateid; 5292 unsigned long timestamp; 5293 struct nfs_fattr fattr; 5294 int rpc_status; 5295 struct inode *inode; 5296 bool roc; 5297 u32 roc_barrier; 5298 }; 5299 5300 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5301 { 5302 struct nfs4_delegreturndata *data = calldata; 5303 5304 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5305 return; 5306 5307 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5308 switch (task->tk_status) { 5309 case 0: 5310 renew_lease(data->res.server, data->timestamp); 5311 case -NFS4ERR_ADMIN_REVOKED: 5312 case -NFS4ERR_DELEG_REVOKED: 5313 case -NFS4ERR_BAD_STATEID: 5314 case -NFS4ERR_OLD_STATEID: 5315 case -NFS4ERR_STALE_STATEID: 5316 case -NFS4ERR_EXPIRED: 5317 task->tk_status = 0; 5318 if (data->roc) 5319 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5320 break; 5321 default: 5322 if (nfs4_async_handle_error(task, data->res.server, 5323 NULL, NULL) == -EAGAIN) { 5324 rpc_restart_call_prepare(task); 5325 return; 5326 } 5327 } 5328 data->rpc_status = task->tk_status; 5329 } 5330 5331 static void nfs4_delegreturn_release(void *calldata) 5332 { 5333 struct nfs4_delegreturndata *data = calldata; 5334 struct inode *inode = data->inode; 5335 5336 if (inode) { 5337 if (data->roc) 5338 pnfs_roc_release(inode); 5339 nfs_iput_and_deactive(inode); 5340 } 5341 kfree(calldata); 5342 } 5343 5344 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5345 { 5346 struct nfs4_delegreturndata *d_data; 5347 5348 d_data = (struct nfs4_delegreturndata *)data; 5349 5350 if (nfs4_wait_on_layoutreturn(d_data->inode, task)) 5351 return; 5352 5353 if (d_data->roc) 5354 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5355 5356 nfs4_setup_sequence(d_data->res.server, 5357 &d_data->args.seq_args, 5358 &d_data->res.seq_res, 5359 task); 5360 } 5361 5362 static const struct rpc_call_ops nfs4_delegreturn_ops = { 5363 .rpc_call_prepare = nfs4_delegreturn_prepare, 5364 .rpc_call_done = nfs4_delegreturn_done, 5365 .rpc_release = nfs4_delegreturn_release, 5366 }; 5367 5368 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5369 { 5370 struct nfs4_delegreturndata *data; 5371 struct nfs_server *server = NFS_SERVER(inode); 5372 struct rpc_task *task; 5373 struct rpc_message msg = { 5374 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5375 .rpc_cred = cred, 5376 }; 5377 struct rpc_task_setup task_setup_data = { 5378 .rpc_client = server->client, 5379 .rpc_message = &msg, 5380 .callback_ops = &nfs4_delegreturn_ops, 5381 .flags = RPC_TASK_ASYNC, 5382 }; 5383 int status = 0; 5384 5385 data = kzalloc(sizeof(*data), 
GFP_NOFS); 5386 if (data == NULL) 5387 return -ENOMEM; 5388 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5389 5390 nfs4_state_protect(server->nfs_client, 5391 NFS_SP4_MACH_CRED_CLEANUP, 5392 &task_setup_data.rpc_client, &msg); 5393 5394 data->args.fhandle = &data->fh; 5395 data->args.stateid = &data->stateid; 5396 data->args.bitmask = server->cache_consistency_bitmask; 5397 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5398 nfs4_stateid_copy(&data->stateid, stateid); 5399 data->res.fattr = &data->fattr; 5400 data->res.server = server; 5401 nfs_fattr_init(data->res.fattr); 5402 data->timestamp = jiffies; 5403 data->rpc_status = 0; 5404 data->inode = nfs_igrab_and_active(inode); 5405 if (data->inode) 5406 data->roc = nfs4_roc(inode); 5407 5408 task_setup_data.callback_data = data; 5409 msg.rpc_argp = &data->args; 5410 msg.rpc_resp = &data->res; 5411 task = rpc_run_task(&task_setup_data); 5412 if (IS_ERR(task)) 5413 return PTR_ERR(task); 5414 if (!issync) 5415 goto out; 5416 status = nfs4_wait_for_completion_rpc_task(task); 5417 if (status != 0) 5418 goto out; 5419 status = data->rpc_status; 5420 if (status == 0) 5421 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5422 else 5423 nfs_refresh_inode(inode, &data->fattr); 5424 out: 5425 rpc_put_task(task); 5426 return status; 5427 } 5428 5429 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5430 { 5431 struct nfs_server *server = NFS_SERVER(inode); 5432 struct nfs4_exception exception = { }; 5433 int err; 5434 do { 5435 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5436 trace_nfs4_delegreturn(inode, stateid, err); 5437 switch (err) { 5438 case -NFS4ERR_STALE_STATEID: 5439 case -NFS4ERR_EXPIRED: 5440 case 0: 5441 return 0; 5442 } 5443 err = nfs4_handle_exception(server, err, &exception); 5444 } while (exception.retry); 5445 return err; 5446 } 5447 5448 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5449 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5450 5451 /* 5452 * sleep, with exponential backoff, and retry the LOCK operation. 
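 * Starting from NFS4_LOCK_MINTIMEOUT the delay doubles on each retry and
 * is capped at NFS4_LOCK_MAXTIMEOUT, so the waits run roughly 1s, 2s, 4s,
 * 8s, 16s and then 30s for every subsequent attempt.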
5453 */ 5454 static unsigned long 5455 nfs4_set_lock_task_retry(unsigned long timeout) 5456 { 5457 freezable_schedule_timeout_killable_unsafe(timeout); 5458 timeout <<= 1; 5459 if (timeout > NFS4_LOCK_MAXTIMEOUT) 5460 return NFS4_LOCK_MAXTIMEOUT; 5461 return timeout; 5462 } 5463 5464 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5465 { 5466 struct inode *inode = state->inode; 5467 struct nfs_server *server = NFS_SERVER(inode); 5468 struct nfs_client *clp = server->nfs_client; 5469 struct nfs_lockt_args arg = { 5470 .fh = NFS_FH(inode), 5471 .fl = request, 5472 }; 5473 struct nfs_lockt_res res = { 5474 .denied = request, 5475 }; 5476 struct rpc_message msg = { 5477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5478 .rpc_argp = &arg, 5479 .rpc_resp = &res, 5480 .rpc_cred = state->owner->so_cred, 5481 }; 5482 struct nfs4_lock_state *lsp; 5483 int status; 5484 5485 arg.lock_owner.clientid = clp->cl_clientid; 5486 status = nfs4_set_lock_state(state, request); 5487 if (status != 0) 5488 goto out; 5489 lsp = request->fl_u.nfs4_fl.owner; 5490 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5491 arg.lock_owner.s_dev = server->s_dev; 5492 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5493 switch (status) { 5494 case 0: 5495 request->fl_type = F_UNLCK; 5496 break; 5497 case -NFS4ERR_DENIED: 5498 status = 0; 5499 } 5500 request->fl_ops->fl_release_private(request); 5501 request->fl_ops = NULL; 5502 out: 5503 return status; 5504 } 5505 5506 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5507 { 5508 struct nfs4_exception exception = { }; 5509 int err; 5510 5511 do { 5512 err = _nfs4_proc_getlk(state, cmd, request); 5513 trace_nfs4_get_lock(request, state, cmd, err); 5514 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5515 &exception); 5516 } while (exception.retry); 5517 return err; 5518 } 5519 5520 static int do_vfs_lock(struct inode *inode, struct file_lock *fl) 5521 { 5522 return locks_lock_inode_wait(inode, fl); 5523 } 5524 5525 struct nfs4_unlockdata { 5526 struct nfs_locku_args arg; 5527 struct nfs_locku_res res; 5528 struct nfs4_lock_state *lsp; 5529 struct nfs_open_context *ctx; 5530 struct file_lock fl; 5531 struct nfs_server *server; 5532 unsigned long timestamp; 5533 }; 5534 5535 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5536 struct nfs_open_context *ctx, 5537 struct nfs4_lock_state *lsp, 5538 struct nfs_seqid *seqid) 5539 { 5540 struct nfs4_unlockdata *p; 5541 struct inode *inode = lsp->ls_state->inode; 5542 5543 p = kzalloc(sizeof(*p), GFP_NOFS); 5544 if (p == NULL) 5545 return NULL; 5546 p->arg.fh = NFS_FH(inode); 5547 p->arg.fl = &p->fl; 5548 p->arg.seqid = seqid; 5549 p->res.seqid = seqid; 5550 p->lsp = lsp; 5551 atomic_inc(&lsp->ls_count); 5552 /* Ensure we don't close file until we're done freeing locks! 
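 * (the open context reference pins the nfs4_state, and with it the open
 * stateid that the LOCKU depends on, until the unlock has completed)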
*/ 5553 p->ctx = get_nfs_open_context(ctx); 5554 memcpy(&p->fl, fl, sizeof(p->fl)); 5555 p->server = NFS_SERVER(inode); 5556 return p; 5557 } 5558 5559 static void nfs4_locku_release_calldata(void *data) 5560 { 5561 struct nfs4_unlockdata *calldata = data; 5562 nfs_free_seqid(calldata->arg.seqid); 5563 nfs4_put_lock_state(calldata->lsp); 5564 put_nfs_open_context(calldata->ctx); 5565 kfree(calldata); 5566 } 5567 5568 static void nfs4_locku_done(struct rpc_task *task, void *data) 5569 { 5570 struct nfs4_unlockdata *calldata = data; 5571 5572 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5573 return; 5574 switch (task->tk_status) { 5575 case 0: 5576 renew_lease(calldata->server, calldata->timestamp); 5577 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); 5578 if (nfs4_update_lock_stateid(calldata->lsp, 5579 &calldata->res.stateid)) 5580 break; 5581 case -NFS4ERR_BAD_STATEID: 5582 case -NFS4ERR_OLD_STATEID: 5583 case -NFS4ERR_STALE_STATEID: 5584 case -NFS4ERR_EXPIRED: 5585 if (!nfs4_stateid_match(&calldata->arg.stateid, 5586 &calldata->lsp->ls_stateid)) 5587 rpc_restart_call_prepare(task); 5588 break; 5589 default: 5590 if (nfs4_async_handle_error(task, calldata->server, 5591 NULL, NULL) == -EAGAIN) 5592 rpc_restart_call_prepare(task); 5593 } 5594 nfs_release_seqid(calldata->arg.seqid); 5595 } 5596 5597 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5598 { 5599 struct nfs4_unlockdata *calldata = data; 5600 5601 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5602 goto out_wait; 5603 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5604 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5605 /* Note: exit _without_ running nfs4_locku_done */ 5606 goto out_no_action; 5607 } 5608 calldata->timestamp = jiffies; 5609 if (nfs4_setup_sequence(calldata->server, 5610 &calldata->arg.seq_args, 5611 &calldata->res.seq_res, 5612 task) != 0) 5613 nfs_release_seqid(calldata->arg.seqid); 5614 return; 5615 out_no_action: 5616 task->tk_action = NULL; 5617 out_wait: 5618 nfs4_sequence_done(task, &calldata->res.seq_res); 5619 } 5620 5621 static const struct rpc_call_ops nfs4_locku_ops = { 5622 .rpc_call_prepare = nfs4_locku_prepare, 5623 .rpc_call_done = nfs4_locku_done, 5624 .rpc_release = nfs4_locku_release_calldata, 5625 }; 5626 5627 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5628 struct nfs_open_context *ctx, 5629 struct nfs4_lock_state *lsp, 5630 struct nfs_seqid *seqid) 5631 { 5632 struct nfs4_unlockdata *data; 5633 struct rpc_message msg = { 5634 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5635 .rpc_cred = ctx->cred, 5636 }; 5637 struct rpc_task_setup task_setup_data = { 5638 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5639 .rpc_message = &msg, 5640 .callback_ops = &nfs4_locku_ops, 5641 .workqueue = nfsiod_workqueue, 5642 .flags = RPC_TASK_ASYNC, 5643 }; 5644 5645 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5646 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5647 5648 /* Ensure this is an unlock - when canceling a lock, the 5649 * canceled lock is passed in, and it won't be an unlock. 
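 * When an interrupted LOCK request is being cancelled, the original
 * request (fl_type F_RDLCK or F_WRLCK) is reused for the LOCKU, so the
 * type is forced to F_UNLCK first.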
5650 */ 5651 fl->fl_type = F_UNLCK; 5652 5653 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5654 if (data == NULL) { 5655 nfs_free_seqid(seqid); 5656 return ERR_PTR(-ENOMEM); 5657 } 5658 5659 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5660 msg.rpc_argp = &data->arg; 5661 msg.rpc_resp = &data->res; 5662 task_setup_data.callback_data = data; 5663 return rpc_run_task(&task_setup_data); 5664 } 5665 5666 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5667 { 5668 struct inode *inode = state->inode; 5669 struct nfs4_state_owner *sp = state->owner; 5670 struct nfs_inode *nfsi = NFS_I(inode); 5671 struct nfs_seqid *seqid; 5672 struct nfs4_lock_state *lsp; 5673 struct rpc_task *task; 5674 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5675 int status = 0; 5676 unsigned char fl_flags = request->fl_flags; 5677 5678 status = nfs4_set_lock_state(state, request); 5679 /* Unlock _before_ we do the RPC call */ 5680 request->fl_flags |= FL_EXISTS; 5681 /* Exclude nfs_delegation_claim_locks() */ 5682 mutex_lock(&sp->so_delegreturn_mutex); 5683 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5684 down_read(&nfsi->rwsem); 5685 if (do_vfs_lock(inode, request) == -ENOENT) { 5686 up_read(&nfsi->rwsem); 5687 mutex_unlock(&sp->so_delegreturn_mutex); 5688 goto out; 5689 } 5690 up_read(&nfsi->rwsem); 5691 mutex_unlock(&sp->so_delegreturn_mutex); 5692 if (status != 0) 5693 goto out; 5694 /* Is this a delegated lock? */ 5695 lsp = request->fl_u.nfs4_fl.owner; 5696 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5697 goto out; 5698 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5699 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5700 status = -ENOMEM; 5701 if (IS_ERR(seqid)) 5702 goto out; 5703 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5704 status = PTR_ERR(task); 5705 if (IS_ERR(task)) 5706 goto out; 5707 status = nfs4_wait_for_completion_rpc_task(task); 5708 rpc_put_task(task); 5709 out: 5710 request->fl_flags = fl_flags; 5711 trace_nfs4_unlock(request, state, F_SETLK, status); 5712 return status; 5713 } 5714 5715 struct nfs4_lockdata { 5716 struct nfs_lock_args arg; 5717 struct nfs_lock_res res; 5718 struct nfs4_lock_state *lsp; 5719 struct nfs_open_context *ctx; 5720 struct file_lock fl; 5721 unsigned long timestamp; 5722 int rpc_status; 5723 int cancelled; 5724 struct nfs_server *server; 5725 }; 5726 5727 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5728 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5729 gfp_t gfp_mask) 5730 { 5731 struct nfs4_lockdata *p; 5732 struct inode *inode = lsp->ls_state->inode; 5733 struct nfs_server *server = NFS_SERVER(inode); 5734 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5735 5736 p = kzalloc(sizeof(*p), gfp_mask); 5737 if (p == NULL) 5738 return NULL; 5739 5740 p->arg.fh = NFS_FH(inode); 5741 p->arg.fl = &p->fl; 5742 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5743 if (IS_ERR(p->arg.open_seqid)) 5744 goto out_free; 5745 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5746 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5747 if (IS_ERR(p->arg.lock_seqid)) 5748 goto out_free_seqid; 5749 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5750 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5751 p->arg.lock_owner.s_dev = server->s_dev; 5752 p->res.lock_seqid = p->arg.lock_seqid; 5753 
p->lsp = lsp; 5754 p->server = server; 5755 atomic_inc(&lsp->ls_count); 5756 p->ctx = get_nfs_open_context(ctx); 5757 get_file(fl->fl_file); 5758 memcpy(&p->fl, fl, sizeof(p->fl)); 5759 return p; 5760 out_free_seqid: 5761 nfs_free_seqid(p->arg.open_seqid); 5762 out_free: 5763 kfree(p); 5764 return NULL; 5765 } 5766 5767 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5768 { 5769 struct nfs4_lockdata *data = calldata; 5770 struct nfs4_state *state = data->lsp->ls_state; 5771 5772 dprintk("%s: begin!\n", __func__); 5773 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5774 goto out_wait; 5775 /* Do we need to do an open_to_lock_owner? */ 5776 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5777 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5778 goto out_release_lock_seqid; 5779 } 5780 nfs4_stateid_copy(&data->arg.open_stateid, 5781 &state->open_stateid); 5782 data->arg.new_lock_owner = 1; 5783 data->res.open_seqid = data->arg.open_seqid; 5784 } else { 5785 data->arg.new_lock_owner = 0; 5786 nfs4_stateid_copy(&data->arg.lock_stateid, 5787 &data->lsp->ls_stateid); 5788 } 5789 if (!nfs4_valid_open_stateid(state)) { 5790 data->rpc_status = -EBADF; 5791 task->tk_action = NULL; 5792 goto out_release_open_seqid; 5793 } 5794 data->timestamp = jiffies; 5795 if (nfs4_setup_sequence(data->server, 5796 &data->arg.seq_args, 5797 &data->res.seq_res, 5798 task) == 0) 5799 return; 5800 out_release_open_seqid: 5801 nfs_release_seqid(data->arg.open_seqid); 5802 out_release_lock_seqid: 5803 nfs_release_seqid(data->arg.lock_seqid); 5804 out_wait: 5805 nfs4_sequence_done(task, &data->res.seq_res); 5806 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5807 } 5808 5809 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5810 { 5811 struct nfs4_lockdata *data = calldata; 5812 struct nfs4_lock_state *lsp = data->lsp; 5813 5814 dprintk("%s: begin!\n", __func__); 5815 5816 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5817 return; 5818 5819 data->rpc_status = task->tk_status; 5820 switch (task->tk_status) { 5821 case 0: 5822 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 5823 data->timestamp); 5824 if (data->arg.new_lock) { 5825 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5826 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) { 5827 rpc_restart_call_prepare(task); 5828 break; 5829 } 5830 } 5831 if (data->arg.new_lock_owner != 0) { 5832 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5833 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5834 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5835 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5836 rpc_restart_call_prepare(task); 5837 break; 5838 case -NFS4ERR_BAD_STATEID: 5839 case -NFS4ERR_OLD_STATEID: 5840 case -NFS4ERR_STALE_STATEID: 5841 case -NFS4ERR_EXPIRED: 5842 if (data->arg.new_lock_owner != 0) { 5843 if (!nfs4_stateid_match(&data->arg.open_stateid, 5844 &lsp->ls_state->open_stateid)) 5845 rpc_restart_call_prepare(task); 5846 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5847 &lsp->ls_stateid)) 5848 rpc_restart_call_prepare(task); 5849 } 5850 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5851 } 5852 5853 static void nfs4_lock_release(void *calldata) 5854 { 5855 struct nfs4_lockdata *data = calldata; 5856 5857 dprintk("%s: begin!\n", __func__); 5858 nfs_free_seqid(data->arg.open_seqid); 5859 if (data->cancelled != 0) { 5860 struct rpc_task *task; 5861 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5862 
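/* reuse the LOCK call's lock_seqid for the cancelling LOCKU */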
data->arg.lock_seqid); 5863 if (!IS_ERR(task)) 5864 rpc_put_task_async(task); 5865 dprintk("%s: cancelling lock!\n", __func__); 5866 } else 5867 nfs_free_seqid(data->arg.lock_seqid); 5868 nfs4_put_lock_state(data->lsp); 5869 put_nfs_open_context(data->ctx); 5870 fput(data->fl.fl_file); 5871 kfree(data); 5872 dprintk("%s: done!\n", __func__); 5873 } 5874 5875 static const struct rpc_call_ops nfs4_lock_ops = { 5876 .rpc_call_prepare = nfs4_lock_prepare, 5877 .rpc_call_done = nfs4_lock_done, 5878 .rpc_release = nfs4_lock_release, 5879 }; 5880 5881 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5882 { 5883 switch (error) { 5884 case -NFS4ERR_ADMIN_REVOKED: 5885 case -NFS4ERR_BAD_STATEID: 5886 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5887 if (new_lock_owner != 0 || 5888 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 5889 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 5890 break; 5891 case -NFS4ERR_STALE_STATEID: 5892 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5893 case -NFS4ERR_EXPIRED: 5894 nfs4_schedule_lease_recovery(server->nfs_client); 5895 }; 5896 } 5897 5898 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 5899 { 5900 struct nfs4_lockdata *data; 5901 struct rpc_task *task; 5902 struct rpc_message msg = { 5903 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 5904 .rpc_cred = state->owner->so_cred, 5905 }; 5906 struct rpc_task_setup task_setup_data = { 5907 .rpc_client = NFS_CLIENT(state->inode), 5908 .rpc_message = &msg, 5909 .callback_ops = &nfs4_lock_ops, 5910 .workqueue = nfsiod_workqueue, 5911 .flags = RPC_TASK_ASYNC, 5912 }; 5913 int ret; 5914 5915 dprintk("%s: begin!\n", __func__); 5916 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 5917 fl->fl_u.nfs4_fl.owner, 5918 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 5919 if (data == NULL) 5920 return -ENOMEM; 5921 if (IS_SETLKW(cmd)) 5922 data->arg.block = 1; 5923 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5924 msg.rpc_argp = &data->arg; 5925 msg.rpc_resp = &data->res; 5926 task_setup_data.callback_data = data; 5927 if (recovery_type > NFS_LOCK_NEW) { 5928 if (recovery_type == NFS_LOCK_RECLAIM) 5929 data->arg.reclaim = NFS_LOCK_RECLAIM; 5930 nfs4_set_sequence_privileged(&data->arg.seq_args); 5931 } else 5932 data->arg.new_lock = 1; 5933 task = rpc_run_task(&task_setup_data); 5934 if (IS_ERR(task)) 5935 return PTR_ERR(task); 5936 ret = nfs4_wait_for_completion_rpc_task(task); 5937 if (ret == 0) { 5938 ret = data->rpc_status; 5939 if (ret) 5940 nfs4_handle_setlk_error(data->server, data->lsp, 5941 data->arg.new_lock_owner, ret); 5942 } else 5943 data->cancelled = 1; 5944 rpc_put_task(task); 5945 dprintk("%s: done, ret = %d!\n", __func__, ret); 5946 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 5947 return ret; 5948 } 5949 5950 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 5951 { 5952 struct nfs_server *server = NFS_SERVER(state->inode); 5953 struct nfs4_exception exception = { 5954 .inode = state->inode, 5955 }; 5956 int err; 5957 5958 do { 5959 /* Cache the lock if possible... 
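 * while we still hold a delegation there is nothing to reclaim on
 * the server, so the lock only needs to exist locally.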
*/ 5960 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5961 return 0; 5962 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5963 if (err != -NFS4ERR_DELAY) 5964 break; 5965 nfs4_handle_exception(server, err, &exception); 5966 } while (exception.retry); 5967 return err; 5968 } 5969 5970 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 5971 { 5972 struct nfs_server *server = NFS_SERVER(state->inode); 5973 struct nfs4_exception exception = { 5974 .inode = state->inode, 5975 }; 5976 int err; 5977 5978 err = nfs4_set_lock_state(state, request); 5979 if (err != 0) 5980 return err; 5981 if (!recover_lost_locks) { 5982 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 5983 return 0; 5984 } 5985 do { 5986 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5987 return 0; 5988 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 5989 switch (err) { 5990 default: 5991 goto out; 5992 case -NFS4ERR_GRACE: 5993 case -NFS4ERR_DELAY: 5994 nfs4_handle_exception(server, err, &exception); 5995 err = 0; 5996 } 5997 } while (exception.retry); 5998 out: 5999 return err; 6000 } 6001 6002 #if defined(CONFIG_NFS_V4_1) 6003 /** 6004 * nfs41_check_expired_locks - possibly free a lock stateid 6005 * 6006 * @state: NFSv4 state for an inode 6007 * 6008 * Returns NFS_OK if recovery for this stateid is now finished. 6009 * Otherwise a negative NFS4ERR value is returned. 6010 */ 6011 static int nfs41_check_expired_locks(struct nfs4_state *state) 6012 { 6013 int status, ret = -NFS4ERR_BAD_STATEID; 6014 struct nfs4_lock_state *lsp; 6015 struct nfs_server *server = NFS_SERVER(state->inode); 6016 6017 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 6018 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 6019 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 6020 6021 status = nfs41_test_stateid(server, 6022 &lsp->ls_stateid, 6023 cred); 6024 trace_nfs4_test_lock_stateid(state, lsp, status); 6025 if (status != NFS_OK) { 6026 /* Free the stateid unless the server 6027 * informs us the stateid is unrecognized. */ 6028 if (status != -NFS4ERR_BAD_STATEID) 6029 nfs41_free_stateid(server, 6030 &lsp->ls_stateid, 6031 cred); 6032 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 6033 ret = status; 6034 } 6035 } 6036 }; 6037 6038 return ret; 6039 } 6040 6041 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 6042 { 6043 int status = NFS_OK; 6044 6045 if (test_bit(LK_STATE_IN_USE, &state->flags)) 6046 status = nfs41_check_expired_locks(state); 6047 if (status != NFS_OK) 6048 status = nfs4_lock_expired(state, request); 6049 return status; 6050 } 6051 #endif 6052 6053 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6054 { 6055 struct nfs_inode *nfsi = NFS_I(state->inode); 6056 unsigned char fl_flags = request->fl_flags; 6057 int status = -ENOLCK; 6058 6059 if ((fl_flags & FL_POSIX) && 6060 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6061 goto out; 6062 /* Is this a delegated open? */ 6063 status = nfs4_set_lock_state(state, request); 6064 if (status != 0) 6065 goto out; 6066 request->fl_flags |= FL_ACCESS; 6067 status = do_vfs_lock(state->inode, request); 6068 if (status < 0) 6069 goto out; 6070 down_read(&nfsi->rwsem); 6071 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 6072 /* Yes: cache locks! */ 6073 /* ...but avoid races with delegation recall... 
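 * by masking off FL_SLEEP below, so the VFS lock attempt cannot
 * block while nfsi->rwsem is held.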
*/ 6074 request->fl_flags = fl_flags & ~FL_SLEEP; 6075 status = do_vfs_lock(state->inode, request); 6076 up_read(&nfsi->rwsem); 6077 goto out; 6078 } 6079 up_read(&nfsi->rwsem); 6080 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 6081 out: 6082 request->fl_flags = fl_flags; 6083 return status; 6084 } 6085 6086 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6087 { 6088 struct nfs4_exception exception = { 6089 .state = state, 6090 .inode = state->inode, 6091 }; 6092 int err; 6093 6094 do { 6095 err = _nfs4_proc_setlk(state, cmd, request); 6096 if (err == -NFS4ERR_DENIED) 6097 err = -EAGAIN; 6098 err = nfs4_handle_exception(NFS_SERVER(state->inode), 6099 err, &exception); 6100 } while (exception.retry); 6101 return err; 6102 } 6103 6104 static int 6105 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 6106 { 6107 struct nfs_open_context *ctx; 6108 struct nfs4_state *state; 6109 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6110 int status; 6111 6112 /* verify open state */ 6113 ctx = nfs_file_open_context(filp); 6114 state = ctx->state; 6115 6116 if (request->fl_start < 0 || request->fl_end < 0) 6117 return -EINVAL; 6118 6119 if (IS_GETLK(cmd)) { 6120 if (state != NULL) 6121 return nfs4_proc_getlk(state, F_GETLK, request); 6122 return 0; 6123 } 6124 6125 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 6126 return -EINVAL; 6127 6128 if (request->fl_type == F_UNLCK) { 6129 if (state != NULL) 6130 return nfs4_proc_unlck(state, cmd, request); 6131 return 0; 6132 } 6133 6134 if (state == NULL) 6135 return -ENOLCK; 6136 /* 6137 * Don't rely on the VFS having checked the file open mode, 6138 * since it won't do this for flock() locks. 6139 */ 6140 switch (request->fl_type) { 6141 case F_RDLCK: 6142 if (!(filp->f_mode & FMODE_READ)) 6143 return -EBADF; 6144 break; 6145 case F_WRLCK: 6146 if (!(filp->f_mode & FMODE_WRITE)) 6147 return -EBADF; 6148 } 6149 6150 do { 6151 status = nfs4_proc_setlk(state, cmd, request); 6152 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6153 break; 6154 timeout = nfs4_set_lock_task_retry(timeout); 6155 status = -ERESTARTSYS; 6156 if (signalled()) 6157 break; 6158 } while(status < 0); 6159 return status; 6160 } 6161 6162 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6163 { 6164 struct nfs_server *server = NFS_SERVER(state->inode); 6165 int err; 6166 6167 err = nfs4_set_lock_state(state, fl); 6168 if (err != 0) 6169 return err; 6170 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6171 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6172 } 6173 6174 struct nfs_release_lockowner_data { 6175 struct nfs4_lock_state *lsp; 6176 struct nfs_server *server; 6177 struct nfs_release_lockowner_args args; 6178 struct nfs_release_lockowner_res res; 6179 unsigned long timestamp; 6180 }; 6181 6182 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6183 { 6184 struct nfs_release_lockowner_data *data = calldata; 6185 struct nfs_server *server = data->server; 6186 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6187 &data->args.seq_args, &data->res.seq_res, task); 6188 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6189 data->timestamp = jiffies; 6190 } 6191 6192 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6193 { 6194 struct nfs_release_lockowner_data *data = calldata; 6195 struct nfs_server *server = data->server; 6196 6197 
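/* RELEASE_LOCKOWNER is only sent on NFSv4.0 mounts, so finish the
 * v4.0 sequencing before looking at tk_status. */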
nfs40_sequence_done(task, &data->res.seq_res); 6198 6199 switch (task->tk_status) { 6200 case 0: 6201 renew_lease(server, data->timestamp); 6202 break; 6203 case -NFS4ERR_STALE_CLIENTID: 6204 case -NFS4ERR_EXPIRED: 6205 nfs4_schedule_lease_recovery(server->nfs_client); 6206 break; 6207 case -NFS4ERR_LEASE_MOVED: 6208 case -NFS4ERR_DELAY: 6209 if (nfs4_async_handle_error(task, server, 6210 NULL, NULL) == -EAGAIN) 6211 rpc_restart_call_prepare(task); 6212 } 6213 } 6214 6215 static void nfs4_release_lockowner_release(void *calldata) 6216 { 6217 struct nfs_release_lockowner_data *data = calldata; 6218 nfs4_free_lock_state(data->server, data->lsp); 6219 kfree(calldata); 6220 } 6221 6222 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6223 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6224 .rpc_call_done = nfs4_release_lockowner_done, 6225 .rpc_release = nfs4_release_lockowner_release, 6226 }; 6227 6228 static void 6229 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6230 { 6231 struct nfs_release_lockowner_data *data; 6232 struct rpc_message msg = { 6233 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6234 }; 6235 6236 if (server->nfs_client->cl_mvops->minor_version != 0) 6237 return; 6238 6239 data = kmalloc(sizeof(*data), GFP_NOFS); 6240 if (!data) 6241 return; 6242 data->lsp = lsp; 6243 data->server = server; 6244 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6245 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6246 data->args.lock_owner.s_dev = server->s_dev; 6247 6248 msg.rpc_argp = &data->args; 6249 msg.rpc_resp = &data->res; 6250 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6251 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6252 } 6253 6254 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6255 6256 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 6257 struct dentry *dentry, const char *key, 6258 const void *buf, size_t buflen, 6259 int flags) 6260 { 6261 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen); 6262 } 6263 6264 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 6265 struct dentry *dentry, const char *key, 6266 void *buf, size_t buflen) 6267 { 6268 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen); 6269 } 6270 6271 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 6272 { 6273 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))); 6274 } 6275 6276 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6277 6278 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 6279 struct dentry *dentry, const char *key, 6280 const void *buf, size_t buflen, 6281 int flags) 6282 { 6283 if (security_ismaclabel(key)) 6284 return nfs4_set_security_label(dentry, buf, buflen); 6285 6286 return -EOPNOTSUPP; 6287 } 6288 6289 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 6290 struct dentry *dentry, const char *key, 6291 void *buf, size_t buflen) 6292 { 6293 if (security_ismaclabel(key)) 6294 return nfs4_get_security_label(d_inode(dentry), buf, buflen); 6295 return -EOPNOTSUPP; 6296 } 6297 6298 static ssize_t 6299 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6300 { 6301 int len = 0; 6302 6303 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 6304 len = security_inode_listsecurity(inode, list, list_len); 6305 if (list_len && len > list_len) 6306 return -ERANGE; 6307 } 6308 return len; 6309 } 6310 6311 static const struct 
xattr_handler nfs4_xattr_nfs4_label_handler = { 6312 .prefix = XATTR_SECURITY_PREFIX, 6313 .get = nfs4_xattr_get_nfs4_label, 6314 .set = nfs4_xattr_set_nfs4_label, 6315 }; 6316 6317 #else 6318 6319 static ssize_t 6320 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6321 { 6322 return 0; 6323 } 6324 6325 #endif 6326 6327 /* 6328 * nfs_fhget will use either the mounted_on_fileid or the fileid 6329 */ 6330 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6331 { 6332 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6333 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6334 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6335 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6336 return; 6337 6338 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6339 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6340 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6341 fattr->nlink = 2; 6342 } 6343 6344 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6345 const struct qstr *name, 6346 struct nfs4_fs_locations *fs_locations, 6347 struct page *page) 6348 { 6349 struct nfs_server *server = NFS_SERVER(dir); 6350 u32 bitmask[3] = { 6351 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6352 }; 6353 struct nfs4_fs_locations_arg args = { 6354 .dir_fh = NFS_FH(dir), 6355 .name = name, 6356 .page = page, 6357 .bitmask = bitmask, 6358 }; 6359 struct nfs4_fs_locations_res res = { 6360 .fs_locations = fs_locations, 6361 }; 6362 struct rpc_message msg = { 6363 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6364 .rpc_argp = &args, 6365 .rpc_resp = &res, 6366 }; 6367 int status; 6368 6369 dprintk("%s: start\n", __func__); 6370 6371 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 6372 * is not supported */ 6373 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6374 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6375 else 6376 bitmask[0] |= FATTR4_WORD0_FILEID; 6377 6378 nfs_fattr_init(&fs_locations->fattr); 6379 fs_locations->server = server; 6380 fs_locations->nlocations = 0; 6381 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6382 dprintk("%s: returned status = %d\n", __func__, status); 6383 return status; 6384 } 6385 6386 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6387 const struct qstr *name, 6388 struct nfs4_fs_locations *fs_locations, 6389 struct page *page) 6390 { 6391 struct nfs4_exception exception = { }; 6392 int err; 6393 do { 6394 err = _nfs4_proc_fs_locations(client, dir, name, 6395 fs_locations, page); 6396 trace_nfs4_get_fs_locations(dir, name, err); 6397 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6398 &exception); 6399 } while (exception.retry); 6400 return err; 6401 } 6402 6403 /* 6404 * This operation also signals the server that this client is 6405 * performing migration recovery. The server can stop returning 6406 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6407 * appended to this compound to identify the client ID which is 6408 * performing recovery. 
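 * The appended RENEW also refreshes the lease; see the renew_lease()
 * call made once the compound succeeds.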
6409 */ 6410 static int _nfs40_proc_get_locations(struct inode *inode, 6411 struct nfs4_fs_locations *locations, 6412 struct page *page, struct rpc_cred *cred) 6413 { 6414 struct nfs_server *server = NFS_SERVER(inode); 6415 struct rpc_clnt *clnt = server->client; 6416 u32 bitmask[2] = { 6417 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6418 }; 6419 struct nfs4_fs_locations_arg args = { 6420 .clientid = server->nfs_client->cl_clientid, 6421 .fh = NFS_FH(inode), 6422 .page = page, 6423 .bitmask = bitmask, 6424 .migration = 1, /* skip LOOKUP */ 6425 .renew = 1, /* append RENEW */ 6426 }; 6427 struct nfs4_fs_locations_res res = { 6428 .fs_locations = locations, 6429 .migration = 1, 6430 .renew = 1, 6431 }; 6432 struct rpc_message msg = { 6433 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6434 .rpc_argp = &args, 6435 .rpc_resp = &res, 6436 .rpc_cred = cred, 6437 }; 6438 unsigned long now = jiffies; 6439 int status; 6440 6441 nfs_fattr_init(&locations->fattr); 6442 locations->server = server; 6443 locations->nlocations = 0; 6444 6445 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6446 nfs4_set_sequence_privileged(&args.seq_args); 6447 status = nfs4_call_sync_sequence(clnt, server, &msg, 6448 &args.seq_args, &res.seq_res); 6449 if (status) 6450 return status; 6451 6452 renew_lease(server, now); 6453 return 0; 6454 } 6455 6456 #ifdef CONFIG_NFS_V4_1 6457 6458 /* 6459 * This operation also signals the server that this client is 6460 * performing migration recovery. The server can stop asserting 6461 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6462 * performing this operation is identified in the SEQUENCE 6463 * operation in this compound. 6464 * 6465 * When the client supports GETATTR(fs_locations_info), it can 6466 * be plumbed in here. 6467 */ 6468 static int _nfs41_proc_get_locations(struct inode *inode, 6469 struct nfs4_fs_locations *locations, 6470 struct page *page, struct rpc_cred *cred) 6471 { 6472 struct nfs_server *server = NFS_SERVER(inode); 6473 struct rpc_clnt *clnt = server->client; 6474 u32 bitmask[2] = { 6475 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6476 }; 6477 struct nfs4_fs_locations_arg args = { 6478 .fh = NFS_FH(inode), 6479 .page = page, 6480 .bitmask = bitmask, 6481 .migration = 1, /* skip LOOKUP */ 6482 }; 6483 struct nfs4_fs_locations_res res = { 6484 .fs_locations = locations, 6485 .migration = 1, 6486 }; 6487 struct rpc_message msg = { 6488 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6489 .rpc_argp = &args, 6490 .rpc_resp = &res, 6491 .rpc_cred = cred, 6492 }; 6493 int status; 6494 6495 nfs_fattr_init(&locations->fattr); 6496 locations->server = server; 6497 locations->nlocations = 0; 6498 6499 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6500 nfs4_set_sequence_privileged(&args.seq_args); 6501 status = nfs4_call_sync_sequence(clnt, server, &msg, 6502 &args.seq_args, &res.seq_res); 6503 if (status == NFS4_OK && 6504 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6505 status = -NFS4ERR_LEASE_MOVED; 6506 return status; 6507 } 6508 6509 #endif /* CONFIG_NFS_V4_1 */ 6510 6511 /** 6512 * nfs4_proc_get_locations - discover locations for a migrated FSID 6513 * @inode: inode on FSID that is migrating 6514 * @locations: result of query 6515 * @page: buffer 6516 * @cred: credential to use for this operation 6517 * 6518 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6519 * operation failed, or a negative errno if a local error occurred. 
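 * An -NFS4ERR_DELAY result from the minor-version specific operation
 * is simply retried here.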
6520 * 6521 * On success, "locations" is filled in, but if the server has 6522 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6523 * asserted. 6524 * 6525 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6526 * from this client that require migration recovery. 6527 */ 6528 int nfs4_proc_get_locations(struct inode *inode, 6529 struct nfs4_fs_locations *locations, 6530 struct page *page, struct rpc_cred *cred) 6531 { 6532 struct nfs_server *server = NFS_SERVER(inode); 6533 struct nfs_client *clp = server->nfs_client; 6534 const struct nfs4_mig_recovery_ops *ops = 6535 clp->cl_mvops->mig_recovery_ops; 6536 struct nfs4_exception exception = { }; 6537 int status; 6538 6539 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6540 (unsigned long long)server->fsid.major, 6541 (unsigned long long)server->fsid.minor, 6542 clp->cl_hostname); 6543 nfs_display_fhandle(NFS_FH(inode), __func__); 6544 6545 do { 6546 status = ops->get_locations(inode, locations, page, cred); 6547 if (status != -NFS4ERR_DELAY) 6548 break; 6549 nfs4_handle_exception(server, status, &exception); 6550 } while (exception.retry); 6551 return status; 6552 } 6553 6554 /* 6555 * This operation also signals the server that this client is 6556 * performing "lease moved" recovery. The server can stop 6557 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6558 * is appended to this compound to identify the client ID which is 6559 * performing recovery. 6560 */ 6561 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6562 { 6563 struct nfs_server *server = NFS_SERVER(inode); 6564 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6565 struct rpc_clnt *clnt = server->client; 6566 struct nfs4_fsid_present_arg args = { 6567 .fh = NFS_FH(inode), 6568 .clientid = clp->cl_clientid, 6569 .renew = 1, /* append RENEW */ 6570 }; 6571 struct nfs4_fsid_present_res res = { 6572 .renew = 1, 6573 }; 6574 struct rpc_message msg = { 6575 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6576 .rpc_argp = &args, 6577 .rpc_resp = &res, 6578 .rpc_cred = cred, 6579 }; 6580 unsigned long now = jiffies; 6581 int status; 6582 6583 res.fh = nfs_alloc_fhandle(); 6584 if (res.fh == NULL) 6585 return -ENOMEM; 6586 6587 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6588 nfs4_set_sequence_privileged(&args.seq_args); 6589 status = nfs4_call_sync_sequence(clnt, server, &msg, 6590 &args.seq_args, &res.seq_res); 6591 nfs_free_fhandle(res.fh); 6592 if (status) 6593 return status; 6594 6595 do_renew_lease(clp, now); 6596 return 0; 6597 } 6598 6599 #ifdef CONFIG_NFS_V4_1 6600 6601 /* 6602 * This operation also signals the server that this client is 6603 * performing "lease moved" recovery. The server can stop asserting 6604 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6605 * this operation is identified in the SEQUENCE operation in this 6606 * compound. 
6607 */ 6608 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6609 { 6610 struct nfs_server *server = NFS_SERVER(inode); 6611 struct rpc_clnt *clnt = server->client; 6612 struct nfs4_fsid_present_arg args = { 6613 .fh = NFS_FH(inode), 6614 }; 6615 struct nfs4_fsid_present_res res = { 6616 }; 6617 struct rpc_message msg = { 6618 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6619 .rpc_argp = &args, 6620 .rpc_resp = &res, 6621 .rpc_cred = cred, 6622 }; 6623 int status; 6624 6625 res.fh = nfs_alloc_fhandle(); 6626 if (res.fh == NULL) 6627 return -ENOMEM; 6628 6629 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6630 nfs4_set_sequence_privileged(&args.seq_args); 6631 status = nfs4_call_sync_sequence(clnt, server, &msg, 6632 &args.seq_args, &res.seq_res); 6633 nfs_free_fhandle(res.fh); 6634 if (status == NFS4_OK && 6635 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6636 status = -NFS4ERR_LEASE_MOVED; 6637 return status; 6638 } 6639 6640 #endif /* CONFIG_NFS_V4_1 */ 6641 6642 /** 6643 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6644 * @inode: inode on FSID to check 6645 * @cred: credential to use for this operation 6646 * 6647 * Server indicates whether the FSID is present, moved, or not 6648 * recognized. This operation is necessary to clear a LEASE_MOVED 6649 * condition for this client ID. 6650 * 6651 * Returns NFS4_OK if the FSID is present on this server, 6652 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6653 * NFS4ERR code if some error occurred on the server, or a 6654 * negative errno if a local failure occurred. 6655 */ 6656 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6657 { 6658 struct nfs_server *server = NFS_SERVER(inode); 6659 struct nfs_client *clp = server->nfs_client; 6660 const struct nfs4_mig_recovery_ops *ops = 6661 clp->cl_mvops->mig_recovery_ops; 6662 struct nfs4_exception exception = { }; 6663 int status; 6664 6665 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6666 (unsigned long long)server->fsid.major, 6667 (unsigned long long)server->fsid.minor, 6668 clp->cl_hostname); 6669 nfs_display_fhandle(NFS_FH(inode), __func__); 6670 6671 do { 6672 status = ops->fsid_present(inode, cred); 6673 if (status != -NFS4ERR_DELAY) 6674 break; 6675 nfs4_handle_exception(server, status, &exception); 6676 } while (exception.retry); 6677 return status; 6678 } 6679 6680 /** 6681 * If 'use_integrity' is true and the state managment nfs_client 6682 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6683 * and the machine credential as per RFC3530bis and RFC5661 Security 6684 * Considerations sections. Otherwise, just use the user cred with the 6685 * filesystem's rpc_client. 
6686 */ 6687 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6688 { 6689 int status; 6690 struct nfs4_secinfo_arg args = { 6691 .dir_fh = NFS_FH(dir), 6692 .name = name, 6693 }; 6694 struct nfs4_secinfo_res res = { 6695 .flavors = flavors, 6696 }; 6697 struct rpc_message msg = { 6698 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6699 .rpc_argp = &args, 6700 .rpc_resp = &res, 6701 }; 6702 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6703 struct rpc_cred *cred = NULL; 6704 6705 if (use_integrity) { 6706 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6707 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6708 msg.rpc_cred = cred; 6709 } 6710 6711 dprintk("NFS call secinfo %s\n", name->name); 6712 6713 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6714 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6715 6716 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6717 &res.seq_res, 0); 6718 dprintk("NFS reply secinfo: %d\n", status); 6719 6720 if (cred) 6721 put_rpccred(cred); 6722 6723 return status; 6724 } 6725 6726 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6727 struct nfs4_secinfo_flavors *flavors) 6728 { 6729 struct nfs4_exception exception = { }; 6730 int err; 6731 do { 6732 err = -NFS4ERR_WRONGSEC; 6733 6734 /* try to use integrity protection with machine cred */ 6735 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6736 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6737 6738 /* 6739 * if unable to use integrity protection, or SECINFO with 6740 * integrity protection returns NFS4ERR_WRONGSEC (which is 6741 * disallowed by spec, but exists in deployed servers) use 6742 * the current filesystem's rpc_client and the user cred. 6743 */ 6744 if (err == -NFS4ERR_WRONGSEC) 6745 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6746 6747 trace_nfs4_secinfo(dir, name, err); 6748 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6749 &exception); 6750 } while (exception.retry); 6751 return err; 6752 } 6753 6754 #ifdef CONFIG_NFS_V4_1 6755 /* 6756 * Check the exchange flags returned by the server for invalid flags, having 6757 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 6758 * DS flags set. 6759 */ 6760 static int nfs4_check_cl_exchange_flags(u32 flags) 6761 { 6762 if (flags & ~EXCHGID4_FLAG_MASK_R) 6763 goto out_inval; 6764 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6765 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6766 goto out_inval; 6767 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6768 goto out_inval; 6769 return NFS_OK; 6770 out_inval: 6771 return -NFS4ERR_INVAL; 6772 } 6773 6774 static bool 6775 nfs41_same_server_scope(struct nfs41_server_scope *a, 6776 struct nfs41_server_scope *b) 6777 { 6778 if (a->server_scope_sz == b->server_scope_sz && 6779 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6780 return true; 6781 6782 return false; 6783 } 6784 6785 /* 6786 * nfs4_proc_bind_conn_to_session() 6787 * 6788 * The 4.1 client currently uses the same TCP connection for the 6789 * fore and backchannel. 
6790 */ 6791 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 6792 { 6793 int status; 6794 struct nfs41_bind_conn_to_session_args args = { 6795 .client = clp, 6796 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6797 }; 6798 struct nfs41_bind_conn_to_session_res res; 6799 struct rpc_message msg = { 6800 .rpc_proc = 6801 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6802 .rpc_argp = &args, 6803 .rpc_resp = &res, 6804 .rpc_cred = cred, 6805 }; 6806 6807 dprintk("--> %s\n", __func__); 6808 6809 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 6810 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 6811 args.dir = NFS4_CDFC4_FORE; 6812 6813 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6814 trace_nfs4_bind_conn_to_session(clp, status); 6815 if (status == 0) { 6816 if (memcmp(res.sessionid.data, 6817 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 6818 dprintk("NFS: %s: Session ID mismatch\n", __func__); 6819 status = -EIO; 6820 goto out; 6821 } 6822 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 6823 dprintk("NFS: %s: Unexpected direction from server\n", 6824 __func__); 6825 status = -EIO; 6826 goto out; 6827 } 6828 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 6829 dprintk("NFS: %s: Server returned RDMA mode = true\n", 6830 __func__); 6831 status = -EIO; 6832 goto out; 6833 } 6834 } 6835 out: 6836 dprintk("<-- %s status= %d\n", __func__, status); 6837 return status; 6838 } 6839 6840 /* 6841 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 6842 * and operations we'd like to see to enable certain features in the allow map 6843 */ 6844 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 6845 .how = SP4_MACH_CRED, 6846 .enforce.u.words = { 6847 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6848 1 << (OP_EXCHANGE_ID - 32) | 6849 1 << (OP_CREATE_SESSION - 32) | 6850 1 << (OP_DESTROY_SESSION - 32) | 6851 1 << (OP_DESTROY_CLIENTID - 32) 6852 }, 6853 .allow.u.words = { 6854 [0] = 1 << (OP_CLOSE) | 6855 1 << (OP_OPEN_DOWNGRADE) | 6856 1 << (OP_LOCKU) | 6857 1 << (OP_DELEGRETURN) | 6858 1 << (OP_COMMIT), 6859 [1] = 1 << (OP_SECINFO - 32) | 6860 1 << (OP_SECINFO_NO_NAME - 32) | 6861 1 << (OP_LAYOUTRETURN - 32) | 6862 1 << (OP_TEST_STATEID - 32) | 6863 1 << (OP_FREE_STATEID - 32) | 6864 1 << (OP_WRITE - 32) 6865 } 6866 }; 6867 6868 /* 6869 * Select the state protection mode for client `clp' given the server results 6870 * from exchange_id in `sp'. 6871 * 6872 * Returns 0 on success, negative errno otherwise. 
6873 */ 6874 static int nfs4_sp4_select_mode(struct nfs_client *clp, 6875 struct nfs41_state_protection *sp) 6876 { 6877 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 6878 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6879 1 << (OP_EXCHANGE_ID - 32) | 6880 1 << (OP_CREATE_SESSION - 32) | 6881 1 << (OP_DESTROY_SESSION - 32) | 6882 1 << (OP_DESTROY_CLIENTID - 32) 6883 }; 6884 unsigned int i; 6885 6886 if (sp->how == SP4_MACH_CRED) { 6887 /* Print state protect result */ 6888 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 6889 for (i = 0; i <= LAST_NFS4_OP; i++) { 6890 if (test_bit(i, sp->enforce.u.longs)) 6891 dfprintk(MOUNT, " enforce op %d\n", i); 6892 if (test_bit(i, sp->allow.u.longs)) 6893 dfprintk(MOUNT, " allow op %d\n", i); 6894 } 6895 6896 /* make sure nothing is on enforce list that isn't supported */ 6897 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 6898 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 6899 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6900 return -EINVAL; 6901 } 6902 } 6903 6904 /* 6905 * Minimal mode - state operations are allowed to use machine 6906 * credential. Note this already happens by default, so the 6907 * client doesn't have to do anything more than the negotiation. 6908 * 6909 * NOTE: we don't care if EXCHANGE_ID is in the list - 6910 * we're already using the machine cred for exchange_id 6911 * and will never use a different cred. 6912 */ 6913 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 6914 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 6915 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 6916 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 6917 dfprintk(MOUNT, "sp4_mach_cred:\n"); 6918 dfprintk(MOUNT, " minimal mode enabled\n"); 6919 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 6920 } else { 6921 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6922 return -EINVAL; 6923 } 6924 6925 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 6926 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 6927 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 6928 test_bit(OP_LOCKU, sp->allow.u.longs)) { 6929 dfprintk(MOUNT, " cleanup mode enabled\n"); 6930 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 6931 } 6932 6933 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 6934 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 6935 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, 6936 &clp->cl_sp4_flags); 6937 } 6938 6939 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 6940 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 6941 dfprintk(MOUNT, " secinfo mode enabled\n"); 6942 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 6943 } 6944 6945 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 6946 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 6947 dfprintk(MOUNT, " stateid mode enabled\n"); 6948 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 6949 } 6950 6951 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 6952 dfprintk(MOUNT, " write mode enabled\n"); 6953 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 6954 } 6955 6956 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 6957 dfprintk(MOUNT, " commit mode enabled\n"); 6958 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 6959 } 6960 } 6961 6962 return 0; 6963 } 6964 6965 /* 6966 * _nfs4_proc_exchange_id() 6967 * 6968 * Wrapper for EXCHANGE_ID operation. 
6969 */ 6970 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 6971 u32 sp4_how) 6972 { 6973 nfs4_verifier verifier; 6974 struct nfs41_exchange_id_args args = { 6975 .verifier = &verifier, 6976 .client = clp, 6977 #ifdef CONFIG_NFS_V4_1_MIGRATION 6978 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6979 EXCHGID4_FLAG_BIND_PRINC_STATEID | 6980 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 6981 #else 6982 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6983 EXCHGID4_FLAG_BIND_PRINC_STATEID, 6984 #endif 6985 }; 6986 struct nfs41_exchange_id_res res = { 6987 0 6988 }; 6989 int status; 6990 struct rpc_message msg = { 6991 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 6992 .rpc_argp = &args, 6993 .rpc_resp = &res, 6994 .rpc_cred = cred, 6995 }; 6996 6997 nfs4_init_boot_verifier(clp, &verifier); 6998 6999 status = nfs4_init_uniform_client_string(clp); 7000 if (status) 7001 goto out; 7002 7003 dprintk("NFS call exchange_id auth=%s, '%s'\n", 7004 clp->cl_rpcclient->cl_auth->au_ops->au_name, 7005 clp->cl_owner_id); 7006 7007 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 7008 GFP_NOFS); 7009 if (unlikely(res.server_owner == NULL)) { 7010 status = -ENOMEM; 7011 goto out; 7012 } 7013 7014 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 7015 GFP_NOFS); 7016 if (unlikely(res.server_scope == NULL)) { 7017 status = -ENOMEM; 7018 goto out_server_owner; 7019 } 7020 7021 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 7022 if (unlikely(res.impl_id == NULL)) { 7023 status = -ENOMEM; 7024 goto out_server_scope; 7025 } 7026 7027 switch (sp4_how) { 7028 case SP4_NONE: 7029 args.state_protect.how = SP4_NONE; 7030 break; 7031 7032 case SP4_MACH_CRED: 7033 args.state_protect = nfs4_sp4_mach_cred_request; 7034 break; 7035 7036 default: 7037 /* unsupported! 
*/ 7038 WARN_ON_ONCE(1); 7039 status = -EINVAL; 7040 goto out_impl_id; 7041 } 7042 7043 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7044 trace_nfs4_exchange_id(clp, status); 7045 if (status == 0) 7046 status = nfs4_check_cl_exchange_flags(res.flags); 7047 7048 if (status == 0) 7049 status = nfs4_sp4_select_mode(clp, &res.state_protect); 7050 7051 if (status == 0) { 7052 clp->cl_clientid = res.clientid; 7053 clp->cl_exchange_flags = res.flags; 7054 /* Client ID is not confirmed */ 7055 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7056 clear_bit(NFS4_SESSION_ESTABLISHED, 7057 &clp->cl_session->session_state); 7058 clp->cl_seqid = res.seqid; 7059 } 7060 7061 kfree(clp->cl_serverowner); 7062 clp->cl_serverowner = res.server_owner; 7063 res.server_owner = NULL; 7064 7065 /* use the most recent implementation id */ 7066 kfree(clp->cl_implid); 7067 clp->cl_implid = res.impl_id; 7068 res.impl_id = NULL; 7069 7070 if (clp->cl_serverscope != NULL && 7071 !nfs41_same_server_scope(clp->cl_serverscope, 7072 res.server_scope)) { 7073 dprintk("%s: server_scope mismatch detected\n", 7074 __func__); 7075 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 7076 kfree(clp->cl_serverscope); 7077 clp->cl_serverscope = NULL; 7078 } 7079 7080 if (clp->cl_serverscope == NULL) { 7081 clp->cl_serverscope = res.server_scope; 7082 res.server_scope = NULL; 7083 } 7084 } 7085 7086 out_impl_id: 7087 kfree(res.impl_id); 7088 out_server_scope: 7089 kfree(res.server_scope); 7090 out_server_owner: 7091 kfree(res.server_owner); 7092 out: 7093 if (clp->cl_implid != NULL) 7094 dprintk("NFS reply exchange_id: Server Implementation ID: " 7095 "domain: %s, name: %s, date: %llu,%u\n", 7096 clp->cl_implid->domain, clp->cl_implid->name, 7097 clp->cl_implid->date.seconds, 7098 clp->cl_implid->date.nseconds); 7099 dprintk("NFS reply exchange_id: %d\n", status); 7100 return status; 7101 } 7102 7103 /* 7104 * nfs4_proc_exchange_id() 7105 * 7106 * Returns zero, a negative errno, or a negative NFS4ERR status code. 7107 * 7108 * Since the clientid has expired, all compounds using sessions 7109 * associated with the stale clientid will be returning 7110 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 7111 * be in some phase of session reset. 7112 * 7113 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
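 * Falls back to SP4_NONE when the SP4_MACH_CRED negotiation fails.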
7114 */ 7115 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 7116 { 7117 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 7118 int status; 7119 7120 /* try SP4_MACH_CRED if krb5i/p */ 7121 if (authflavor == RPC_AUTH_GSS_KRB5I || 7122 authflavor == RPC_AUTH_GSS_KRB5P) { 7123 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 7124 if (!status) 7125 return 0; 7126 } 7127 7128 /* try SP4_NONE */ 7129 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 7130 } 7131 7132 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 7133 struct rpc_cred *cred) 7134 { 7135 struct rpc_message msg = { 7136 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 7137 .rpc_argp = clp, 7138 .rpc_cred = cred, 7139 }; 7140 int status; 7141 7142 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7143 trace_nfs4_destroy_clientid(clp, status); 7144 if (status) 7145 dprintk("NFS: Got error %d from the server %s on " 7146 "DESTROY_CLIENTID.", status, clp->cl_hostname); 7147 return status; 7148 } 7149 7150 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 7151 struct rpc_cred *cred) 7152 { 7153 unsigned int loop; 7154 int ret; 7155 7156 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 7157 ret = _nfs4_proc_destroy_clientid(clp, cred); 7158 switch (ret) { 7159 case -NFS4ERR_DELAY: 7160 case -NFS4ERR_CLIENTID_BUSY: 7161 ssleep(1); 7162 break; 7163 default: 7164 return ret; 7165 } 7166 } 7167 return 0; 7168 } 7169 7170 int nfs4_destroy_clientid(struct nfs_client *clp) 7171 { 7172 struct rpc_cred *cred; 7173 int ret = 0; 7174 7175 if (clp->cl_mvops->minor_version < 1) 7176 goto out; 7177 if (clp->cl_exchange_flags == 0) 7178 goto out; 7179 if (clp->cl_preserve_clid) 7180 goto out; 7181 cred = nfs4_get_clid_cred(clp); 7182 ret = nfs4_proc_destroy_clientid(clp, cred); 7183 if (cred) 7184 put_rpccred(cred); 7185 switch (ret) { 7186 case 0: 7187 case -NFS4ERR_STALE_CLIENTID: 7188 clp->cl_exchange_flags = 0; 7189 } 7190 out: 7191 return ret; 7192 } 7193 7194 struct nfs4_get_lease_time_data { 7195 struct nfs4_get_lease_time_args *args; 7196 struct nfs4_get_lease_time_res *res; 7197 struct nfs_client *clp; 7198 }; 7199 7200 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7201 void *calldata) 7202 { 7203 struct nfs4_get_lease_time_data *data = 7204 (struct nfs4_get_lease_time_data *)calldata; 7205 7206 dprintk("--> %s\n", __func__); 7207 /* just setup sequence, do not trigger session recovery 7208 since we're invoked within one */ 7209 nfs41_setup_sequence(data->clp->cl_session, 7210 &data->args->la_seq_args, 7211 &data->res->lr_seq_res, 7212 task); 7213 dprintk("<-- %s\n", __func__); 7214 } 7215 7216 /* 7217 * Called from nfs4_state_manager thread for session setup, so don't recover 7218 * from sequence operation or clientid errors. 
7219 */ 7220 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7221 { 7222 struct nfs4_get_lease_time_data *data = 7223 (struct nfs4_get_lease_time_data *)calldata; 7224 7225 dprintk("--> %s\n", __func__); 7226 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7227 return; 7228 switch (task->tk_status) { 7229 case -NFS4ERR_DELAY: 7230 case -NFS4ERR_GRACE: 7231 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7232 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7233 task->tk_status = 0; 7234 /* fall through */ 7235 case -NFS4ERR_RETRY_UNCACHED_REP: 7236 rpc_restart_call_prepare(task); 7237 return; 7238 } 7239 dprintk("<-- %s\n", __func__); 7240 } 7241 7242 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7243 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7244 .rpc_call_done = nfs4_get_lease_time_done, 7245 }; 7246 7247 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7248 { 7249 struct rpc_task *task; 7250 struct nfs4_get_lease_time_args args; 7251 struct nfs4_get_lease_time_res res = { 7252 .lr_fsinfo = fsinfo, 7253 }; 7254 struct nfs4_get_lease_time_data data = { 7255 .args = &args, 7256 .res = &res, 7257 .clp = clp, 7258 }; 7259 struct rpc_message msg = { 7260 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7261 .rpc_argp = &args, 7262 .rpc_resp = &res, 7263 }; 7264 struct rpc_task_setup task_setup = { 7265 .rpc_client = clp->cl_rpcclient, 7266 .rpc_message = &msg, 7267 .callback_ops = &nfs4_get_lease_time_ops, 7268 .callback_data = &data, 7269 .flags = RPC_TASK_TIMEOUT, 7270 }; 7271 int status; 7272 7273 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7274 nfs4_set_sequence_privileged(&args.la_seq_args); 7275 dprintk("--> %s\n", __func__); 7276 task = rpc_run_task(&task_setup); 7277 7278 if (IS_ERR(task)) 7279 status = PTR_ERR(task); 7280 else { 7281 status = task->tk_status; 7282 rpc_put_task(task); 7283 } 7284 dprintk("<-- %s return %d\n", __func__, status); 7285 7286 return status; 7287 } 7288 7289 /* 7290 * Initialize the values to be used by the client in CREATE_SESSION 7291 * If nfs4_init_session set the fore channel request and response sizes, 7292 * use them. 7293 * 7294 * Set the back channel max_resp_sz_cached to zero to force the client to 7295 * always set csa_cachethis to FALSE because the current implementation 7296 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
7297 */ 7298 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 7299 { 7300 unsigned int max_rqst_sz, max_resp_sz; 7301 7302 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 7303 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 7304 7305 /* Fore channel attributes */ 7306 args->fc_attrs.max_rqst_sz = max_rqst_sz; 7307 args->fc_attrs.max_resp_sz = max_resp_sz; 7308 args->fc_attrs.max_ops = NFS4_MAX_OPS; 7309 args->fc_attrs.max_reqs = max_session_slots; 7310 7311 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 7312 "max_ops=%u max_reqs=%u\n", 7313 __func__, 7314 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 7315 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 7316 7317 /* Back channel attributes */ 7318 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 7319 args->bc_attrs.max_resp_sz = PAGE_SIZE; 7320 args->bc_attrs.max_resp_sz_cached = 0; 7321 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7322 args->bc_attrs.max_reqs = 1; 7323 7324 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7325 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 7326 __func__, 7327 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 7328 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 7329 args->bc_attrs.max_reqs); 7330 } 7331 7332 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 7333 struct nfs41_create_session_res *res) 7334 { 7335 struct nfs4_channel_attrs *sent = &args->fc_attrs; 7336 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 7337 7338 if (rcvd->max_resp_sz > sent->max_resp_sz) 7339 return -EINVAL; 7340 /* 7341 * Our requested max_ops is the minimum we need; we're not 7342 * prepared to break up compounds into smaller pieces than that. 7343 * So, no point even trying to continue if the server won't 7344 * cooperate: 7345 */ 7346 if (rcvd->max_ops < sent->max_ops) 7347 return -EINVAL; 7348 if (rcvd->max_reqs == 0) 7349 return -EINVAL; 7350 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7351 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7352 return 0; 7353 } 7354 7355 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7356 struct nfs41_create_session_res *res) 7357 { 7358 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7359 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7360 7361 if (!(res->flags & SESSION4_BACK_CHAN)) 7362 goto out; 7363 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7364 return -EINVAL; 7365 if (rcvd->max_resp_sz < sent->max_resp_sz) 7366 return -EINVAL; 7367 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7368 return -EINVAL; 7369 /* These would render the backchannel useless: */ 7370 if (rcvd->max_ops != sent->max_ops) 7371 return -EINVAL; 7372 if (rcvd->max_reqs != sent->max_reqs) 7373 return -EINVAL; 7374 out: 7375 return 0; 7376 } 7377 7378 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7379 struct nfs41_create_session_res *res) 7380 { 7381 int ret; 7382 7383 ret = nfs4_verify_fore_channel_attrs(args, res); 7384 if (ret) 7385 return ret; 7386 return nfs4_verify_back_channel_attrs(args, res); 7387 } 7388 7389 static void nfs4_update_session(struct nfs4_session *session, 7390 struct nfs41_create_session_res *res) 7391 { 7392 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7393 /* Mark client id and session as being confirmed */ 7394 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 7395 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 7396 session->flags = res->flags; 
7397 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7398 if (res->flags & SESSION4_BACK_CHAN) 7399 memcpy(&session->bc_attrs, &res->bc_attrs, 7400 sizeof(session->bc_attrs)); 7401 } 7402 7403 static int _nfs4_proc_create_session(struct nfs_client *clp, 7404 struct rpc_cred *cred) 7405 { 7406 struct nfs4_session *session = clp->cl_session; 7407 struct nfs41_create_session_args args = { 7408 .client = clp, 7409 .clientid = clp->cl_clientid, 7410 .seqid = clp->cl_seqid, 7411 .cb_program = NFS4_CALLBACK, 7412 }; 7413 struct nfs41_create_session_res res; 7414 7415 struct rpc_message msg = { 7416 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7417 .rpc_argp = &args, 7418 .rpc_resp = &res, 7419 .rpc_cred = cred, 7420 }; 7421 int status; 7422 7423 nfs4_init_channel_attrs(&args); 7424 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7425 7426 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7427 trace_nfs4_create_session(clp, status); 7428 7429 if (!status) { 7430 /* Verify the session's negotiated channel_attrs values */ 7431 status = nfs4_verify_channel_attrs(&args, &res); 7432 /* Increment the clientid slot sequence id */ 7433 if (clp->cl_seqid == res.seqid) 7434 clp->cl_seqid++; 7435 if (status) 7436 goto out; 7437 nfs4_update_session(session, &res); 7438 } 7439 out: 7440 return status; 7441 } 7442 7443 /* 7444 * Issues a CREATE_SESSION operation to the server. 7445 * It is the responsibility of the caller to verify the session is 7446 * expired before calling this routine. 7447 */ 7448 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7449 { 7450 int status; 7451 unsigned *ptr; 7452 struct nfs4_session *session = clp->cl_session; 7453 7454 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7455 7456 status = _nfs4_proc_create_session(clp, cred); 7457 if (status) 7458 goto out; 7459 7460 /* Init or reset the session slot tables */ 7461 status = nfs4_setup_session_slot_tables(session); 7462 dprintk("slot table setup returned %d\n", status); 7463 if (status) 7464 goto out; 7465 7466 ptr = (unsigned *)&session->sess_id.data[0]; 7467 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7468 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7469 out: 7470 dprintk("<-- %s\n", __func__); 7471 return status; 7472 } 7473 7474 /* 7475 * Issue the over-the-wire RPC DESTROY_SESSION. 7476 * The caller must serialize access to this routine. 7477 */ 7478 int nfs4_proc_destroy_session(struct nfs4_session *session, 7479 struct rpc_cred *cred) 7480 { 7481 struct rpc_message msg = { 7482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7483 .rpc_argp = session, 7484 .rpc_cred = cred, 7485 }; 7486 int status = 0; 7487 7488 dprintk("--> nfs4_proc_destroy_session\n"); 7489 7490 /* session is still being setup */ 7491 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 7492 return 0; 7493 7494 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7495 trace_nfs4_destroy_session(session->clp, status); 7496 7497 if (status) 7498 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7499 "Session has been destroyed regardless...\n", status); 7500 7501 dprintk("<-- nfs4_proc_destroy_session\n"); 7502 return status; 7503 } 7504 7505 /* 7506 * Renew the cl_session lease. 
7507 */ 7508 struct nfs4_sequence_data { 7509 struct nfs_client *clp; 7510 struct nfs4_sequence_args args; 7511 struct nfs4_sequence_res res; 7512 }; 7513 7514 static void nfs41_sequence_release(void *data) 7515 { 7516 struct nfs4_sequence_data *calldata = data; 7517 struct nfs_client *clp = calldata->clp; 7518 7519 if (atomic_read(&clp->cl_count) > 1) 7520 nfs4_schedule_state_renewal(clp); 7521 nfs_put_client(clp); 7522 kfree(calldata); 7523 } 7524 7525 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7526 { 7527 switch(task->tk_status) { 7528 case -NFS4ERR_DELAY: 7529 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7530 return -EAGAIN; 7531 default: 7532 nfs4_schedule_lease_recovery(clp); 7533 } 7534 return 0; 7535 } 7536 7537 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7538 { 7539 struct nfs4_sequence_data *calldata = data; 7540 struct nfs_client *clp = calldata->clp; 7541 7542 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7543 return; 7544 7545 trace_nfs4_sequence(clp, task->tk_status); 7546 if (task->tk_status < 0) { 7547 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7548 if (atomic_read(&clp->cl_count) == 1) 7549 goto out; 7550 7551 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7552 rpc_restart_call_prepare(task); 7553 return; 7554 } 7555 } 7556 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7557 out: 7558 dprintk("<-- %s\n", __func__); 7559 } 7560 7561 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7562 { 7563 struct nfs4_sequence_data *calldata = data; 7564 struct nfs_client *clp = calldata->clp; 7565 struct nfs4_sequence_args *args; 7566 struct nfs4_sequence_res *res; 7567 7568 args = task->tk_msg.rpc_argp; 7569 res = task->tk_msg.rpc_resp; 7570 7571 nfs41_setup_sequence(clp->cl_session, args, res, task); 7572 } 7573 7574 static const struct rpc_call_ops nfs41_sequence_ops = { 7575 .rpc_call_done = nfs41_sequence_call_done, 7576 .rpc_call_prepare = nfs41_sequence_prepare, 7577 .rpc_release = nfs41_sequence_release, 7578 }; 7579 7580 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7581 struct rpc_cred *cred, 7582 bool is_privileged) 7583 { 7584 struct nfs4_sequence_data *calldata; 7585 struct rpc_message msg = { 7586 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7587 .rpc_cred = cred, 7588 }; 7589 struct rpc_task_setup task_setup_data = { 7590 .rpc_client = clp->cl_rpcclient, 7591 .rpc_message = &msg, 7592 .callback_ops = &nfs41_sequence_ops, 7593 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7594 }; 7595 7596 if (!atomic_inc_not_zero(&clp->cl_count)) 7597 return ERR_PTR(-EIO); 7598 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7599 if (calldata == NULL) { 7600 nfs_put_client(clp); 7601 return ERR_PTR(-ENOMEM); 7602 } 7603 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7604 if (is_privileged) 7605 nfs4_set_sequence_privileged(&calldata->args); 7606 msg.rpc_argp = &calldata->args; 7607 msg.rpc_resp = &calldata->res; 7608 calldata->clp = clp; 7609 task_setup_data.callback_data = calldata; 7610 7611 return rpc_run_task(&task_setup_data); 7612 } 7613 7614 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7615 { 7616 struct rpc_task *task; 7617 int ret = 0; 7618 7619 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7620 return -EAGAIN; 7621 task = _nfs41_proc_sequence(clp, cred, false); 7622 if (IS_ERR(task)) 7623 ret = PTR_ERR(task); 7624 else 7625 rpc_put_task_async(task); 
7626 dprintk("<-- %s status=%d\n", __func__, ret); 7627 return ret; 7628 } 7629 7630 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7631 { 7632 struct rpc_task *task; 7633 int ret; 7634 7635 task = _nfs41_proc_sequence(clp, cred, true); 7636 if (IS_ERR(task)) { 7637 ret = PTR_ERR(task); 7638 goto out; 7639 } 7640 ret = rpc_wait_for_completion_task(task); 7641 if (!ret) 7642 ret = task->tk_status; 7643 rpc_put_task(task); 7644 out: 7645 dprintk("<-- %s status=%d\n", __func__, ret); 7646 return ret; 7647 } 7648 7649 struct nfs4_reclaim_complete_data { 7650 struct nfs_client *clp; 7651 struct nfs41_reclaim_complete_args arg; 7652 struct nfs41_reclaim_complete_res res; 7653 }; 7654 7655 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 7656 { 7657 struct nfs4_reclaim_complete_data *calldata = data; 7658 7659 nfs41_setup_sequence(calldata->clp->cl_session, 7660 &calldata->arg.seq_args, 7661 &calldata->res.seq_res, 7662 task); 7663 } 7664 7665 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7666 { 7667 switch(task->tk_status) { 7668 case 0: 7669 case -NFS4ERR_COMPLETE_ALREADY: 7670 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 7671 break; 7672 case -NFS4ERR_DELAY: 7673 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7674 /* fall through */ 7675 case -NFS4ERR_RETRY_UNCACHED_REP: 7676 return -EAGAIN; 7677 default: 7678 nfs4_schedule_lease_recovery(clp); 7679 } 7680 return 0; 7681 } 7682 7683 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 7684 { 7685 struct nfs4_reclaim_complete_data *calldata = data; 7686 struct nfs_client *clp = calldata->clp; 7687 struct nfs4_sequence_res *res = &calldata->res.seq_res; 7688 7689 dprintk("--> %s\n", __func__); 7690 if (!nfs41_sequence_done(task, res)) 7691 return; 7692 7693 trace_nfs4_reclaim_complete(clp, task->tk_status); 7694 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 7695 rpc_restart_call_prepare(task); 7696 return; 7697 } 7698 dprintk("<-- %s\n", __func__); 7699 } 7700 7701 static void nfs4_free_reclaim_complete_data(void *data) 7702 { 7703 struct nfs4_reclaim_complete_data *calldata = data; 7704 7705 kfree(calldata); 7706 } 7707 7708 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 7709 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 7710 .rpc_call_done = nfs4_reclaim_complete_done, 7711 .rpc_release = nfs4_free_reclaim_complete_data, 7712 }; 7713 7714 /* 7715 * Issue a global reclaim complete. 
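 * RECLAIM_COMPLETE with rca_one_fs = FALSE (arg.one_fs is set to 0 below) tells the server that this client has finished reclaiming all of its state, so the server does not need to keep waiting for further reclaims from it before ending its grace period (RFC 5661, section 18.51).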
7716 */ 7717 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 7718 struct rpc_cred *cred) 7719 { 7720 struct nfs4_reclaim_complete_data *calldata; 7721 struct rpc_task *task; 7722 struct rpc_message msg = { 7723 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 7724 .rpc_cred = cred, 7725 }; 7726 struct rpc_task_setup task_setup_data = { 7727 .rpc_client = clp->cl_rpcclient, 7728 .rpc_message = &msg, 7729 .callback_ops = &nfs4_reclaim_complete_call_ops, 7730 .flags = RPC_TASK_ASYNC, 7731 }; 7732 int status = -ENOMEM; 7733 7734 dprintk("--> %s\n", __func__); 7735 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7736 if (calldata == NULL) 7737 goto out; 7738 calldata->clp = clp; 7739 calldata->arg.one_fs = 0; 7740 7741 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 7742 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 7743 msg.rpc_argp = &calldata->arg; 7744 msg.rpc_resp = &calldata->res; 7745 task_setup_data.callback_data = calldata; 7746 task = rpc_run_task(&task_setup_data); 7747 if (IS_ERR(task)) { 7748 status = PTR_ERR(task); 7749 goto out; 7750 } 7751 status = nfs4_wait_for_completion_rpc_task(task); 7752 if (status == 0) 7753 status = task->tk_status; 7754 rpc_put_task(task); 7755 return 0; 7756 out: 7757 dprintk("<-- %s status=%d\n", __func__, status); 7758 return status; 7759 } 7760 7761 static void 7762 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 7763 { 7764 struct nfs4_layoutget *lgp = calldata; 7765 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 7766 struct nfs4_session *session = nfs4_get_session(server); 7767 int ret; 7768 7769 dprintk("--> %s\n", __func__); 7770 /* Note there is a race here, where a CB_LAYOUTRECALL can come in 7771 * right now covering the LAYOUTGET we are about to send. 7772 * However, that is not so catastrophic, and there seems 7773 * to be no way to prevent it completely. 7774 */ 7775 if (nfs41_setup_sequence(session, &lgp->args.seq_args, 7776 &lgp->res.seq_res, task)) 7777 return; 7778 ret = pnfs_choose_layoutget_stateid(&lgp->args.stateid, 7779 NFS_I(lgp->args.inode)->layout, 7780 &lgp->args.range, 7781 lgp->args.ctx->state); 7782 if (ret < 0) 7783 rpc_exit(task, ret); 7784 } 7785 7786 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 7787 { 7788 struct nfs4_layoutget *lgp = calldata; 7789 struct inode *inode = lgp->args.inode; 7790 struct nfs_server *server = NFS_SERVER(inode); 7791 struct pnfs_layout_hdr *lo; 7792 struct nfs4_state *state = NULL; 7793 unsigned long timeo, now, giveup; 7794 7795 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 7796 7797 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) 7798 goto out; 7799 7800 switch (task->tk_status) { 7801 case 0: 7802 goto out; 7803 7804 /* 7805 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 7806 * on the file. Set tk_status to -ENODATA to tell the upper layer to 7807 * retry the I/O inband (through the MDS). 7808 */ 7809 case -NFS4ERR_LAYOUTUNAVAILABLE: 7810 task->tk_status = -ENODATA; 7811 goto out; 7812 /* 7813 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 7814 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 7815 */ 7816 case -NFS4ERR_BADLAYOUT: 7817 goto out_overflow; 7818 /* 7819 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 7820 * (or clients) writing to the same RAID stripe except when 7821 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7822 */ 7823 case -NFS4ERR_LAYOUTTRYLATER: 7824 if (lgp->args.minlength == 0) 7825 goto out_overflow; 7826 /* 7827 * NFS4ERR_RECALLCONFLICT is a conflict with ourselves: we must recall 7828 * an existing layout before getting a new one. 7829 */ 7830 case -NFS4ERR_RECALLCONFLICT: 7831 timeo = rpc_get_timeout(task->tk_client); 7832 giveup = lgp->args.timestamp + timeo; 7833 now = jiffies; 7834 if (time_after(giveup, now)) { 7835 unsigned long delay; 7836 7837 /* Delay for: 7838 * - Not less than NFS4_POLL_RETRY_MIN. 7839 * - At most until one jiffy before we give up. 7840 * - Otherwise exponential backoff (time now minus start of attempt). 7841 */ 7842 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN, 7843 min((giveup - now - 1), 7844 now - lgp->args.timestamp)); 7845 7846 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", 7847 __func__, delay); 7848 rpc_delay(task, delay); 7849 /* Do not call nfs4_async_handle_error() */ 7850 goto out_restart; 7851 } 7852 break; 7853 case -NFS4ERR_EXPIRED: 7854 case -NFS4ERR_BAD_STATEID: 7855 spin_lock(&inode->i_lock); 7856 if (nfs4_stateid_match(&lgp->args.stateid, 7857 &lgp->args.ctx->state->stateid)) { 7858 spin_unlock(&inode->i_lock); 7859 /* If the open stateid was bad, then recover it. */ 7860 state = lgp->args.ctx->state; 7861 break; 7862 } 7863 lo = NFS_I(inode)->layout; 7864 if (lo && nfs4_stateid_match(&lgp->args.stateid, 7865 &lo->plh_stateid)) { 7866 LIST_HEAD(head); 7867 7868 /* 7869 * Mark the bad layout state as invalid, then retry 7870 * with the current stateid. 7871 */ 7872 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 7873 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 7874 spin_unlock(&inode->i_lock); 7875 pnfs_free_lseg_list(&head); 7876 } else 7877 spin_unlock(&inode->i_lock); 7878 goto out_restart; 7879 } 7880 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN) 7881 goto out_restart; 7882 out: 7883 dprintk("<-- %s\n", __func__); 7884 return; 7885 out_restart: 7886 task->tk_status = 0; 7887 rpc_restart_call_prepare(task); 7888 return; 7889 out_overflow: 7890 task->tk_status = -EOVERFLOW; 7891 goto out; 7892 } 7893 7894 static size_t max_response_pages(struct nfs_server *server) 7895 { 7896 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 7897 return nfs_page_array_len(0, max_resp_sz); 7898 } 7899 7900 static void nfs4_free_pages(struct page **pages, size_t size) 7901 { 7902 int i; 7903 7904 if (!pages) 7905 return; 7906 7907 for (i = 0; i < size; i++) { 7908 if (!pages[i]) 7909 break; 7910 __free_page(pages[i]); 7911 } 7912 kfree(pages); 7913 } 7914 7915 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 7916 { 7917 struct page **pages; 7918 int i; 7919 7920 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 7921 if (!pages) { 7922 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 7923 return NULL; 7924 } 7925 7926 for (i = 0; i < size; i++) { 7927 pages[i] = alloc_page(gfp_flags); 7928 if (!pages[i]) { 7929 dprintk("%s: failed to allocate page\n", __func__); 7930 nfs4_free_pages(pages, size); 7931 return NULL; 7932 } 7933 } 7934 7935 return pages; 7936 } 7937 7938 static void nfs4_layoutget_release(void *calldata) 7939 { 7940 struct nfs4_layoutget *lgp = calldata; 7941 struct inode *inode = lgp->args.inode; 7942 struct nfs_server *server = NFS_SERVER(inode); 7943 size_t max_pages = max_response_pages(server); 7944 7945 dprintk("--> %s\n", __func__); 7946 nfs4_free_pages(lgp->args.layout.pages, max_pages); 7947 pnfs_put_layout_hdr(NFS_I(inode)->layout); 7948
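	/* Drop the reference to the open context held in lgp->args.ctx. */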
put_nfs_open_context(lgp->args.ctx); 7949 kfree(calldata); 7950 dprintk("<-- %s\n", __func__); 7951 } 7952 7953 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 7954 .rpc_call_prepare = nfs4_layoutget_prepare, 7955 .rpc_call_done = nfs4_layoutget_done, 7956 .rpc_release = nfs4_layoutget_release, 7957 }; 7958 7959 struct pnfs_layout_segment * 7960 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) 7961 { 7962 struct inode *inode = lgp->args.inode; 7963 struct nfs_server *server = NFS_SERVER(inode); 7964 size_t max_pages = max_response_pages(server); 7965 struct rpc_task *task; 7966 struct rpc_message msg = { 7967 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 7968 .rpc_argp = &lgp->args, 7969 .rpc_resp = &lgp->res, 7970 .rpc_cred = lgp->cred, 7971 }; 7972 struct rpc_task_setup task_setup_data = { 7973 .rpc_client = server->client, 7974 .rpc_message = &msg, 7975 .callback_ops = &nfs4_layoutget_call_ops, 7976 .callback_data = lgp, 7977 .flags = RPC_TASK_ASYNC, 7978 }; 7979 struct pnfs_layout_segment *lseg = NULL; 7980 int status = 0; 7981 7982 dprintk("--> %s\n", __func__); 7983 7984 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 7985 pnfs_get_layout_hdr(NFS_I(inode)->layout); 7986 7987 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 7988 if (!lgp->args.layout.pages) { 7989 nfs4_layoutget_release(lgp); 7990 return ERR_PTR(-ENOMEM); 7991 } 7992 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 7993 lgp->args.timestamp = jiffies; 7994 7995 lgp->res.layoutp = &lgp->args.layout; 7996 lgp->res.seq_res.sr_slot = NULL; 7997 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 7998 7999 task = rpc_run_task(&task_setup_data); 8000 if (IS_ERR(task)) 8001 return ERR_CAST(task); 8002 status = nfs4_wait_for_completion_rpc_task(task); 8003 if (status == 0) 8004 status = task->tk_status; 8005 trace_nfs4_layoutget(lgp->args.ctx, 8006 &lgp->args.range, 8007 &lgp->res.range, 8008 &lgp->res.stateid, 8009 status); 8010 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 8011 if (status == 0 && lgp->res.layoutp->len) 8012 lseg = pnfs_layout_process(lgp); 8013 rpc_put_task(task); 8014 dprintk("<-- %s status=%d\n", __func__, status); 8015 if (status) 8016 return ERR_PTR(status); 8017 return lseg; 8018 } 8019 8020 static void 8021 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 8022 { 8023 struct nfs4_layoutreturn *lrp = calldata; 8024 8025 dprintk("--> %s\n", __func__); 8026 nfs41_setup_sequence(lrp->clp->cl_session, 8027 &lrp->args.seq_args, 8028 &lrp->res.seq_res, 8029 task); 8030 } 8031 8032 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 8033 { 8034 struct nfs4_layoutreturn *lrp = calldata; 8035 struct nfs_server *server; 8036 8037 dprintk("--> %s\n", __func__); 8038 8039 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 8040 return; 8041 8042 server = NFS_SERVER(lrp->args.inode); 8043 switch (task->tk_status) { 8044 default: 8045 task->tk_status = 0; 8046 case 0: 8047 break; 8048 case -NFS4ERR_DELAY: 8049 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 8050 break; 8051 rpc_restart_call_prepare(task); 8052 return; 8053 } 8054 dprintk("<-- %s\n", __func__); 8055 } 8056 8057 static void nfs4_layoutreturn_release(void *calldata) 8058 { 8059 struct nfs4_layoutreturn *lrp = calldata; 8060 struct pnfs_layout_hdr *lo = lrp->args.layout; 8061 LIST_HEAD(freeme); 8062 8063 dprintk("--> %s\n", __func__); 8064 spin_lock(&lo->plh_inode->i_lock); 8065 
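	/* Under the inode lock: invalidate the segments covered by the returned range, record the new layout stateid if the server still holds layout state (lrs_present), and clear the layoutreturn wait bit so that waiters can proceed. */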
pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range); 8066 pnfs_mark_layout_returned_if_empty(lo); 8067 if (lrp->res.lrs_present) 8068 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 8069 pnfs_clear_layoutreturn_waitbit(lo); 8070 spin_unlock(&lo->plh_inode->i_lock); 8071 pnfs_free_lseg_list(&freeme); 8072 pnfs_put_layout_hdr(lrp->args.layout); 8073 nfs_iput_and_deactive(lrp->inode); 8074 kfree(calldata); 8075 dprintk("<-- %s\n", __func__); 8076 } 8077 8078 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 8079 .rpc_call_prepare = nfs4_layoutreturn_prepare, 8080 .rpc_call_done = nfs4_layoutreturn_done, 8081 .rpc_release = nfs4_layoutreturn_release, 8082 }; 8083 8084 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) 8085 { 8086 struct rpc_task *task; 8087 struct rpc_message msg = { 8088 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 8089 .rpc_argp = &lrp->args, 8090 .rpc_resp = &lrp->res, 8091 .rpc_cred = lrp->cred, 8092 }; 8093 struct rpc_task_setup task_setup_data = { 8094 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 8095 .rpc_message = &msg, 8096 .callback_ops = &nfs4_layoutreturn_call_ops, 8097 .callback_data = lrp, 8098 }; 8099 int status = 0; 8100 8101 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 8102 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 8103 &task_setup_data.rpc_client, &msg); 8104 8105 dprintk("--> %s\n", __func__); 8106 if (!sync) { 8107 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 8108 if (!lrp->inode) { 8109 nfs4_layoutreturn_release(lrp); 8110 return -EAGAIN; 8111 } 8112 task_setup_data.flags |= RPC_TASK_ASYNC; 8113 } 8114 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 8115 task = rpc_run_task(&task_setup_data); 8116 if (IS_ERR(task)) 8117 return PTR_ERR(task); 8118 if (sync) 8119 status = task->tk_status; 8120 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 8121 dprintk("<-- %s status=%d\n", __func__, status); 8122 rpc_put_task(task); 8123 return status; 8124 } 8125 8126 static int 8127 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 8128 struct pnfs_device *pdev, 8129 struct rpc_cred *cred) 8130 { 8131 struct nfs4_getdeviceinfo_args args = { 8132 .pdev = pdev, 8133 .notify_types = NOTIFY_DEVICEID4_CHANGE | 8134 NOTIFY_DEVICEID4_DELETE, 8135 }; 8136 struct nfs4_getdeviceinfo_res res = { 8137 .pdev = pdev, 8138 }; 8139 struct rpc_message msg = { 8140 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 8141 .rpc_argp = &args, 8142 .rpc_resp = &res, 8143 .rpc_cred = cred, 8144 }; 8145 int status; 8146 8147 dprintk("--> %s\n", __func__); 8148 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 8149 if (res.notification & ~args.notify_types) 8150 dprintk("%s: unsupported notification\n", __func__); 8151 if (res.notification != args.notify_types) 8152 pdev->nocache = 1; 8153 8154 dprintk("<-- %s status=%d\n", __func__, status); 8155 8156 return status; 8157 } 8158 8159 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 8160 struct pnfs_device *pdev, 8161 struct rpc_cred *cred) 8162 { 8163 struct nfs4_exception exception = { }; 8164 int err; 8165 8166 do { 8167 err = nfs4_handle_exception(server, 8168 _nfs4_proc_getdeviceinfo(server, pdev, cred), 8169 &exception); 8170 } while (exception.retry); 8171 return err; 8172 } 8173 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 8174 8175 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 8176 { 8177 struct nfs4_layoutcommit_data *data = calldata; 8178 
struct nfs_server *server = NFS_SERVER(data->args.inode); 8179 struct nfs4_session *session = nfs4_get_session(server); 8180 8181 nfs41_setup_sequence(session, 8182 &data->args.seq_args, 8183 &data->res.seq_res, 8184 task); 8185 } 8186 8187 static void 8188 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 8189 { 8190 struct nfs4_layoutcommit_data *data = calldata; 8191 struct nfs_server *server = NFS_SERVER(data->args.inode); 8192 8193 if (!nfs41_sequence_done(task, &data->res.seq_res)) 8194 return; 8195 8196 switch (task->tk_status) { /* Just ignore these failures */ 8197 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 8198 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 8199 case -NFS4ERR_BADLAYOUT: /* no layout */ 8200 case -NFS4ERR_GRACE: /* loca_reclaim is always false */ 8201 task->tk_status = 0; 8202 case 0: 8203 break; 8204 default: 8205 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 8206 rpc_restart_call_prepare(task); 8207 return; 8208 } 8209 } 8210 } 8211 8212 static void nfs4_layoutcommit_release(void *calldata) 8213 { 8214 struct nfs4_layoutcommit_data *data = calldata; 8215 8216 pnfs_cleanup_layoutcommit(data); 8217 nfs_post_op_update_inode_force_wcc(data->args.inode, 8218 data->res.fattr); 8219 put_rpccred(data->cred); 8220 nfs_iput_and_deactive(data->inode); 8221 kfree(data); 8222 } 8223 8224 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 8225 .rpc_call_prepare = nfs4_layoutcommit_prepare, 8226 .rpc_call_done = nfs4_layoutcommit_done, 8227 .rpc_release = nfs4_layoutcommit_release, 8228 }; 8229 8230 int 8231 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 8232 { 8233 struct rpc_message msg = { 8234 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 8235 .rpc_argp = &data->args, 8236 .rpc_resp = &data->res, 8237 .rpc_cred = data->cred, 8238 }; 8239 struct rpc_task_setup task_setup_data = { 8240 .task = &data->task, 8241 .rpc_client = NFS_CLIENT(data->args.inode), 8242 .rpc_message = &msg, 8243 .callback_ops = &nfs4_layoutcommit_ops, 8244 .callback_data = data, 8245 }; 8246 struct rpc_task *task; 8247 int status = 0; 8248 8249 dprintk("NFS: initiating layoutcommit call.
sync %d " 8250 "lbw: %llu inode %lu\n", sync, 8251 data->args.lastbytewritten, 8252 data->args.inode->i_ino); 8253 8254 if (!sync) { 8255 data->inode = nfs_igrab_and_active(data->args.inode); 8256 if (data->inode == NULL) { 8257 nfs4_layoutcommit_release(data); 8258 return -EAGAIN; 8259 } 8260 task_setup_data.flags = RPC_TASK_ASYNC; 8261 } 8262 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 8263 task = rpc_run_task(&task_setup_data); 8264 if (IS_ERR(task)) 8265 return PTR_ERR(task); 8266 if (sync) 8267 status = task->tk_status; 8268 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 8269 dprintk("%s: status %d\n", __func__, status); 8270 rpc_put_task(task); 8271 return status; 8272 } 8273 8274 /** 8275 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 8276 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 8277 */ 8278 static int 8279 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8280 struct nfs_fsinfo *info, 8281 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8282 { 8283 struct nfs41_secinfo_no_name_args args = { 8284 .style = SECINFO_STYLE_CURRENT_FH, 8285 }; 8286 struct nfs4_secinfo_res res = { 8287 .flavors = flavors, 8288 }; 8289 struct rpc_message msg = { 8290 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 8291 .rpc_argp = &args, 8292 .rpc_resp = &res, 8293 }; 8294 struct rpc_clnt *clnt = server->client; 8295 struct rpc_cred *cred = NULL; 8296 int status; 8297 8298 if (use_integrity) { 8299 clnt = server->nfs_client->cl_rpcclient; 8300 cred = nfs4_get_clid_cred(server->nfs_client); 8301 msg.rpc_cred = cred; 8302 } 8303 8304 dprintk("--> %s\n", __func__); 8305 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 8306 &res.seq_res, 0); 8307 dprintk("<-- %s status=%d\n", __func__, status); 8308 8309 if (cred) 8310 put_rpccred(cred); 8311 8312 return status; 8313 } 8314 8315 static int 8316 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8317 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 8318 { 8319 struct nfs4_exception exception = { }; 8320 int err; 8321 do { 8322 /* first try using integrity protection */ 8323 err = -NFS4ERR_WRONGSEC; 8324 8325 /* try to use integrity protection with machine cred */ 8326 if (_nfs4_is_integrity_protected(server->nfs_client)) 8327 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8328 flavors, true); 8329 8330 /* 8331 * if unable to use integrity protection, or SECINFO with 8332 * integrity protection returns NFS4ERR_WRONGSEC (which is 8333 * disallowed by spec, but exists in deployed servers) use 8334 * the current filesystem's rpc_client and the user cred. 
8335 */ 8336 if (err == -NFS4ERR_WRONGSEC) 8337 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8338 flavors, false); 8339 8340 switch (err) { 8341 case 0: 8342 case -NFS4ERR_WRONGSEC: 8343 case -ENOTSUPP: 8344 goto out; 8345 default: 8346 err = nfs4_handle_exception(server, err, &exception); 8347 } 8348 } while (exception.retry); 8349 out: 8350 return err; 8351 } 8352 8353 static int 8354 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 8355 struct nfs_fsinfo *info) 8356 { 8357 int err; 8358 struct page *page; 8359 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 8360 struct nfs4_secinfo_flavors *flavors; 8361 struct nfs4_secinfo4 *secinfo; 8362 int i; 8363 8364 page = alloc_page(GFP_KERNEL); 8365 if (!page) { 8366 err = -ENOMEM; 8367 goto out; 8368 } 8369 8370 flavors = page_address(page); 8371 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 8372 8373 /* 8374 * Fall back on "guess and check" method if 8375 * the server doesn't support SECINFO_NO_NAME 8376 */ 8377 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 8378 err = nfs4_find_root_sec(server, fhandle, info); 8379 goto out_freepage; 8380 } 8381 if (err) 8382 goto out_freepage; 8383 8384 for (i = 0; i < flavors->num_flavors; i++) { 8385 secinfo = &flavors->flavors[i]; 8386 8387 switch (secinfo->flavor) { 8388 case RPC_AUTH_NULL: 8389 case RPC_AUTH_UNIX: 8390 case RPC_AUTH_GSS: 8391 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 8392 &secinfo->flavor_info); 8393 break; 8394 default: 8395 flavor = RPC_AUTH_MAXFLAVOR; 8396 break; 8397 } 8398 8399 if (!nfs_auth_info_match(&server->auth_info, flavor)) 8400 flavor = RPC_AUTH_MAXFLAVOR; 8401 8402 if (flavor != RPC_AUTH_MAXFLAVOR) { 8403 err = nfs4_lookup_root_sec(server, fhandle, 8404 info, flavor); 8405 if (!err) 8406 break; 8407 } 8408 } 8409 8410 if (flavor == RPC_AUTH_MAXFLAVOR) 8411 err = -EPERM; 8412 8413 out_freepage: 8414 put_page(page); 8415 if (err == -EACCES) 8416 return -EPERM; 8417 out: 8418 return err; 8419 } 8420 8421 static int _nfs41_test_stateid(struct nfs_server *server, 8422 nfs4_stateid *stateid, 8423 struct rpc_cred *cred) 8424 { 8425 int status; 8426 struct nfs41_test_stateid_args args = { 8427 .stateid = stateid, 8428 }; 8429 struct nfs41_test_stateid_res res; 8430 struct rpc_message msg = { 8431 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 8432 .rpc_argp = &args, 8433 .rpc_resp = &res, 8434 .rpc_cred = cred, 8435 }; 8436 struct rpc_clnt *rpc_client = server->client; 8437 8438 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8439 &rpc_client, &msg); 8440 8441 dprintk("NFS call test_stateid %p\n", stateid); 8442 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 8443 nfs4_set_sequence_privileged(&args.seq_args); 8444 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 8445 &args.seq_args, &res.seq_res); 8446 if (status != NFS_OK) { 8447 dprintk("NFS reply test_stateid: failed, %d\n", status); 8448 return status; 8449 } 8450 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 8451 return -res.status; 8452 } 8453 8454 /** 8455 * nfs41_test_stateid - perform a TEST_STATEID operation 8456 * 8457 * @server: server / transport on which to perform the operation 8458 * @stateid: state ID to test 8459 * @cred: credential 8460 * 8461 * Returns NFS_OK if the server recognizes that "stateid" is valid. 8462 * Otherwise a negative NFS4ERR value is returned if the operation 8463 * failed or the state ID is not currently valid. 
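 * The request is marked privileged (see the nfs4_set_sequence_privileged() call above), so it may still be sent while the session is drained for state recovery.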
8464 */ 8465 static int nfs41_test_stateid(struct nfs_server *server, 8466 nfs4_stateid *stateid, 8467 struct rpc_cred *cred) 8468 { 8469 struct nfs4_exception exception = { }; 8470 int err; 8471 do { 8472 err = _nfs41_test_stateid(server, stateid, cred); 8473 if (err != -NFS4ERR_DELAY) 8474 break; 8475 nfs4_handle_exception(server, err, &exception); 8476 } while (exception.retry); 8477 return err; 8478 } 8479 8480 struct nfs_free_stateid_data { 8481 struct nfs_server *server; 8482 struct nfs41_free_stateid_args args; 8483 struct nfs41_free_stateid_res res; 8484 }; 8485 8486 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 8487 { 8488 struct nfs_free_stateid_data *data = calldata; 8489 nfs41_setup_sequence(nfs4_get_session(data->server), 8490 &data->args.seq_args, 8491 &data->res.seq_res, 8492 task); 8493 } 8494 8495 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 8496 { 8497 struct nfs_free_stateid_data *data = calldata; 8498 8499 nfs41_sequence_done(task, &data->res.seq_res); 8500 8501 switch (task->tk_status) { 8502 case -NFS4ERR_DELAY: 8503 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8504 rpc_restart_call_prepare(task); 8505 } 8506 } 8507 8508 static void nfs41_free_stateid_release(void *calldata) 8509 { 8510 kfree(calldata); 8511 } 8512 8513 static const struct rpc_call_ops nfs41_free_stateid_ops = { 8514 .rpc_call_prepare = nfs41_free_stateid_prepare, 8515 .rpc_call_done = nfs41_free_stateid_done, 8516 .rpc_release = nfs41_free_stateid_release, 8517 }; 8518 8519 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 8520 nfs4_stateid *stateid, 8521 struct rpc_cred *cred, 8522 bool privileged) 8523 { 8524 struct rpc_message msg = { 8525 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 8526 .rpc_cred = cred, 8527 }; 8528 struct rpc_task_setup task_setup = { 8529 .rpc_client = server->client, 8530 .rpc_message = &msg, 8531 .callback_ops = &nfs41_free_stateid_ops, 8532 .flags = RPC_TASK_ASYNC, 8533 }; 8534 struct nfs_free_stateid_data *data; 8535 8536 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8537 &task_setup.rpc_client, &msg); 8538 8539 dprintk("NFS call free_stateid %p\n", stateid); 8540 data = kmalloc(sizeof(*data), GFP_NOFS); 8541 if (!data) 8542 return ERR_PTR(-ENOMEM); 8543 data->server = server; 8544 nfs4_stateid_copy(&data->args.stateid, stateid); 8545 8546 task_setup.callback_data = data; 8547 8548 msg.rpc_argp = &data->args; 8549 msg.rpc_resp = &data->res; 8550 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8551 if (privileged) 8552 nfs4_set_sequence_privileged(&data->args.seq_args); 8553 8554 return rpc_run_task(&task_setup); 8555 } 8556 8557 /** 8558 * nfs41_free_stateid - perform a FREE_STATEID operation 8559 * 8560 * @server: server / transport on which to perform the operation 8561 * @stateid: state ID to release 8562 * @cred: credential 8563 * 8564 * Returns NFS_OK if the server freed "stateid". Otherwise a 8565 * negative NFS4ERR value is returned. 
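 * The underlying RPC is run on a privileged sequence slot, and this wrapper waits for it to complete before returning its status.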
8566 */ 8567 static int nfs41_free_stateid(struct nfs_server *server, 8568 nfs4_stateid *stateid, 8569 struct rpc_cred *cred) 8570 { 8571 struct rpc_task *task; 8572 int ret; 8573 8574 task = _nfs41_free_stateid(server, stateid, cred, true); 8575 if (IS_ERR(task)) 8576 return PTR_ERR(task); 8577 ret = rpc_wait_for_completion_task(task); 8578 if (!ret) 8579 ret = task->tk_status; 8580 rpc_put_task(task); 8581 return ret; 8582 } 8583 8584 static void 8585 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 8586 { 8587 struct rpc_task *task; 8588 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 8589 8590 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8591 nfs4_free_lock_state(server, lsp); 8592 if (IS_ERR(task)) 8593 return; 8594 rpc_put_task(task); 8595 } 8596 8597 static bool nfs41_match_stateid(const nfs4_stateid *s1, 8598 const nfs4_stateid *s2) 8599 { 8600 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 8601 return false; 8602 8603 if (s1->seqid == s2->seqid) 8604 return true; 8605 if (s1->seqid == 0 || s2->seqid == 0) 8606 return true; 8607 8608 return false; 8609 } 8610 8611 #endif /* CONFIG_NFS_V4_1 */ 8612 8613 static bool nfs4_match_stateid(const nfs4_stateid *s1, 8614 const nfs4_stateid *s2) 8615 { 8616 return nfs4_stateid_match(s1, s2); 8617 } 8618 8619 8620 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 8621 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8622 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8623 .recover_open = nfs4_open_reclaim, 8624 .recover_lock = nfs4_lock_reclaim, 8625 .establish_clid = nfs4_init_clientid, 8626 .detect_trunking = nfs40_discover_server_trunking, 8627 }; 8628 8629 #if defined(CONFIG_NFS_V4_1) 8630 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 8631 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8632 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8633 .recover_open = nfs4_open_reclaim, 8634 .recover_lock = nfs4_lock_reclaim, 8635 .establish_clid = nfs41_init_clientid, 8636 .reclaim_complete = nfs41_proc_reclaim_complete, 8637 .detect_trunking = nfs41_discover_server_trunking, 8638 }; 8639 #endif /* CONFIG_NFS_V4_1 */ 8640 8641 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 8642 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8643 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8644 .recover_open = nfs40_open_expired, 8645 .recover_lock = nfs4_lock_expired, 8646 .establish_clid = nfs4_init_clientid, 8647 }; 8648 8649 #if defined(CONFIG_NFS_V4_1) 8650 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 8651 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8652 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8653 .recover_open = nfs41_open_expired, 8654 .recover_lock = nfs41_lock_expired, 8655 .establish_clid = nfs41_init_clientid, 8656 }; 8657 #endif /* CONFIG_NFS_V4_1 */ 8658 8659 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 8660 .sched_state_renewal = nfs4_proc_async_renew, 8661 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 8662 .renew_lease = nfs4_proc_renew, 8663 }; 8664 8665 #if defined(CONFIG_NFS_V4_1) 8666 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 8667 .sched_state_renewal = nfs41_proc_async_sequence, 8668 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 8669 .renew_lease = nfs4_proc_sequence, 8670 }; 8671 #endif 8672 8673 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 8674 .get_locations = 
_nfs40_proc_get_locations, 8675 .fsid_present = _nfs40_proc_fsid_present, 8676 }; 8677 8678 #if defined(CONFIG_NFS_V4_1) 8679 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 8680 .get_locations = _nfs41_proc_get_locations, 8681 .fsid_present = _nfs41_proc_fsid_present, 8682 }; 8683 #endif /* CONFIG_NFS_V4_1 */ 8684 8685 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 8686 .minor_version = 0, 8687 .init_caps = NFS_CAP_READDIRPLUS 8688 | NFS_CAP_ATOMIC_OPEN 8689 | NFS_CAP_POSIX_LOCK, 8690 .init_client = nfs40_init_client, 8691 .shutdown_client = nfs40_shutdown_client, 8692 .match_stateid = nfs4_match_stateid, 8693 .find_root_sec = nfs4_find_root_sec, 8694 .free_lock_state = nfs4_release_lockowner, 8695 .alloc_seqid = nfs_alloc_seqid, 8696 .call_sync_ops = &nfs40_call_sync_ops, 8697 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 8698 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 8699 .state_renewal_ops = &nfs40_state_renewal_ops, 8700 .mig_recovery_ops = &nfs40_mig_recovery_ops, 8701 }; 8702 8703 #if defined(CONFIG_NFS_V4_1) 8704 static struct nfs_seqid * 8705 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 8706 { 8707 return NULL; 8708 } 8709 8710 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 8711 .minor_version = 1, 8712 .init_caps = NFS_CAP_READDIRPLUS 8713 | NFS_CAP_ATOMIC_OPEN 8714 | NFS_CAP_POSIX_LOCK 8715 | NFS_CAP_STATEID_NFSV41 8716 | NFS_CAP_ATOMIC_OPEN_V1, 8717 .init_client = nfs41_init_client, 8718 .shutdown_client = nfs41_shutdown_client, 8719 .match_stateid = nfs41_match_stateid, 8720 .find_root_sec = nfs41_find_root_sec, 8721 .free_lock_state = nfs41_free_lock_state, 8722 .alloc_seqid = nfs_alloc_no_seqid, 8723 .call_sync_ops = &nfs41_call_sync_ops, 8724 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8725 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8726 .state_renewal_ops = &nfs41_state_renewal_ops, 8727 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8728 }; 8729 #endif 8730 8731 #if defined(CONFIG_NFS_V4_2) 8732 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 8733 .minor_version = 2, 8734 .init_caps = NFS_CAP_READDIRPLUS 8735 | NFS_CAP_ATOMIC_OPEN 8736 | NFS_CAP_POSIX_LOCK 8737 | NFS_CAP_STATEID_NFSV41 8738 | NFS_CAP_ATOMIC_OPEN_V1 8739 | NFS_CAP_ALLOCATE 8740 | NFS_CAP_DEALLOCATE 8741 | NFS_CAP_SEEK 8742 | NFS_CAP_LAYOUTSTATS 8743 | NFS_CAP_CLONE, 8744 .init_client = nfs41_init_client, 8745 .shutdown_client = nfs41_shutdown_client, 8746 .match_stateid = nfs41_match_stateid, 8747 .find_root_sec = nfs41_find_root_sec, 8748 .free_lock_state = nfs41_free_lock_state, 8749 .call_sync_ops = &nfs41_call_sync_ops, 8750 .alloc_seqid = nfs_alloc_no_seqid, 8751 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8752 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8753 .state_renewal_ops = &nfs41_state_renewal_ops, 8754 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8755 }; 8756 #endif 8757 8758 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 8759 [0] = &nfs_v4_0_minor_ops, 8760 #if defined(CONFIG_NFS_V4_1) 8761 [1] = &nfs_v4_1_minor_ops, 8762 #endif 8763 #if defined(CONFIG_NFS_V4_2) 8764 [2] = &nfs_v4_2_minor_ops, 8765 #endif 8766 }; 8767 8768 ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 8769 { 8770 ssize_t error, error2; 8771 8772 error = generic_listxattr(dentry, list, size); 8773 if (error < 0) 8774 return error; 8775 if (list) { 8776 list += error; 8777 size -= error; 8778 } 8779 8780 error2 = 
nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); 8781 if (error2 < 0) 8782 return error2; 8783 return error + error2; 8784 } 8785 8786 static const struct inode_operations nfs4_dir_inode_operations = { 8787 .create = nfs_create, 8788 .lookup = nfs_lookup, 8789 .atomic_open = nfs_atomic_open, 8790 .link = nfs_link, 8791 .unlink = nfs_unlink, 8792 .symlink = nfs_symlink, 8793 .mkdir = nfs_mkdir, 8794 .rmdir = nfs_rmdir, 8795 .mknod = nfs_mknod, 8796 .rename = nfs_rename, 8797 .permission = nfs_permission, 8798 .getattr = nfs_getattr, 8799 .setattr = nfs_setattr, 8800 .getxattr = generic_getxattr, 8801 .setxattr = generic_setxattr, 8802 .listxattr = nfs4_listxattr, 8803 .removexattr = generic_removexattr, 8804 }; 8805 8806 static const struct inode_operations nfs4_file_inode_operations = { 8807 .permission = nfs_permission, 8808 .getattr = nfs_getattr, 8809 .setattr = nfs_setattr, 8810 .getxattr = generic_getxattr, 8811 .setxattr = generic_setxattr, 8812 .listxattr = nfs4_listxattr, 8813 .removexattr = generic_removexattr, 8814 }; 8815 8816 const struct nfs_rpc_ops nfs_v4_clientops = { 8817 .version = 4, /* protocol version */ 8818 .dentry_ops = &nfs4_dentry_operations, 8819 .dir_inode_ops = &nfs4_dir_inode_operations, 8820 .file_inode_ops = &nfs4_file_inode_operations, 8821 .file_ops = &nfs4_file_operations, 8822 .getroot = nfs4_proc_get_root, 8823 .submount = nfs4_submount, 8824 .try_mount = nfs4_try_mount, 8825 .getattr = nfs4_proc_getattr, 8826 .setattr = nfs4_proc_setattr, 8827 .lookup = nfs4_proc_lookup, 8828 .access = nfs4_proc_access, 8829 .readlink = nfs4_proc_readlink, 8830 .create = nfs4_proc_create, 8831 .remove = nfs4_proc_remove, 8832 .unlink_setup = nfs4_proc_unlink_setup, 8833 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 8834 .unlink_done = nfs4_proc_unlink_done, 8835 .rename_setup = nfs4_proc_rename_setup, 8836 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 8837 .rename_done = nfs4_proc_rename_done, 8838 .link = nfs4_proc_link, 8839 .symlink = nfs4_proc_symlink, 8840 .mkdir = nfs4_proc_mkdir, 8841 .rmdir = nfs4_proc_remove, 8842 .readdir = nfs4_proc_readdir, 8843 .mknod = nfs4_proc_mknod, 8844 .statfs = nfs4_proc_statfs, 8845 .fsinfo = nfs4_proc_fsinfo, 8846 .pathconf = nfs4_proc_pathconf, 8847 .set_capabilities = nfs4_server_capabilities, 8848 .decode_dirent = nfs4_decode_dirent, 8849 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 8850 .read_setup = nfs4_proc_read_setup, 8851 .read_done = nfs4_read_done, 8852 .write_setup = nfs4_proc_write_setup, 8853 .write_done = nfs4_write_done, 8854 .commit_setup = nfs4_proc_commit_setup, 8855 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 8856 .commit_done = nfs4_commit_done, 8857 .lock = nfs4_proc_lock, 8858 .clear_acl_cache = nfs4_zap_acl_attr, 8859 .close_context = nfs4_close_context, 8860 .open_context = nfs4_atomic_open, 8861 .have_delegation = nfs4_have_delegation, 8862 .return_delegation = nfs4_inode_return_delegation, 8863 .alloc_client = nfs4_alloc_client, 8864 .init_client = nfs4_init_client, 8865 .free_client = nfs4_free_client, 8866 .create_server = nfs4_create_server, 8867 .clone_server = nfs_clone_server, 8868 }; 8869 8870 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 8871 .name = XATTR_NAME_NFSV4_ACL, 8872 .list = nfs4_xattr_list_nfs4_acl, 8873 .get = nfs4_xattr_get_nfs4_acl, 8874 .set = nfs4_xattr_set_nfs4_acl, 8875 }; 8876 8877 const struct xattr_handler *nfs4_xattr_handlers[] = { 8878 &nfs4_xattr_nfs4_acl_handler, 8879 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8880 
&nfs4_xattr_nfs4_label_handler, 8881 #endif 8882 NULL 8883 }; 8884 8885 /* 8886 * Local variables: 8887 * c-basic-offset: 8 8888 * End: 8889 */ 8890