/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

#define NFS4_MAX_LOOP_ON_RECOVER (10)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_ACCESS:
		return -EACCES;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
	int res;

	might_sleep();

	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (res)
		return res;

	if (clp->cl_cons_state < 0)
		return clp->cl_cons_state;
	return 0;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	freezable_schedule_timeout_killable(*timeout);
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	*timeout <<= 1;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_OPENMODE:
		if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
			nfs4_inode_return_delegation(inode);
			exception->retry = 1;
			return 0;
		}
		if (state == NULL)
			break;
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL)
			nfs4_schedule_stateid_recovery(server, state);
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR: %d Reset session\n", __func__,
			errorcode);
		nfs4_schedule_session_recovery(clp->cl_session, errorcode);
		exception->retry = 1;
		break;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
	case -NFS4ERR_GRACE:
	case -NFS4ERR_DELAY:
	case -EKEYEXPIRED:
		ret = nfs4_delay(server->client, &exception->timeout);
		if (ret != 0)
			break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}

#if defined(CONFIG_NFS_V4_1)

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
		if (slotid < tbl->max_slots)
			tbl->highest_used_slotid = slotid;
		else
			tbl->highest_used_slotid = NFS4_NO_SLOT;
	}
	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
		slotid, tbl->highest_used_slotid);
}

bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	return true;
}

/*
 * Signal state manager thread if session fore channel is drained
 */
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
				nfs4_set_task_privileged, NULL);
		return;
	}

	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;

	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
	complete(&ses->fc_slot_table.complete);
}

/*
 * Signal state manager thread if session back channel is drained
 */
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;
	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
	complete(&ses->bc_slot_table.complete);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot_table *tbl;

	tbl = &res->sr_session->fc_slot_table;
	if (!res->sr_slot) {
		/* just wake up the next waiter, since we may not have
		 * consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
		return;
	}

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
	nfs4_check_drain_fc_complete(res->sr_session);
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
}

static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation.
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot\n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_session == NULL)
		return 1;
	return nfs41_sequence_done(task, res);
}

/*
 * nfs4_find_slot - efficiently look for a free slot
 *
 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and set up the sequence operation args accordingly.
 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
static u32
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
	u32 slotid;
	u32 ret_id = NFS4_NO_SLOT;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slots);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
			tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret_id = slotid;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
	return ret_id;
}

static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_session = NULL;
	args->sa_cache_this = 0;
	if (cache_reply)
		args->sa_cache_this = 1;
	res->sr_session = NULL;
	res->sr_slot = NULL;
}

int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u32 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated?
*/ 609 if (res->sr_slot != NULL) 610 return 0; 611 612 tbl = &session->fc_slot_table; 613 614 spin_lock(&tbl->slot_tbl_lock); 615 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) && 616 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) { 617 /* The state manager will wait until the slot table is empty */ 618 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 619 spin_unlock(&tbl->slot_tbl_lock); 620 dprintk("%s session is draining\n", __func__); 621 return -EAGAIN; 622 } 623 624 if (!rpc_queue_empty(&tbl->slot_tbl_waitq) && 625 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) { 626 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 627 spin_unlock(&tbl->slot_tbl_lock); 628 dprintk("%s enforce FIFO order\n", __func__); 629 return -EAGAIN; 630 } 631 632 slotid = nfs4_find_slot(tbl); 633 if (slotid == NFS4_NO_SLOT) { 634 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 635 spin_unlock(&tbl->slot_tbl_lock); 636 dprintk("<-- %s: no free slots\n", __func__); 637 return -EAGAIN; 638 } 639 spin_unlock(&tbl->slot_tbl_lock); 640 641 rpc_task_set_priority(task, RPC_PRIORITY_NORMAL); 642 slot = tbl->slots + slotid; 643 args->sa_session = session; 644 args->sa_slotid = slotid; 645 646 dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr); 647 648 res->sr_session = session; 649 res->sr_slot = slot; 650 res->sr_renewal_time = jiffies; 651 res->sr_status_flags = 0; 652 /* 653 * sr_status is only set in decode_sequence, and so will remain 654 * set to 1 if an rpc level failure occurs. 655 */ 656 res->sr_status = 1; 657 return 0; 658 } 659 EXPORT_SYMBOL_GPL(nfs41_setup_sequence); 660 661 int nfs4_setup_sequence(const struct nfs_server *server, 662 struct nfs4_sequence_args *args, 663 struct nfs4_sequence_res *res, 664 struct rpc_task *task) 665 { 666 struct nfs4_session *session = nfs4_get_session(server); 667 int ret = 0; 668 669 if (session == NULL) 670 goto out; 671 672 dprintk("--> %s clp %p session %p sr_slot %td\n", 673 __func__, session->clp, session, res->sr_slot ? 
674 res->sr_slot - session->fc_slot_table.slots : -1); 675 676 ret = nfs41_setup_sequence(session, args, res, task); 677 out: 678 dprintk("<-- %s status=%d\n", __func__, ret); 679 return ret; 680 } 681 682 struct nfs41_call_sync_data { 683 const struct nfs_server *seq_server; 684 struct nfs4_sequence_args *seq_args; 685 struct nfs4_sequence_res *seq_res; 686 }; 687 688 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 689 { 690 struct nfs41_call_sync_data *data = calldata; 691 692 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 693 694 if (nfs4_setup_sequence(data->seq_server, data->seq_args, 695 data->seq_res, task)) 696 return; 697 rpc_call_start(task); 698 } 699 700 static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata) 701 { 702 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 703 nfs41_call_sync_prepare(task, calldata); 704 } 705 706 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 707 { 708 struct nfs41_call_sync_data *data = calldata; 709 710 nfs41_sequence_done(task, data->seq_res); 711 } 712 713 static const struct rpc_call_ops nfs41_call_sync_ops = { 714 .rpc_call_prepare = nfs41_call_sync_prepare, 715 .rpc_call_done = nfs41_call_sync_done, 716 }; 717 718 static const struct rpc_call_ops nfs41_call_priv_sync_ops = { 719 .rpc_call_prepare = nfs41_call_priv_sync_prepare, 720 .rpc_call_done = nfs41_call_sync_done, 721 }; 722 723 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 724 struct nfs_server *server, 725 struct rpc_message *msg, 726 struct nfs4_sequence_args *args, 727 struct nfs4_sequence_res *res, 728 int privileged) 729 { 730 int ret; 731 struct rpc_task *task; 732 struct nfs41_call_sync_data data = { 733 .seq_server = server, 734 .seq_args = args, 735 .seq_res = res, 736 }; 737 struct rpc_task_setup task_setup = { 738 .rpc_client = clnt, 739 .rpc_message = msg, 740 .callback_ops = &nfs41_call_sync_ops, 741 .callback_data = &data 742 }; 743 744 if (privileged) 745 task_setup.callback_ops = &nfs41_call_priv_sync_ops; 746 task = rpc_run_task(&task_setup); 747 if (IS_ERR(task)) 748 ret = PTR_ERR(task); 749 else { 750 ret = task->tk_status; 751 rpc_put_task(task); 752 } 753 return ret; 754 } 755 756 int _nfs4_call_sync_session(struct rpc_clnt *clnt, 757 struct nfs_server *server, 758 struct rpc_message *msg, 759 struct nfs4_sequence_args *args, 760 struct nfs4_sequence_res *res, 761 int cache_reply) 762 { 763 nfs41_init_sequence(args, res, cache_reply); 764 return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0); 765 } 766 767 #else 768 static inline 769 void nfs41_init_sequence(struct nfs4_sequence_args *args, 770 struct nfs4_sequence_res *res, int cache_reply) 771 { 772 } 773 774 static int nfs4_sequence_done(struct rpc_task *task, 775 struct nfs4_sequence_res *res) 776 { 777 return 1; 778 } 779 #endif /* CONFIG_NFS_V4_1 */ 780 781 int _nfs4_call_sync(struct rpc_clnt *clnt, 782 struct nfs_server *server, 783 struct rpc_message *msg, 784 struct nfs4_sequence_args *args, 785 struct nfs4_sequence_res *res, 786 int cache_reply) 787 { 788 nfs41_init_sequence(args, res, cache_reply); 789 return rpc_call_sync(clnt, msg, 0); 790 } 791 792 static inline 793 int nfs4_call_sync(struct rpc_clnt *clnt, 794 struct nfs_server *server, 795 struct rpc_message *msg, 796 struct nfs4_sequence_args *args, 797 struct nfs4_sequence_res *res, 798 int cache_reply) 799 { 800 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg, 801 args, res, cache_reply); 802 } 803 804 
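/*
 * update_changeattr - apply the change_info4 from a directory-modifying
 * operation to the cached directory attributes.
 *
 * The directory's cached attributes and readdir data are invalidated, and
 * a lookup revalidation is forced unless the server applied the change
 * atomically and its "before" value matches our cached i_version.
 */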
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) 805 { 806 struct nfs_inode *nfsi = NFS_I(dir); 807 808 spin_lock(&dir->i_lock); 809 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 810 if (!cinfo->atomic || cinfo->before != dir->i_version) 811 nfs_force_lookup_revalidate(dir); 812 dir->i_version = cinfo->after; 813 spin_unlock(&dir->i_lock); 814 } 815 816 struct nfs4_opendata { 817 struct kref kref; 818 struct nfs_openargs o_arg; 819 struct nfs_openres o_res; 820 struct nfs_open_confirmargs c_arg; 821 struct nfs_open_confirmres c_res; 822 struct nfs4_string owner_name; 823 struct nfs4_string group_name; 824 struct nfs_fattr f_attr; 825 struct dentry *dir; 826 struct dentry *dentry; 827 struct nfs4_state_owner *owner; 828 struct nfs4_state *state; 829 struct iattr attrs; 830 unsigned long timestamp; 831 unsigned int rpc_done : 1; 832 int rpc_status; 833 int cancelled; 834 }; 835 836 837 static void nfs4_init_opendata_res(struct nfs4_opendata *p) 838 { 839 p->o_res.f_attr = &p->f_attr; 840 p->o_res.seqid = p->o_arg.seqid; 841 p->c_res.seqid = p->c_arg.seqid; 842 p->o_res.server = p->o_arg.server; 843 p->o_res.access_request = p->o_arg.access; 844 nfs_fattr_init(&p->f_attr); 845 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 846 } 847 848 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 849 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 850 const struct iattr *attrs, 851 gfp_t gfp_mask) 852 { 853 struct dentry *parent = dget_parent(dentry); 854 struct inode *dir = parent->d_inode; 855 struct nfs_server *server = NFS_SERVER(dir); 856 struct nfs4_opendata *p; 857 858 p = kzalloc(sizeof(*p), gfp_mask); 859 if (p == NULL) 860 goto err; 861 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); 862 if (p->o_arg.seqid == NULL) 863 goto err_free; 864 nfs_sb_active(dentry->d_sb); 865 p->dentry = dget(dentry); 866 p->dir = parent; 867 p->owner = sp; 868 atomic_inc(&sp->so_count); 869 p->o_arg.fh = NFS_FH(dir); 870 p->o_arg.open_flags = flags; 871 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 872 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS 873 * will return permission denied for all bits until close */ 874 if (!(flags & O_EXCL)) { 875 /* ask server to check for all possible rights as results 876 * are cached */ 877 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | 878 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE; 879 } 880 p->o_arg.clientid = server->nfs_client->cl_clientid; 881 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 882 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 883 p->o_arg.name = &dentry->d_name; 884 p->o_arg.server = server; 885 p->o_arg.bitmask = server->attr_bitmask; 886 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 887 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 888 if (attrs != NULL && attrs->ia_valid != 0) { 889 __be32 verf[2]; 890 891 p->o_arg.u.attrs = &p->attrs; 892 memcpy(&p->attrs, attrs, sizeof(p->attrs)); 893 894 verf[0] = jiffies; 895 verf[1] = current->pid; 896 memcpy(p->o_arg.u.verifier.data, verf, 897 sizeof(p->o_arg.u.verifier.data)); 898 } 899 p->c_arg.fh = &p->o_res.fh; 900 p->c_arg.stateid = &p->o_res.stateid; 901 p->c_arg.seqid = p->o_arg.seqid; 902 nfs4_init_opendata_res(p); 903 kref_init(&p->kref); 904 return p; 905 err_free: 906 kfree(p); 907 err: 908 dput(parent); 909 return NULL; 910 } 911 912 static void nfs4_opendata_free(struct kref *kref) 913 { 914 struct nfs4_opendata *p = container_of(kref, 915 struct 
nfs4_opendata, kref); 916 struct super_block *sb = p->dentry->d_sb; 917 918 nfs_free_seqid(p->o_arg.seqid); 919 if (p->state != NULL) 920 nfs4_put_open_state(p->state); 921 nfs4_put_state_owner(p->owner); 922 dput(p->dir); 923 dput(p->dentry); 924 nfs_sb_deactive(sb); 925 nfs_fattr_free_names(&p->f_attr); 926 kfree(p); 927 } 928 929 static void nfs4_opendata_put(struct nfs4_opendata *p) 930 { 931 if (p != NULL) 932 kref_put(&p->kref, nfs4_opendata_free); 933 } 934 935 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) 936 { 937 int ret; 938 939 ret = rpc_wait_for_completion_task(task); 940 return ret; 941 } 942 943 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) 944 { 945 int ret = 0; 946 947 if (open_mode & (O_EXCL|O_TRUNC)) 948 goto out; 949 switch (mode & (FMODE_READ|FMODE_WRITE)) { 950 case FMODE_READ: 951 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 952 && state->n_rdonly != 0; 953 break; 954 case FMODE_WRITE: 955 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 956 && state->n_wronly != 0; 957 break; 958 case FMODE_READ|FMODE_WRITE: 959 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 960 && state->n_rdwr != 0; 961 } 962 out: 963 return ret; 964 } 965 966 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) 967 { 968 if (delegation == NULL) 969 return 0; 970 if ((delegation->type & fmode) != fmode) 971 return 0; 972 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 973 return 0; 974 nfs_mark_delegation_referenced(delegation); 975 return 1; 976 } 977 978 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 979 { 980 switch (fmode) { 981 case FMODE_WRITE: 982 state->n_wronly++; 983 break; 984 case FMODE_READ: 985 state->n_rdonly++; 986 break; 987 case FMODE_READ|FMODE_WRITE: 988 state->n_rdwr++; 989 } 990 nfs4_state_set_mode_locked(state, state->state | fmode); 991 } 992 993 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 994 { 995 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 996 nfs4_stateid_copy(&state->stateid, stateid); 997 nfs4_stateid_copy(&state->open_stateid, stateid); 998 switch (fmode) { 999 case FMODE_READ: 1000 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1001 break; 1002 case FMODE_WRITE: 1003 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1004 break; 1005 case FMODE_READ|FMODE_WRITE: 1006 set_bit(NFS_O_RDWR_STATE, &state->flags); 1007 } 1008 } 1009 1010 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 1011 { 1012 write_seqlock(&state->seqlock); 1013 nfs_set_open_stateid_locked(state, stateid, fmode); 1014 write_sequnlock(&state->seqlock); 1015 } 1016 1017 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) 1018 { 1019 /* 1020 * Protect the call to nfs4_state_set_mode_locked and 1021 * serialise the stateid update 1022 */ 1023 write_seqlock(&state->seqlock); 1024 if (deleg_stateid != NULL) { 1025 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1026 set_bit(NFS_DELEGATED_STATE, &state->flags); 1027 } 1028 if (open_stateid != NULL) 1029 nfs_set_open_stateid_locked(state, open_stateid, fmode); 1030 write_sequnlock(&state->seqlock); 1031 spin_lock(&state->owner->so_lock); 1032 update_open_stateflags(state, fmode); 1033 spin_unlock(&state->owner->so_lock); 1034 } 1035 1036 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid 
*open_stateid, nfs4_stateid *delegation, fmode_t fmode) 1037 { 1038 struct nfs_inode *nfsi = NFS_I(state->inode); 1039 struct nfs_delegation *deleg_cur; 1040 int ret = 0; 1041 1042 fmode &= (FMODE_READ|FMODE_WRITE); 1043 1044 rcu_read_lock(); 1045 deleg_cur = rcu_dereference(nfsi->delegation); 1046 if (deleg_cur == NULL) 1047 goto no_delegation; 1048 1049 spin_lock(&deleg_cur->lock); 1050 if (nfsi->delegation != deleg_cur || 1051 (deleg_cur->type & fmode) != fmode) 1052 goto no_delegation_unlock; 1053 1054 if (delegation == NULL) 1055 delegation = &deleg_cur->stateid; 1056 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) 1057 goto no_delegation_unlock; 1058 1059 nfs_mark_delegation_referenced(deleg_cur); 1060 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); 1061 ret = 1; 1062 no_delegation_unlock: 1063 spin_unlock(&deleg_cur->lock); 1064 no_delegation: 1065 rcu_read_unlock(); 1066 1067 if (!ret && open_stateid != NULL) { 1068 __update_open_stateid(state, open_stateid, NULL, fmode); 1069 ret = 1; 1070 } 1071 1072 return ret; 1073 } 1074 1075 1076 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1077 { 1078 struct nfs_delegation *delegation; 1079 1080 rcu_read_lock(); 1081 delegation = rcu_dereference(NFS_I(inode)->delegation); 1082 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1083 rcu_read_unlock(); 1084 return; 1085 } 1086 rcu_read_unlock(); 1087 nfs4_inode_return_delegation(inode); 1088 } 1089 1090 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1091 { 1092 struct nfs4_state *state = opendata->state; 1093 struct nfs_inode *nfsi = NFS_I(state->inode); 1094 struct nfs_delegation *delegation; 1095 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); 1096 fmode_t fmode = opendata->o_arg.fmode; 1097 nfs4_stateid stateid; 1098 int ret = -EAGAIN; 1099 1100 for (;;) { 1101 if (can_open_cached(state, fmode, open_mode)) { 1102 spin_lock(&state->owner->so_lock); 1103 if (can_open_cached(state, fmode, open_mode)) { 1104 update_open_stateflags(state, fmode); 1105 spin_unlock(&state->owner->so_lock); 1106 goto out_return_state; 1107 } 1108 spin_unlock(&state->owner->so_lock); 1109 } 1110 rcu_read_lock(); 1111 delegation = rcu_dereference(nfsi->delegation); 1112 if (!can_open_delegated(delegation, fmode)) { 1113 rcu_read_unlock(); 1114 break; 1115 } 1116 /* Save the delegation */ 1117 nfs4_stateid_copy(&stateid, &delegation->stateid); 1118 rcu_read_unlock(); 1119 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1120 if (ret != 0) 1121 goto out; 1122 ret = -EAGAIN; 1123 1124 /* Try to update the stateid using the delegation */ 1125 if (update_open_stateid(state, NULL, &stateid, fmode)) 1126 goto out_return_state; 1127 } 1128 out: 1129 return ERR_PTR(ret); 1130 out_return_state: 1131 atomic_inc(&state->count); 1132 return state; 1133 } 1134 1135 static void 1136 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) 1137 { 1138 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client; 1139 struct nfs_delegation *delegation; 1140 int delegation_flags = 0; 1141 1142 rcu_read_lock(); 1143 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1144 if (delegation) 1145 delegation_flags = delegation->flags; 1146 rcu_read_unlock(); 1147 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { 1148 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1149 "returning a delegation for " 1150 "OPEN(CLAIM_DELEGATE_CUR)\n", 1151 
clp->cl_hostname); 1152 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1153 nfs_inode_set_delegation(state->inode, 1154 data->owner->so_cred, 1155 &data->o_res); 1156 else 1157 nfs_inode_reclaim_delegation(state->inode, 1158 data->owner->so_cred, 1159 &data->o_res); 1160 } 1161 1162 /* 1163 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 1164 * and update the nfs4_state. 1165 */ 1166 static struct nfs4_state * 1167 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 1168 { 1169 struct inode *inode = data->state->inode; 1170 struct nfs4_state *state = data->state; 1171 int ret; 1172 1173 if (!data->rpc_done) { 1174 ret = data->rpc_status; 1175 goto err; 1176 } 1177 1178 ret = -ESTALE; 1179 if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) || 1180 !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) || 1181 !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE)) 1182 goto err; 1183 1184 ret = -ENOMEM; 1185 state = nfs4_get_open_state(inode, data->owner); 1186 if (state == NULL) 1187 goto err; 1188 1189 ret = nfs_refresh_inode(inode, &data->f_attr); 1190 if (ret) 1191 goto err; 1192 1193 if (data->o_res.delegation_type != 0) 1194 nfs4_opendata_check_deleg(data, state); 1195 update_open_stateid(state, &data->o_res.stateid, NULL, 1196 data->o_arg.fmode); 1197 1198 return state; 1199 err: 1200 return ERR_PTR(ret); 1201 1202 } 1203 1204 static struct nfs4_state * 1205 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1206 { 1207 struct inode *inode; 1208 struct nfs4_state *state = NULL; 1209 int ret; 1210 1211 if (!data->rpc_done) { 1212 state = nfs4_try_open_cached(data); 1213 goto out; 1214 } 1215 1216 ret = -EAGAIN; 1217 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1218 goto err; 1219 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); 1220 ret = PTR_ERR(inode); 1221 if (IS_ERR(inode)) 1222 goto err; 1223 ret = -ENOMEM; 1224 state = nfs4_get_open_state(inode, data->owner); 1225 if (state == NULL) 1226 goto err_put_inode; 1227 if (data->o_res.delegation_type != 0) 1228 nfs4_opendata_check_deleg(data, state); 1229 update_open_stateid(state, &data->o_res.stateid, NULL, 1230 data->o_arg.fmode); 1231 iput(inode); 1232 out: 1233 return state; 1234 err_put_inode: 1235 iput(inode); 1236 err: 1237 return ERR_PTR(ret); 1238 } 1239 1240 static struct nfs4_state * 1241 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1242 { 1243 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 1244 return _nfs4_opendata_reclaim_to_nfs4_state(data); 1245 return _nfs4_opendata_to_nfs4_state(data); 1246 } 1247 1248 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) 1249 { 1250 struct nfs_inode *nfsi = NFS_I(state->inode); 1251 struct nfs_open_context *ctx; 1252 1253 spin_lock(&state->inode->i_lock); 1254 list_for_each_entry(ctx, &nfsi->open_files, list) { 1255 if (ctx->state != state) 1256 continue; 1257 get_nfs_open_context(ctx); 1258 spin_unlock(&state->inode->i_lock); 1259 return ctx; 1260 } 1261 spin_unlock(&state->inode->i_lock); 1262 return ERR_PTR(-ENOENT); 1263 } 1264 1265 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state) 1266 { 1267 struct nfs4_opendata *opendata; 1268 1269 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); 1270 if (opendata == NULL) 1271 return ERR_PTR(-ENOMEM); 1272 opendata->state = state; 1273 atomic_inc(&state->count); 1274 return opendata; 1275 } 1276 1277 static int 
nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) 1278 { 1279 struct nfs4_state *newstate; 1280 int ret; 1281 1282 opendata->o_arg.open_flags = 0; 1283 opendata->o_arg.fmode = fmode; 1284 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 1285 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 1286 nfs4_init_opendata_res(opendata); 1287 ret = _nfs4_recover_proc_open(opendata); 1288 if (ret != 0) 1289 return ret; 1290 newstate = nfs4_opendata_to_nfs4_state(opendata); 1291 if (IS_ERR(newstate)) 1292 return PTR_ERR(newstate); 1293 nfs4_close_state(newstate, fmode); 1294 *res = newstate; 1295 return 0; 1296 } 1297 1298 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 1299 { 1300 struct nfs4_state *newstate; 1301 int ret; 1302 1303 /* memory barrier prior to reading state->n_* */ 1304 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1305 smp_rmb(); 1306 if (state->n_rdwr != 0) { 1307 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1308 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); 1309 if (ret != 0) 1310 return ret; 1311 if (newstate != state) 1312 return -ESTALE; 1313 } 1314 if (state->n_wronly != 0) { 1315 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1316 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); 1317 if (ret != 0) 1318 return ret; 1319 if (newstate != state) 1320 return -ESTALE; 1321 } 1322 if (state->n_rdonly != 0) { 1323 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1324 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); 1325 if (ret != 0) 1326 return ret; 1327 if (newstate != state) 1328 return -ESTALE; 1329 } 1330 /* 1331 * We may have performed cached opens for all three recoveries. 1332 * Check if we need to update the current stateid. 1333 */ 1334 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 1335 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 1336 write_seqlock(&state->seqlock); 1337 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1338 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1339 write_sequnlock(&state->seqlock); 1340 } 1341 return 0; 1342 } 1343 1344 /* 1345 * OPEN_RECLAIM: 1346 * reclaim state on the server after a reboot. 
1347 */ 1348 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1349 { 1350 struct nfs_delegation *delegation; 1351 struct nfs4_opendata *opendata; 1352 fmode_t delegation_type = 0; 1353 int status; 1354 1355 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1356 if (IS_ERR(opendata)) 1357 return PTR_ERR(opendata); 1358 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; 1359 opendata->o_arg.fh = NFS_FH(state->inode); 1360 rcu_read_lock(); 1361 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1362 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) 1363 delegation_type = delegation->type; 1364 rcu_read_unlock(); 1365 opendata->o_arg.u.delegation_type = delegation_type; 1366 status = nfs4_open_recover(opendata, state); 1367 nfs4_opendata_put(opendata); 1368 return status; 1369 } 1370 1371 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1372 { 1373 struct nfs_server *server = NFS_SERVER(state->inode); 1374 struct nfs4_exception exception = { }; 1375 int err; 1376 do { 1377 err = _nfs4_do_open_reclaim(ctx, state); 1378 if (err != -NFS4ERR_DELAY) 1379 break; 1380 nfs4_handle_exception(server, err, &exception); 1381 } while (exception.retry); 1382 return err; 1383 } 1384 1385 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 1386 { 1387 struct nfs_open_context *ctx; 1388 int ret; 1389 1390 ctx = nfs4_state_find_open_context(state); 1391 if (IS_ERR(ctx)) 1392 return PTR_ERR(ctx); 1393 ret = nfs4_do_open_reclaim(ctx, state); 1394 put_nfs_open_context(ctx); 1395 return ret; 1396 } 1397 1398 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1399 { 1400 struct nfs4_opendata *opendata; 1401 int ret; 1402 1403 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1404 if (IS_ERR(opendata)) 1405 return PTR_ERR(opendata); 1406 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; 1407 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1408 ret = nfs4_open_recover(opendata, state); 1409 nfs4_opendata_put(opendata); 1410 return ret; 1411 } 1412 1413 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1414 { 1415 struct nfs4_exception exception = { }; 1416 struct nfs_server *server = NFS_SERVER(state->inode); 1417 int err; 1418 do { 1419 err = _nfs4_open_delegation_recall(ctx, state, stateid); 1420 switch (err) { 1421 case 0: 1422 case -ENOENT: 1423 case -ESTALE: 1424 goto out; 1425 case -NFS4ERR_BADSESSION: 1426 case -NFS4ERR_BADSLOT: 1427 case -NFS4ERR_BAD_HIGH_SLOT: 1428 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1429 case -NFS4ERR_DEADSESSION: 1430 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 1431 goto out; 1432 case -NFS4ERR_STALE_CLIENTID: 1433 case -NFS4ERR_STALE_STATEID: 1434 case -NFS4ERR_EXPIRED: 1435 /* Don't recall a delegation if it was lost */ 1436 nfs4_schedule_lease_recovery(server->nfs_client); 1437 goto out; 1438 case -ERESTARTSYS: 1439 /* 1440 * The show must go on: exit, but mark the 1441 * stateid as needing recovery. 1442 */ 1443 case -NFS4ERR_DELEG_REVOKED: 1444 case -NFS4ERR_ADMIN_REVOKED: 1445 case -NFS4ERR_BAD_STATEID: 1446 nfs_inode_find_state_and_recover(state->inode, 1447 stateid); 1448 nfs4_schedule_stateid_recovery(server, state); 1449 case -EKEYEXPIRED: 1450 /* 1451 * User RPCSEC_GSS context has expired. 
1452 * We cannot recover this stateid now, so 1453 * skip it and allow recovery thread to 1454 * proceed. 1455 */ 1456 case -ENOMEM: 1457 err = 0; 1458 goto out; 1459 } 1460 err = nfs4_handle_exception(server, err, &exception); 1461 } while (exception.retry); 1462 out: 1463 return err; 1464 } 1465 1466 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1467 { 1468 struct nfs4_opendata *data = calldata; 1469 1470 data->rpc_status = task->tk_status; 1471 if (data->rpc_status == 0) { 1472 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 1473 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1474 renew_lease(data->o_res.server, data->timestamp); 1475 data->rpc_done = 1; 1476 } 1477 } 1478 1479 static void nfs4_open_confirm_release(void *calldata) 1480 { 1481 struct nfs4_opendata *data = calldata; 1482 struct nfs4_state *state = NULL; 1483 1484 /* If this request hasn't been cancelled, do nothing */ 1485 if (data->cancelled == 0) 1486 goto out_free; 1487 /* In case of error, no cleanup! */ 1488 if (!data->rpc_done) 1489 goto out_free; 1490 state = nfs4_opendata_to_nfs4_state(data); 1491 if (!IS_ERR(state)) 1492 nfs4_close_state(state, data->o_arg.fmode); 1493 out_free: 1494 nfs4_opendata_put(data); 1495 } 1496 1497 static const struct rpc_call_ops nfs4_open_confirm_ops = { 1498 .rpc_call_done = nfs4_open_confirm_done, 1499 .rpc_release = nfs4_open_confirm_release, 1500 }; 1501 1502 /* 1503 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1504 */ 1505 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1506 { 1507 struct nfs_server *server = NFS_SERVER(data->dir->d_inode); 1508 struct rpc_task *task; 1509 struct rpc_message msg = { 1510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1511 .rpc_argp = &data->c_arg, 1512 .rpc_resp = &data->c_res, 1513 .rpc_cred = data->owner->so_cred, 1514 }; 1515 struct rpc_task_setup task_setup_data = { 1516 .rpc_client = server->client, 1517 .rpc_message = &msg, 1518 .callback_ops = &nfs4_open_confirm_ops, 1519 .callback_data = data, 1520 .workqueue = nfsiod_workqueue, 1521 .flags = RPC_TASK_ASYNC, 1522 }; 1523 int status; 1524 1525 kref_get(&data->kref); 1526 data->rpc_done = 0; 1527 data->rpc_status = 0; 1528 data->timestamp = jiffies; 1529 task = rpc_run_task(&task_setup_data); 1530 if (IS_ERR(task)) 1531 return PTR_ERR(task); 1532 status = nfs4_wait_for_completion_rpc_task(task); 1533 if (status != 0) { 1534 data->cancelled = 1; 1535 smp_wmb(); 1536 } else 1537 status = data->rpc_status; 1538 rpc_put_task(task); 1539 return status; 1540 } 1541 1542 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1543 { 1544 struct nfs4_opendata *data = calldata; 1545 struct nfs4_state_owner *sp = data->owner; 1546 1547 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1548 return; 1549 /* 1550 * Check if we still need to send an OPEN call, or if we can use 1551 * a delegation instead. 1552 */ 1553 if (data->state != NULL) { 1554 struct nfs_delegation *delegation; 1555 1556 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1557 goto out_no_action; 1558 rcu_read_lock(); 1559 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1560 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR && 1561 can_open_delegated(delegation, data->o_arg.fmode)) 1562 goto unlock_no_action; 1563 rcu_read_unlock(); 1564 } 1565 /* Update client id. 
*/ 1566 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; 1567 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { 1568 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 1569 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 1570 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 1571 } 1572 data->timestamp = jiffies; 1573 if (nfs4_setup_sequence(data->o_arg.server, 1574 &data->o_arg.seq_args, 1575 &data->o_res.seq_res, task)) 1576 return; 1577 rpc_call_start(task); 1578 return; 1579 unlock_no_action: 1580 rcu_read_unlock(); 1581 out_no_action: 1582 task->tk_action = NULL; 1583 1584 } 1585 1586 static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata) 1587 { 1588 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 1589 nfs4_open_prepare(task, calldata); 1590 } 1591 1592 static void nfs4_open_done(struct rpc_task *task, void *calldata) 1593 { 1594 struct nfs4_opendata *data = calldata; 1595 1596 data->rpc_status = task->tk_status; 1597 1598 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 1599 return; 1600 1601 if (task->tk_status == 0) { 1602 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 1603 switch (data->o_res.f_attr->mode & S_IFMT) { 1604 case S_IFREG: 1605 break; 1606 case S_IFLNK: 1607 data->rpc_status = -ELOOP; 1608 break; 1609 case S_IFDIR: 1610 data->rpc_status = -EISDIR; 1611 break; 1612 default: 1613 data->rpc_status = -ENOTDIR; 1614 } 1615 } 1616 renew_lease(data->o_res.server, data->timestamp); 1617 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 1618 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1619 } 1620 data->rpc_done = 1; 1621 } 1622 1623 static void nfs4_open_release(void *calldata) 1624 { 1625 struct nfs4_opendata *data = calldata; 1626 struct nfs4_state *state = NULL; 1627 1628 /* If this request hasn't been cancelled, do nothing */ 1629 if (data->cancelled == 0) 1630 goto out_free; 1631 /* In case of error, no cleanup! */ 1632 if (data->rpc_status != 0 || !data->rpc_done) 1633 goto out_free; 1634 /* In case we need an open_confirm, no cleanup! 
*/ 1635 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 1636 goto out_free; 1637 state = nfs4_opendata_to_nfs4_state(data); 1638 if (!IS_ERR(state)) 1639 nfs4_close_state(state, data->o_arg.fmode); 1640 out_free: 1641 nfs4_opendata_put(data); 1642 } 1643 1644 static const struct rpc_call_ops nfs4_open_ops = { 1645 .rpc_call_prepare = nfs4_open_prepare, 1646 .rpc_call_done = nfs4_open_done, 1647 .rpc_release = nfs4_open_release, 1648 }; 1649 1650 static const struct rpc_call_ops nfs4_recover_open_ops = { 1651 .rpc_call_prepare = nfs4_recover_open_prepare, 1652 .rpc_call_done = nfs4_open_done, 1653 .rpc_release = nfs4_open_release, 1654 }; 1655 1656 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 1657 { 1658 struct inode *dir = data->dir->d_inode; 1659 struct nfs_server *server = NFS_SERVER(dir); 1660 struct nfs_openargs *o_arg = &data->o_arg; 1661 struct nfs_openres *o_res = &data->o_res; 1662 struct rpc_task *task; 1663 struct rpc_message msg = { 1664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 1665 .rpc_argp = o_arg, 1666 .rpc_resp = o_res, 1667 .rpc_cred = data->owner->so_cred, 1668 }; 1669 struct rpc_task_setup task_setup_data = { 1670 .rpc_client = server->client, 1671 .rpc_message = &msg, 1672 .callback_ops = &nfs4_open_ops, 1673 .callback_data = data, 1674 .workqueue = nfsiod_workqueue, 1675 .flags = RPC_TASK_ASYNC, 1676 }; 1677 int status; 1678 1679 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 1680 kref_get(&data->kref); 1681 data->rpc_done = 0; 1682 data->rpc_status = 0; 1683 data->cancelled = 0; 1684 if (isrecover) 1685 task_setup_data.callback_ops = &nfs4_recover_open_ops; 1686 task = rpc_run_task(&task_setup_data); 1687 if (IS_ERR(task)) 1688 return PTR_ERR(task); 1689 status = nfs4_wait_for_completion_rpc_task(task); 1690 if (status != 0) { 1691 data->cancelled = 1; 1692 smp_wmb(); 1693 } else 1694 status = data->rpc_status; 1695 rpc_put_task(task); 1696 1697 return status; 1698 } 1699 1700 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 1701 { 1702 struct inode *dir = data->dir->d_inode; 1703 struct nfs_openres *o_res = &data->o_res; 1704 int status; 1705 1706 status = nfs4_run_open_task(data, 1); 1707 if (status != 0 || !data->rpc_done) 1708 return status; 1709 1710 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 1711 1712 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1713 status = _nfs4_proc_open_confirm(data); 1714 if (status != 0) 1715 return status; 1716 } 1717 1718 return status; 1719 } 1720 1721 static int nfs4_opendata_access(struct rpc_cred *cred, 1722 struct nfs4_opendata *opendata, 1723 struct nfs4_state *state, fmode_t fmode) 1724 { 1725 struct nfs_access_entry cache; 1726 u32 mask; 1727 1728 /* access call failed or for some reason the server doesn't 1729 * support any access modes -- defer access call until later */ 1730 if (opendata->o_res.access_supported == 0) 1731 return 0; 1732 1733 mask = 0; 1734 /* don't check MAY_WRITE - a newly created file may not have 1735 * write mode bits, but POSIX allows the creating process to write */ 1736 if (fmode & FMODE_READ) 1737 mask |= MAY_READ; 1738 if (fmode & FMODE_EXEC) 1739 mask |= MAY_EXEC; 1740 1741 cache.cred = cred; 1742 cache.jiffies = jiffies; 1743 nfs_access_set_mask(&cache, opendata->o_res.access_result); 1744 nfs_access_add_cache(state->inode, &cache); 1745 1746 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 1747 return 0; 1748 1749 /* even though OPEN succeeded, access is denied. 
Close the file */ 1750 nfs4_close_state(state, fmode); 1751 return -NFS4ERR_ACCESS; 1752 } 1753 1754 /* 1755 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 1756 */ 1757 static int _nfs4_proc_open(struct nfs4_opendata *data) 1758 { 1759 struct inode *dir = data->dir->d_inode; 1760 struct nfs_server *server = NFS_SERVER(dir); 1761 struct nfs_openargs *o_arg = &data->o_arg; 1762 struct nfs_openres *o_res = &data->o_res; 1763 int status; 1764 1765 status = nfs4_run_open_task(data, 0); 1766 if (!data->rpc_done) 1767 return status; 1768 if (status != 0) { 1769 if (status == -NFS4ERR_BADNAME && 1770 !(o_arg->open_flags & O_CREAT)) 1771 return -ENOENT; 1772 return status; 1773 } 1774 1775 nfs_fattr_map_and_free_names(server, &data->f_attr); 1776 1777 if (o_arg->open_flags & O_CREAT) 1778 update_changeattr(dir, &o_res->cinfo); 1779 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 1780 server->caps &= ~NFS_CAP_POSIX_LOCK; 1781 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1782 status = _nfs4_proc_open_confirm(data); 1783 if (status != 0) 1784 return status; 1785 } 1786 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 1787 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); 1788 return 0; 1789 } 1790 1791 static int nfs4_client_recover_expired_lease(struct nfs_client *clp) 1792 { 1793 unsigned int loop; 1794 int ret; 1795 1796 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 1797 ret = nfs4_wait_clnt_recover(clp); 1798 if (ret != 0) 1799 break; 1800 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && 1801 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) 1802 break; 1803 nfs4_schedule_state_manager(clp); 1804 ret = -EIO; 1805 } 1806 return ret; 1807 } 1808 1809 static int nfs4_recover_expired_lease(struct nfs_server *server) 1810 { 1811 return nfs4_client_recover_expired_lease(server->nfs_client); 1812 } 1813 1814 /* 1815 * OPEN_EXPIRED: 1816 * reclaim state on the server after a network partition. 
1817 * Assumes caller holds the appropriate lock 1818 */ 1819 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 1820 { 1821 struct nfs4_opendata *opendata; 1822 int ret; 1823 1824 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1825 if (IS_ERR(opendata)) 1826 return PTR_ERR(opendata); 1827 ret = nfs4_open_recover(opendata, state); 1828 if (ret == -ESTALE) 1829 d_drop(ctx->dentry); 1830 nfs4_opendata_put(opendata); 1831 return ret; 1832 } 1833 1834 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 1835 { 1836 struct nfs_server *server = NFS_SERVER(state->inode); 1837 struct nfs4_exception exception = { }; 1838 int err; 1839 1840 do { 1841 err = _nfs4_open_expired(ctx, state); 1842 switch (err) { 1843 default: 1844 goto out; 1845 case -NFS4ERR_GRACE: 1846 case -NFS4ERR_DELAY: 1847 nfs4_handle_exception(server, err, &exception); 1848 err = 0; 1849 } 1850 } while (exception.retry); 1851 out: 1852 return err; 1853 } 1854 1855 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 1856 { 1857 struct nfs_open_context *ctx; 1858 int ret; 1859 1860 ctx = nfs4_state_find_open_context(state); 1861 if (IS_ERR(ctx)) 1862 return PTR_ERR(ctx); 1863 ret = nfs4_do_open_expired(ctx, state); 1864 put_nfs_open_context(ctx); 1865 return ret; 1866 } 1867 1868 #if defined(CONFIG_NFS_V4_1) 1869 static void nfs41_clear_delegation_stateid(struct nfs4_state *state) 1870 { 1871 struct nfs_server *server = NFS_SERVER(state->inode); 1872 nfs4_stateid *stateid = &state->stateid; 1873 int status; 1874 1875 /* If a state reset has been done, test_stateid is unneeded */ 1876 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1877 return; 1878 1879 status = nfs41_test_stateid(server, stateid); 1880 if (status != NFS_OK) { 1881 /* Free the stateid unless the server explicitly 1882 * informs us the stateid is unrecognized. */ 1883 if (status != -NFS4ERR_BAD_STATEID) 1884 nfs41_free_stateid(server, stateid); 1885 nfs_remove_bad_delegation(state->inode); 1886 1887 write_seqlock(&state->seqlock); 1888 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1889 write_sequnlock(&state->seqlock); 1890 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1891 } 1892 } 1893 1894 /** 1895 * nfs41_check_open_stateid - possibly free an open stateid 1896 * 1897 * @state: NFSv4 state for an inode 1898 * 1899 * Returns NFS_OK if recovery for this stateid is now finished. 1900 * Otherwise a negative NFS4ERR value is returned. 1901 */ 1902 static int nfs41_check_open_stateid(struct nfs4_state *state) 1903 { 1904 struct nfs_server *server = NFS_SERVER(state->inode); 1905 nfs4_stateid *stateid = &state->open_stateid; 1906 int status; 1907 1908 /* If a state reset has been done, test_stateid is unneeded */ 1909 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 1910 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 1911 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 1912 return -NFS4ERR_BAD_STATEID; 1913 1914 status = nfs41_test_stateid(server, stateid); 1915 if (status != NFS_OK) { 1916 /* Free the stateid unless the server explicitly 1917 * informs us the stateid is unrecognized. 
*/ 1918 if (status != -NFS4ERR_BAD_STATEID) 1919 nfs41_free_stateid(server, stateid); 1920 1921 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1922 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1923 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1924 } 1925 return status; 1926 } 1927 1928 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 1929 { 1930 int status; 1931 1932 nfs41_clear_delegation_stateid(state); 1933 status = nfs41_check_open_stateid(state); 1934 if (status != NFS_OK) 1935 status = nfs4_open_expired(sp, state); 1936 return status; 1937 } 1938 #endif 1939 1940 /* 1941 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 1942 * fields corresponding to attributes that were used to store the verifier. 1943 * Make sure we clobber those fields in the later setattr call 1944 */ 1945 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 1946 { 1947 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 1948 !(sattr->ia_valid & ATTR_ATIME_SET)) 1949 sattr->ia_valid |= ATTR_ATIME; 1950 1951 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 1952 !(sattr->ia_valid & ATTR_MTIME_SET)) 1953 sattr->ia_valid |= ATTR_MTIME; 1954 } 1955 1956 /* 1957 * Returns a referenced nfs4_state 1958 */ 1959 static int _nfs4_do_open(struct inode *dir, 1960 struct dentry *dentry, 1961 fmode_t fmode, 1962 int flags, 1963 struct iattr *sattr, 1964 struct rpc_cred *cred, 1965 struct nfs4_state **res, 1966 struct nfs4_threshold **ctx_th) 1967 { 1968 struct nfs4_state_owner *sp; 1969 struct nfs4_state *state = NULL; 1970 struct nfs_server *server = NFS_SERVER(dir); 1971 struct nfs4_opendata *opendata; 1972 int status; 1973 1974 /* Protect against reboot recovery conflicts */ 1975 status = -ENOMEM; 1976 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 1977 if (sp == NULL) { 1978 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 1979 goto out_err; 1980 } 1981 status = nfs4_recover_expired_lease(server); 1982 if (status != 0) 1983 goto err_put_state_owner; 1984 if (dentry->d_inode != NULL) 1985 nfs4_return_incompatible_delegation(dentry->d_inode, fmode); 1986 status = -ENOMEM; 1987 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL); 1988 if (opendata == NULL) 1989 goto err_put_state_owner; 1990 1991 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 1992 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 1993 if (!opendata->f_attr.mdsthreshold) 1994 goto err_opendata_put; 1995 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 1996 } 1997 if (dentry->d_inode != NULL) 1998 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 1999 2000 status = _nfs4_proc_open(opendata); 2001 if (status != 0) 2002 goto err_opendata_put; 2003 2004 state = nfs4_opendata_to_nfs4_state(opendata); 2005 status = PTR_ERR(state); 2006 if (IS_ERR(state)) 2007 goto err_opendata_put; 2008 if (server->caps & NFS_CAP_POSIX_LOCK) 2009 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2010 2011 status = nfs4_opendata_access(cred, opendata, state, fmode); 2012 if (status != 0) 2013 goto err_opendata_put; 2014 2015 if (opendata->o_arg.open_flags & O_EXCL) { 2016 nfs4_exclusive_attrset(opendata, sattr); 2017 2018 nfs_fattr_init(opendata->o_res.f_attr); 2019 status = nfs4_do_setattr(state->inode, cred, 2020 opendata->o_res.f_attr, sattr, 2021 state); 2022 if (status == 0) 2023 nfs_setattr_update_inode(state->inode, sattr); 2024 nfs_post_op_update_inode(state->inode, 
opendata->o_res.f_attr); 2025 } 2026 2027 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 2028 *ctx_th = opendata->f_attr.mdsthreshold; 2029 else 2030 kfree(opendata->f_attr.mdsthreshold); 2031 opendata->f_attr.mdsthreshold = NULL; 2032 2033 nfs4_opendata_put(opendata); 2034 nfs4_put_state_owner(sp); 2035 *res = state; 2036 return 0; 2037 err_opendata_put: 2038 kfree(opendata->f_attr.mdsthreshold); 2039 nfs4_opendata_put(opendata); 2040 err_put_state_owner: 2041 nfs4_put_state_owner(sp); 2042 out_err: 2043 *res = NULL; 2044 return status; 2045 } 2046 2047 2048 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2049 struct dentry *dentry, 2050 fmode_t fmode, 2051 int flags, 2052 struct iattr *sattr, 2053 struct rpc_cred *cred, 2054 struct nfs4_threshold **ctx_th) 2055 { 2056 struct nfs4_exception exception = { }; 2057 struct nfs4_state *res; 2058 int status; 2059 2060 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC; 2061 do { 2062 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, 2063 &res, ctx_th); 2064 if (status == 0) 2065 break; 2066 /* NOTE: BAD_SEQID means the server and client disagree about the 2067 * book-keeping w.r.t. state-changing operations 2068 * (OPEN/CLOSE/LOCK/LOCKU...) 2069 * It is actually a sign of a bug on the client or on the server. 2070 * 2071 * If we receive a BAD_SEQID error in the particular case of 2072 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2073 * have unhashed the old state_owner for us, and that we can 2074 * therefore safely retry using a new one. We should still warn 2075 * the user though... 2076 */ 2077 if (status == -NFS4ERR_BAD_SEQID) { 2078 pr_warn_ratelimited("NFS: v4 server %s " 2079 " returned a bad sequence-id error!\n", 2080 NFS_SERVER(dir)->nfs_client->cl_hostname); 2081 exception.retry = 1; 2082 continue; 2083 } 2084 /* 2085 * BAD_STATEID on OPEN means that the server cancelled our 2086 * state before it received the OPEN_CONFIRM. 2087 * Recover by retrying the request as per the discussion 2088 * on Page 181 of RFC3530. 
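		 * Unlike the BAD_SEQID case above there is nothing to warn
		 * about here; the retry below simply sends a fresh OPEN.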
2089 */ 2090 if (status == -NFS4ERR_BAD_STATEID) { 2091 exception.retry = 1; 2092 continue; 2093 } 2094 if (status == -EAGAIN) { 2095 /* We must have found a delegation */ 2096 exception.retry = 1; 2097 continue; 2098 } 2099 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 2100 status, &exception)); 2101 } while (exception.retry); 2102 return res; 2103 } 2104 2105 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2106 struct nfs_fattr *fattr, struct iattr *sattr, 2107 struct nfs4_state *state) 2108 { 2109 struct nfs_server *server = NFS_SERVER(inode); 2110 struct nfs_setattrargs arg = { 2111 .fh = NFS_FH(inode), 2112 .iap = sattr, 2113 .server = server, 2114 .bitmask = server->attr_bitmask, 2115 }; 2116 struct nfs_setattrres res = { 2117 .fattr = fattr, 2118 .server = server, 2119 }; 2120 struct rpc_message msg = { 2121 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2122 .rpc_argp = &arg, 2123 .rpc_resp = &res, 2124 .rpc_cred = cred, 2125 }; 2126 unsigned long timestamp = jiffies; 2127 int status; 2128 2129 nfs_fattr_init(fattr); 2130 2131 if (state != NULL) { 2132 struct nfs_lockowner lockowner = { 2133 .l_owner = current->files, 2134 .l_pid = current->tgid, 2135 }; 2136 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2137 &lockowner); 2138 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, 2139 FMODE_WRITE)) { 2140 /* Use that stateid */ 2141 } else 2142 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2143 2144 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2145 if (status == 0 && state != NULL) 2146 renew_lease(server, timestamp); 2147 return status; 2148 } 2149 2150 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2151 struct nfs_fattr *fattr, struct iattr *sattr, 2152 struct nfs4_state *state) 2153 { 2154 struct nfs_server *server = NFS_SERVER(inode); 2155 struct nfs4_exception exception = { 2156 .state = state, 2157 .inode = inode, 2158 }; 2159 int err; 2160 do { 2161 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); 2162 switch (err) { 2163 case -NFS4ERR_OPENMODE: 2164 if (state && !(state->state & FMODE_WRITE)) { 2165 err = -EBADF; 2166 if (sattr->ia_valid & ATTR_OPEN) 2167 err = -EACCES; 2168 goto out; 2169 } 2170 } 2171 err = nfs4_handle_exception(server, err, &exception); 2172 } while (exception.retry); 2173 out: 2174 return err; 2175 } 2176 2177 struct nfs4_closedata { 2178 struct inode *inode; 2179 struct nfs4_state *state; 2180 struct nfs_closeargs arg; 2181 struct nfs_closeres res; 2182 struct nfs_fattr fattr; 2183 unsigned long timestamp; 2184 bool roc; 2185 u32 roc_barrier; 2186 }; 2187 2188 static void nfs4_free_closedata(void *data) 2189 { 2190 struct nfs4_closedata *calldata = data; 2191 struct nfs4_state_owner *sp = calldata->state->owner; 2192 struct super_block *sb = calldata->state->inode->i_sb; 2193 2194 if (calldata->roc) 2195 pnfs_roc_release(calldata->state->inode); 2196 nfs4_put_open_state(calldata->state); 2197 nfs_free_seqid(calldata->arg.seqid); 2198 nfs4_put_state_owner(sp); 2199 nfs_sb_deactive(sb); 2200 kfree(calldata); 2201 } 2202 2203 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, 2204 fmode_t fmode) 2205 { 2206 spin_lock(&state->owner->so_lock); 2207 if (!(fmode & FMODE_READ)) 2208 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2209 if (!(fmode & FMODE_WRITE)) 2210 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2211 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2212 spin_unlock(&state->owner->so_lock); 2213 } 2214 
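/*
 * Completion handler for CLOSE and OPEN_DOWNGRADE.  Stateid errors
 * (STALE/OLD/BAD/EXPIRED) are deliberately ignored when the file was
 * being closed completely (arg.fmode == 0), since the server has
 * evidently discarded the open state already; any other error is
 * passed to nfs4_async_handle_error(), which may ask us to restart
 * the RPC.
 */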
2215 static void nfs4_close_done(struct rpc_task *task, void *data) 2216 { 2217 struct nfs4_closedata *calldata = data; 2218 struct nfs4_state *state = calldata->state; 2219 struct nfs_server *server = NFS_SERVER(calldata->inode); 2220 2221 dprintk("%s: begin!\n", __func__); 2222 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2223 return; 2224 /* hmm. we are done with the inode, and in the process of freeing 2225 * the state_owner. we keep this around to process errors 2226 */ 2227 switch (task->tk_status) { 2228 case 0: 2229 if (calldata->roc) 2230 pnfs_roc_set_barrier(state->inode, 2231 calldata->roc_barrier); 2232 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2233 renew_lease(server, calldata->timestamp); 2234 nfs4_close_clear_stateid_flags(state, 2235 calldata->arg.fmode); 2236 break; 2237 case -NFS4ERR_STALE_STATEID: 2238 case -NFS4ERR_OLD_STATEID: 2239 case -NFS4ERR_BAD_STATEID: 2240 case -NFS4ERR_EXPIRED: 2241 if (calldata->arg.fmode == 0) 2242 break; 2243 default: 2244 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2245 rpc_restart_call_prepare(task); 2246 } 2247 nfs_release_seqid(calldata->arg.seqid); 2248 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2249 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2250 } 2251 2252 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2253 { 2254 struct nfs4_closedata *calldata = data; 2255 struct nfs4_state *state = calldata->state; 2256 struct inode *inode = calldata->inode; 2257 int call_close = 0; 2258 2259 dprintk("%s: begin!\n", __func__); 2260 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2261 return; 2262 2263 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2264 calldata->arg.fmode = FMODE_READ|FMODE_WRITE; 2265 spin_lock(&state->owner->so_lock); 2266 /* Calculate the change in open mode */ 2267 if (state->n_rdwr == 0) { 2268 if (state->n_rdonly == 0) { 2269 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 2270 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2271 calldata->arg.fmode &= ~FMODE_READ; 2272 } 2273 if (state->n_wronly == 0) { 2274 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 2275 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2276 calldata->arg.fmode &= ~FMODE_WRITE; 2277 } 2278 } 2279 spin_unlock(&state->owner->so_lock); 2280 2281 if (!call_close) { 2282 /* Note: exit _without_ calling nfs4_close_done */ 2283 task->tk_action = NULL; 2284 goto out; 2285 } 2286 2287 if (calldata->arg.fmode == 0) { 2288 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2289 if (calldata->roc && 2290 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) 2291 goto out; 2292 } 2293 2294 nfs_fattr_init(calldata->res.fattr); 2295 calldata->timestamp = jiffies; 2296 if (nfs4_setup_sequence(NFS_SERVER(inode), 2297 &calldata->arg.seq_args, 2298 &calldata->res.seq_res, 2299 task)) 2300 goto out; 2301 rpc_call_start(task); 2302 out: 2303 dprintk("%s: done!\n", __func__); 2304 } 2305 2306 static const struct rpc_call_ops nfs4_close_ops = { 2307 .rpc_call_prepare = nfs4_close_prepare, 2308 .rpc_call_done = nfs4_close_done, 2309 .rpc_release = nfs4_free_closedata, 2310 }; 2311 2312 /* 2313 * It is possible for data to be read/written from a mem-mapped file 2314 * after the sys_close call (which hits the vfs layer as a flush). 2315 * This means that we can't safely call nfsv4 close on a file until 2316 * the inode is cleared. 
This in turn means that we are not good 2317 * NFSv4 citizens - we do not indicate to the server to update the file's 2318 * share state even when we are done with one of the three share 2319 * stateid's in the inode. 2320 * 2321 * NOTE: Caller must be holding the sp->so_owner semaphore! 2322 */ 2323 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2324 { 2325 struct nfs_server *server = NFS_SERVER(state->inode); 2326 struct nfs4_closedata *calldata; 2327 struct nfs4_state_owner *sp = state->owner; 2328 struct rpc_task *task; 2329 struct rpc_message msg = { 2330 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2331 .rpc_cred = state->owner->so_cred, 2332 }; 2333 struct rpc_task_setup task_setup_data = { 2334 .rpc_client = server->client, 2335 .rpc_message = &msg, 2336 .callback_ops = &nfs4_close_ops, 2337 .workqueue = nfsiod_workqueue, 2338 .flags = RPC_TASK_ASYNC, 2339 }; 2340 int status = -ENOMEM; 2341 2342 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2343 if (calldata == NULL) 2344 goto out; 2345 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2346 calldata->inode = state->inode; 2347 calldata->state = state; 2348 calldata->arg.fh = NFS_FH(state->inode); 2349 calldata->arg.stateid = &state->open_stateid; 2350 /* Serialization for the sequence id */ 2351 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); 2352 if (calldata->arg.seqid == NULL) 2353 goto out_free_calldata; 2354 calldata->arg.fmode = 0; 2355 calldata->arg.bitmask = server->cache_consistency_bitmask; 2356 calldata->res.fattr = &calldata->fattr; 2357 calldata->res.seqid = calldata->arg.seqid; 2358 calldata->res.server = server; 2359 calldata->roc = pnfs_roc(state->inode); 2360 nfs_sb_active(calldata->inode->i_sb); 2361 2362 msg.rpc_argp = &calldata->arg; 2363 msg.rpc_resp = &calldata->res; 2364 task_setup_data.callback_data = calldata; 2365 task = rpc_run_task(&task_setup_data); 2366 if (IS_ERR(task)) 2367 return PTR_ERR(task); 2368 status = 0; 2369 if (wait) 2370 status = rpc_wait_for_completion_task(task); 2371 rpc_put_task(task); 2372 return status; 2373 out_free_calldata: 2374 kfree(calldata); 2375 out: 2376 nfs4_put_open_state(state); 2377 nfs4_put_state_owner(sp); 2378 return status; 2379 } 2380 2381 static struct inode * 2382 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2383 { 2384 struct nfs4_state *state; 2385 2386 /* Protect against concurrent sillydeletes */ 2387 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, 2388 ctx->cred, &ctx->mdsthreshold); 2389 if (IS_ERR(state)) 2390 return ERR_CAST(state); 2391 ctx->state = state; 2392 return igrab(state->inode); 2393 } 2394 2395 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2396 { 2397 if (ctx->state == NULL) 2398 return; 2399 if (is_sync) 2400 nfs4_close_sync(ctx->state, ctx->mode); 2401 else 2402 nfs4_close_state(ctx->state, ctx->mode); 2403 } 2404 2405 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2406 { 2407 struct nfs4_server_caps_arg args = { 2408 .fhandle = fhandle, 2409 }; 2410 struct nfs4_server_caps_res res = {}; 2411 struct rpc_message msg = { 2412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2413 .rpc_argp = &args, 2414 .rpc_resp = &res, 2415 }; 2416 int status; 2417 2418 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2419 if (status == 0) { 2420 memcpy(server->attr_bitmask, 
res.attr_bitmask, sizeof(server->attr_bitmask)); 2421 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2422 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2423 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2424 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2425 NFS_CAP_CTIME|NFS_CAP_MTIME); 2426 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) 2427 server->caps |= NFS_CAP_ACLS; 2428 if (res.has_links != 0) 2429 server->caps |= NFS_CAP_HARDLINKS; 2430 if (res.has_symlinks != 0) 2431 server->caps |= NFS_CAP_SYMLINKS; 2432 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2433 server->caps |= NFS_CAP_FILEID; 2434 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2435 server->caps |= NFS_CAP_MODE; 2436 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2437 server->caps |= NFS_CAP_NLINK; 2438 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2439 server->caps |= NFS_CAP_OWNER; 2440 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2441 server->caps |= NFS_CAP_OWNER_GROUP; 2442 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2443 server->caps |= NFS_CAP_ATIME; 2444 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2445 server->caps |= NFS_CAP_CTIME; 2446 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2447 server->caps |= NFS_CAP_MTIME; 2448 2449 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2450 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2451 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2452 server->acl_bitmask = res.acl_bitmask; 2453 server->fh_expire_type = res.fh_expire_type; 2454 } 2455 2456 return status; 2457 } 2458 2459 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2460 { 2461 struct nfs4_exception exception = { }; 2462 int err; 2463 do { 2464 err = nfs4_handle_exception(server, 2465 _nfs4_server_capabilities(server, fhandle), 2466 &exception); 2467 } while (exception.retry); 2468 return err; 2469 } 2470 2471 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2472 struct nfs_fsinfo *info) 2473 { 2474 struct nfs4_lookup_root_arg args = { 2475 .bitmask = nfs4_fattr_bitmap, 2476 }; 2477 struct nfs4_lookup_res res = { 2478 .server = server, 2479 .fattr = info->fattr, 2480 .fh = fhandle, 2481 }; 2482 struct rpc_message msg = { 2483 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2484 .rpc_argp = &args, 2485 .rpc_resp = &res, 2486 }; 2487 2488 nfs_fattr_init(info->fattr); 2489 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2490 } 2491 2492 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2493 struct nfs_fsinfo *info) 2494 { 2495 struct nfs4_exception exception = { }; 2496 int err; 2497 do { 2498 err = _nfs4_lookup_root(server, fhandle, info); 2499 switch (err) { 2500 case 0: 2501 case -NFS4ERR_WRONGSEC: 2502 goto out; 2503 default: 2504 err = nfs4_handle_exception(server, err, &exception); 2505 } 2506 } while (exception.retry); 2507 out: 2508 return err; 2509 } 2510 2511 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2512 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 2513 { 2514 struct rpc_auth *auth; 2515 int ret; 2516 2517 auth = rpcauth_create(flavor, server->client); 2518 if (IS_ERR(auth)) { 2519 ret = -EIO; 2520 goto out; 2521 } 2522 ret = nfs4_lookup_root(server, fhandle, info); 2523 out: 2524 return ret; 2525 } 2526 2527 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 
			      struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];

	len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
	BUG_ON(len < 0);

	for (i = 0; i < len; i++) {
		/* AUTH_UNIX is the default flavor if none was specified,
		 * thus has already been tried. */
		if (flav_array[i] == RPC_AUTH_UNIX)
			continue;

		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
			continue;
		break;
	}
	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount. It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}

/*
 * get the file handle for the "/" directory on the server
 */
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
			 struct nfs_fsinfo *info)
{
	int minor_version = server->nfs_client->cl_minorversion;
	int status = nfs4_lookup_root(server, fhandle, info);
	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
		/*
		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
		 * by nfs4_map_errors() as this function exits.
		 */
		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);
	return nfs4_map_errors(status);
}

static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
			      struct nfs_fsinfo *info)
{
	int error;
	struct nfs_fattr *fattr = info->fattr;

	error = nfs4_server_capabilities(server, mntfh);
	if (error < 0) {
		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
		return error;
	}

	error = nfs4_proc_getattr(server, mntfh, fattr);
	if (error < 0) {
		dprintk("nfs4_get_root: getattr error = %d\n", -error);
		return error;
	}

	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));

	return error;
}

/*
 * Get locations and (maybe) other attributes of a referral.
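 * A referral is seen as NFS4ERR_MOVED in reply to LOOKUP; we then ask
 * the server for the fs_locations attribute and substitute the
 * referral's attributes (and a zeroed-out filehandle) for the normal
 * lookup result.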
2607 * Note that we'll actually follow the referral later when 2608 * we detect fsid mismatch in inode revalidation 2609 */ 2610 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 2611 const struct qstr *name, struct nfs_fattr *fattr, 2612 struct nfs_fh *fhandle) 2613 { 2614 int status = -ENOMEM; 2615 struct page *page = NULL; 2616 struct nfs4_fs_locations *locations = NULL; 2617 2618 page = alloc_page(GFP_KERNEL); 2619 if (page == NULL) 2620 goto out; 2621 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 2622 if (locations == NULL) 2623 goto out; 2624 2625 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 2626 if (status != 0) 2627 goto out; 2628 /* Make sure server returned a different fsid for the referral */ 2629 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 2630 dprintk("%s: server did not return a different fsid for" 2631 " a referral at %s\n", __func__, name->name); 2632 status = -EIO; 2633 goto out; 2634 } 2635 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 2636 nfs_fixup_referral_attributes(&locations->fattr); 2637 2638 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 2639 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 2640 memset(fhandle, 0, sizeof(struct nfs_fh)); 2641 out: 2642 if (page) 2643 __free_page(page); 2644 kfree(locations); 2645 return status; 2646 } 2647 2648 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2649 { 2650 struct nfs4_getattr_arg args = { 2651 .fh = fhandle, 2652 .bitmask = server->attr_bitmask, 2653 }; 2654 struct nfs4_getattr_res res = { 2655 .fattr = fattr, 2656 .server = server, 2657 }; 2658 struct rpc_message msg = { 2659 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 2660 .rpc_argp = &args, 2661 .rpc_resp = &res, 2662 }; 2663 2664 nfs_fattr_init(fattr); 2665 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2666 } 2667 2668 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2669 { 2670 struct nfs4_exception exception = { }; 2671 int err; 2672 do { 2673 err = nfs4_handle_exception(server, 2674 _nfs4_proc_getattr(server, fhandle, fattr), 2675 &exception); 2676 } while (exception.retry); 2677 return err; 2678 } 2679 2680 /* 2681 * The file is not closed if it is opened due to the a request to change 2682 * the size of the file. The open call will not be needed once the 2683 * VFS layer lookup-intents are implemented. 2684 * 2685 * Close is called when the inode is destroyed. 2686 * If we haven't opened the file for O_WRONLY, we 2687 * need to in the size_change case to obtain a stateid. 2688 * 2689 * Got race? 2690 * Because OPEN is always done by name in nfsv4, it is 2691 * possible that we opened a different file by the same 2692 * name. We can recognize this race condition, but we 2693 * can't do anything about it besides returning an error. 2694 * 2695 * This will be fixed with VFS changes (lookup-intent). 
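 *
 * Note that when no open context (and no delegation) is available,
 * _nfs4_do_setattr() falls back to using the zero stateid for the
 * SETATTR call.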
2696 */ 2697 static int 2698 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 2699 struct iattr *sattr) 2700 { 2701 struct inode *inode = dentry->d_inode; 2702 struct rpc_cred *cred = NULL; 2703 struct nfs4_state *state = NULL; 2704 int status; 2705 2706 if (pnfs_ld_layoutret_on_setattr(inode)) 2707 pnfs_return_layout(inode); 2708 2709 nfs_fattr_init(fattr); 2710 2711 /* Deal with open(O_TRUNC) */ 2712 if (sattr->ia_valid & ATTR_OPEN) 2713 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2714 2715 /* Optimization: if the end result is no change, don't RPC */ 2716 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) 2717 return 0; 2718 2719 /* Search for an existing open(O_WRITE) file */ 2720 if (sattr->ia_valid & ATTR_FILE) { 2721 struct nfs_open_context *ctx; 2722 2723 ctx = nfs_file_open_context(sattr->ia_file); 2724 if (ctx) { 2725 cred = ctx->cred; 2726 state = ctx->state; 2727 } 2728 } 2729 2730 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2731 if (status == 0) 2732 nfs_setattr_update_inode(inode, sattr); 2733 return status; 2734 } 2735 2736 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 2737 const struct qstr *name, struct nfs_fh *fhandle, 2738 struct nfs_fattr *fattr) 2739 { 2740 struct nfs_server *server = NFS_SERVER(dir); 2741 int status; 2742 struct nfs4_lookup_arg args = { 2743 .bitmask = server->attr_bitmask, 2744 .dir_fh = NFS_FH(dir), 2745 .name = name, 2746 }; 2747 struct nfs4_lookup_res res = { 2748 .server = server, 2749 .fattr = fattr, 2750 .fh = fhandle, 2751 }; 2752 struct rpc_message msg = { 2753 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 2754 .rpc_argp = &args, 2755 .rpc_resp = &res, 2756 }; 2757 2758 nfs_fattr_init(fattr); 2759 2760 dprintk("NFS call lookup %s\n", name->name); 2761 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 2762 dprintk("NFS reply lookup: %d\n", status); 2763 return status; 2764 } 2765 2766 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 2767 { 2768 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 2769 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 2770 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 2771 fattr->nlink = 2; 2772 } 2773 2774 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 2775 struct qstr *name, struct nfs_fh *fhandle, 2776 struct nfs_fattr *fattr) 2777 { 2778 struct nfs4_exception exception = { }; 2779 struct rpc_clnt *client = *clnt; 2780 int err; 2781 do { 2782 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr); 2783 switch (err) { 2784 case -NFS4ERR_BADNAME: 2785 err = -ENOENT; 2786 goto out; 2787 case -NFS4ERR_MOVED: 2788 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 2789 goto out; 2790 case -NFS4ERR_WRONGSEC: 2791 err = -EPERM; 2792 if (client != *clnt) 2793 goto out; 2794 2795 client = nfs4_create_sec_client(client, dir, name); 2796 if (IS_ERR(client)) 2797 return PTR_ERR(client); 2798 2799 exception.retry = 1; 2800 break; 2801 default: 2802 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 2803 } 2804 } while (exception.retry); 2805 2806 out: 2807 if (err == 0) 2808 *clnt = client; 2809 else if (client != *clnt) 2810 rpc_shutdown_client(client); 2811 2812 return err; 2813 } 2814 2815 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 2816 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2817 { 2818 int status; 2819 struct rpc_clnt *client = NFS_CLIENT(dir); 2820 2821 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, 
fattr); 2822 if (client != NFS_CLIENT(dir)) { 2823 rpc_shutdown_client(client); 2824 nfs_fixup_secinfo_attributes(fattr); 2825 } 2826 return status; 2827 } 2828 2829 struct rpc_clnt * 2830 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 2831 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2832 { 2833 int status; 2834 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); 2835 2836 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); 2837 if (status < 0) { 2838 rpc_shutdown_client(client); 2839 return ERR_PTR(status); 2840 } 2841 return client; 2842 } 2843 2844 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2845 { 2846 struct nfs_server *server = NFS_SERVER(inode); 2847 struct nfs4_accessargs args = { 2848 .fh = NFS_FH(inode), 2849 .bitmask = server->cache_consistency_bitmask, 2850 }; 2851 struct nfs4_accessres res = { 2852 .server = server, 2853 }; 2854 struct rpc_message msg = { 2855 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 2856 .rpc_argp = &args, 2857 .rpc_resp = &res, 2858 .rpc_cred = entry->cred, 2859 }; 2860 int mode = entry->mask; 2861 int status; 2862 2863 /* 2864 * Determine which access bits we want to ask for... 2865 */ 2866 if (mode & MAY_READ) 2867 args.access |= NFS4_ACCESS_READ; 2868 if (S_ISDIR(inode->i_mode)) { 2869 if (mode & MAY_WRITE) 2870 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 2871 if (mode & MAY_EXEC) 2872 args.access |= NFS4_ACCESS_LOOKUP; 2873 } else { 2874 if (mode & MAY_WRITE) 2875 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 2876 if (mode & MAY_EXEC) 2877 args.access |= NFS4_ACCESS_EXECUTE; 2878 } 2879 2880 res.fattr = nfs_alloc_fattr(); 2881 if (res.fattr == NULL) 2882 return -ENOMEM; 2883 2884 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2885 if (!status) { 2886 nfs_access_set_mask(entry, res.access); 2887 nfs_refresh_inode(inode, res.fattr); 2888 } 2889 nfs_free_fattr(res.fattr); 2890 return status; 2891 } 2892 2893 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2894 { 2895 struct nfs4_exception exception = { }; 2896 int err; 2897 do { 2898 err = nfs4_handle_exception(NFS_SERVER(inode), 2899 _nfs4_proc_access(inode, entry), 2900 &exception); 2901 } while (exception.retry); 2902 return err; 2903 } 2904 2905 /* 2906 * TODO: For the time being, we don't try to get any attributes 2907 * along with any of the zero-copy operations READ, READDIR, 2908 * READLINK, WRITE. 2909 * 2910 * In the case of the first three, we want to put the GETATTR 2911 * after the read-type operation -- this is because it is hard 2912 * to predict the length of a GETATTR response in v4, and thus 2913 * align the READ data correctly. This means that the GETATTR 2914 * may end up partially falling into the page cache, and we should 2915 * shift it into the 'tail' of the xdr_buf before processing. 2916 * To do this efficiently, we need to know the total length 2917 * of data received, which doesn't seem to be available outside 2918 * of the RPC layer. 2919 * 2920 * In the case of WRITE, we also want to put the GETATTR after 2921 * the operation -- in this case because we want to make sure 2922 * we get the post-operation mtime and size. 2923 * 2924 * Both of these changes to the XDR layer would in fact be quite 2925 * minor, but I decided to leave them for a subsequent patch. 
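 *
 * In the meantime the READ and READDIR paths simply call
 * nfs_invalidate_atime() once the reply has been processed, so the
 * cached atime is not trusted anyway.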
2926 */ 2927 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 2928 unsigned int pgbase, unsigned int pglen) 2929 { 2930 struct nfs4_readlink args = { 2931 .fh = NFS_FH(inode), 2932 .pgbase = pgbase, 2933 .pglen = pglen, 2934 .pages = &page, 2935 }; 2936 struct nfs4_readlink_res res; 2937 struct rpc_message msg = { 2938 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 2939 .rpc_argp = &args, 2940 .rpc_resp = &res, 2941 }; 2942 2943 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 2944 } 2945 2946 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 2947 unsigned int pgbase, unsigned int pglen) 2948 { 2949 struct nfs4_exception exception = { }; 2950 int err; 2951 do { 2952 err = nfs4_handle_exception(NFS_SERVER(inode), 2953 _nfs4_proc_readlink(inode, page, pgbase, pglen), 2954 &exception); 2955 } while (exception.retry); 2956 return err; 2957 } 2958 2959 /* 2960 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 2961 */ 2962 static int 2963 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 2964 int flags) 2965 { 2966 struct nfs_open_context *ctx; 2967 struct nfs4_state *state; 2968 int status = 0; 2969 2970 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 2971 if (IS_ERR(ctx)) 2972 return PTR_ERR(ctx); 2973 2974 sattr->ia_mode &= ~current_umask(); 2975 state = nfs4_do_open(dir, dentry, ctx->mode, 2976 flags, sattr, ctx->cred, 2977 &ctx->mdsthreshold); 2978 d_drop(dentry); 2979 if (IS_ERR(state)) { 2980 status = PTR_ERR(state); 2981 goto out; 2982 } 2983 d_add(dentry, igrab(state->inode)); 2984 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 2985 ctx->state = state; 2986 out: 2987 put_nfs_open_context(ctx); 2988 return status; 2989 } 2990 2991 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 2992 { 2993 struct nfs_server *server = NFS_SERVER(dir); 2994 struct nfs_removeargs args = { 2995 .fh = NFS_FH(dir), 2996 .name = *name, 2997 }; 2998 struct nfs_removeres res = { 2999 .server = server, 3000 }; 3001 struct rpc_message msg = { 3002 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3003 .rpc_argp = &args, 3004 .rpc_resp = &res, 3005 }; 3006 int status; 3007 3008 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3009 if (status == 0) 3010 update_changeattr(dir, &res.cinfo); 3011 return status; 3012 } 3013 3014 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3015 { 3016 struct nfs4_exception exception = { }; 3017 int err; 3018 do { 3019 err = nfs4_handle_exception(NFS_SERVER(dir), 3020 _nfs4_proc_remove(dir, name), 3021 &exception); 3022 } while (exception.retry); 3023 return err; 3024 } 3025 3026 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 3027 { 3028 struct nfs_server *server = NFS_SERVER(dir); 3029 struct nfs_removeargs *args = msg->rpc_argp; 3030 struct nfs_removeres *res = msg->rpc_resp; 3031 3032 res->server = server; 3033 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3034 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); 3035 } 3036 3037 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3038 { 3039 if (nfs4_setup_sequence(NFS_SERVER(data->dir), 3040 &data->args.seq_args, 3041 &data->res.seq_res, 3042 task)) 3043 return; 3044 rpc_call_start(task); 3045 } 3046 3047 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3048 { 3049 struct 
nfs_removeres *res = task->tk_msg.rpc_resp; 3050 3051 if (!nfs4_sequence_done(task, &res->seq_res)) 3052 return 0; 3053 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3054 return 0; 3055 update_changeattr(dir, &res->cinfo); 3056 return 1; 3057 } 3058 3059 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3060 { 3061 struct nfs_server *server = NFS_SERVER(dir); 3062 struct nfs_renameargs *arg = msg->rpc_argp; 3063 struct nfs_renameres *res = msg->rpc_resp; 3064 3065 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3066 res->server = server; 3067 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); 3068 } 3069 3070 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3071 { 3072 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3073 &data->args.seq_args, 3074 &data->res.seq_res, 3075 task)) 3076 return; 3077 rpc_call_start(task); 3078 } 3079 3080 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3081 struct inode *new_dir) 3082 { 3083 struct nfs_renameres *res = task->tk_msg.rpc_resp; 3084 3085 if (!nfs4_sequence_done(task, &res->seq_res)) 3086 return 0; 3087 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3088 return 0; 3089 3090 update_changeattr(old_dir, &res->old_cinfo); 3091 update_changeattr(new_dir, &res->new_cinfo); 3092 return 1; 3093 } 3094 3095 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3096 struct inode *new_dir, struct qstr *new_name) 3097 { 3098 struct nfs_server *server = NFS_SERVER(old_dir); 3099 struct nfs_renameargs arg = { 3100 .old_dir = NFS_FH(old_dir), 3101 .new_dir = NFS_FH(new_dir), 3102 .old_name = old_name, 3103 .new_name = new_name, 3104 }; 3105 struct nfs_renameres res = { 3106 .server = server, 3107 }; 3108 struct rpc_message msg = { 3109 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], 3110 .rpc_argp = &arg, 3111 .rpc_resp = &res, 3112 }; 3113 int status = -ENOMEM; 3114 3115 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3116 if (!status) { 3117 update_changeattr(old_dir, &res.old_cinfo); 3118 update_changeattr(new_dir, &res.new_cinfo); 3119 } 3120 return status; 3121 } 3122 3123 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3124 struct inode *new_dir, struct qstr *new_name) 3125 { 3126 struct nfs4_exception exception = { }; 3127 int err; 3128 do { 3129 err = nfs4_handle_exception(NFS_SERVER(old_dir), 3130 _nfs4_proc_rename(old_dir, old_name, 3131 new_dir, new_name), 3132 &exception); 3133 } while (exception.retry); 3134 return err; 3135 } 3136 3137 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3138 { 3139 struct nfs_server *server = NFS_SERVER(inode); 3140 struct nfs4_link_arg arg = { 3141 .fh = NFS_FH(inode), 3142 .dir_fh = NFS_FH(dir), 3143 .name = name, 3144 .bitmask = server->attr_bitmask, 3145 }; 3146 struct nfs4_link_res res = { 3147 .server = server, 3148 }; 3149 struct rpc_message msg = { 3150 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3151 .rpc_argp = &arg, 3152 .rpc_resp = &res, 3153 }; 3154 int status = -ENOMEM; 3155 3156 res.fattr = nfs_alloc_fattr(); 3157 if (res.fattr == NULL) 3158 goto out; 3159 3160 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3161 if (!status) { 3162 update_changeattr(dir, &res.cinfo); 3163 nfs_post_op_update_inode(inode, res.fattr); 3164 } 3165 out: 3166 nfs_free_fattr(res.fattr); 3167 return 
status; 3168 } 3169 3170 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3171 { 3172 struct nfs4_exception exception = { }; 3173 int err; 3174 do { 3175 err = nfs4_handle_exception(NFS_SERVER(inode), 3176 _nfs4_proc_link(inode, dir, name), 3177 &exception); 3178 } while (exception.retry); 3179 return err; 3180 } 3181 3182 struct nfs4_createdata { 3183 struct rpc_message msg; 3184 struct nfs4_create_arg arg; 3185 struct nfs4_create_res res; 3186 struct nfs_fh fh; 3187 struct nfs_fattr fattr; 3188 }; 3189 3190 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3191 struct qstr *name, struct iattr *sattr, u32 ftype) 3192 { 3193 struct nfs4_createdata *data; 3194 3195 data = kzalloc(sizeof(*data), GFP_KERNEL); 3196 if (data != NULL) { 3197 struct nfs_server *server = NFS_SERVER(dir); 3198 3199 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3200 data->msg.rpc_argp = &data->arg; 3201 data->msg.rpc_resp = &data->res; 3202 data->arg.dir_fh = NFS_FH(dir); 3203 data->arg.server = server; 3204 data->arg.name = name; 3205 data->arg.attrs = sattr; 3206 data->arg.ftype = ftype; 3207 data->arg.bitmask = server->attr_bitmask; 3208 data->res.server = server; 3209 data->res.fh = &data->fh; 3210 data->res.fattr = &data->fattr; 3211 nfs_fattr_init(data->res.fattr); 3212 } 3213 return data; 3214 } 3215 3216 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3217 { 3218 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3219 &data->arg.seq_args, &data->res.seq_res, 1); 3220 if (status == 0) { 3221 update_changeattr(dir, &data->res.dir_cinfo); 3222 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 3223 } 3224 return status; 3225 } 3226 3227 static void nfs4_free_createdata(struct nfs4_createdata *data) 3228 { 3229 kfree(data); 3230 } 3231 3232 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3233 struct page *page, unsigned int len, struct iattr *sattr) 3234 { 3235 struct nfs4_createdata *data; 3236 int status = -ENAMETOOLONG; 3237 3238 if (len > NFS4_MAXPATHLEN) 3239 goto out; 3240 3241 status = -ENOMEM; 3242 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3243 if (data == NULL) 3244 goto out; 3245 3246 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3247 data->arg.u.symlink.pages = &page; 3248 data->arg.u.symlink.len = len; 3249 3250 status = nfs4_do_create(dir, dentry, data); 3251 3252 nfs4_free_createdata(data); 3253 out: 3254 return status; 3255 } 3256 3257 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3258 struct page *page, unsigned int len, struct iattr *sattr) 3259 { 3260 struct nfs4_exception exception = { }; 3261 int err; 3262 do { 3263 err = nfs4_handle_exception(NFS_SERVER(dir), 3264 _nfs4_proc_symlink(dir, dentry, page, 3265 len, sattr), 3266 &exception); 3267 } while (exception.retry); 3268 return err; 3269 } 3270 3271 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3272 struct iattr *sattr) 3273 { 3274 struct nfs4_createdata *data; 3275 int status = -ENOMEM; 3276 3277 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3278 if (data == NULL) 3279 goto out; 3280 3281 status = nfs4_do_create(dir, dentry, data); 3282 3283 nfs4_free_createdata(data); 3284 out: 3285 return status; 3286 } 3287 3288 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3289 struct iattr *sattr) 3290 { 3291 struct nfs4_exception 
exception = { }; 3292 int err; 3293 3294 sattr->ia_mode &= ~current_umask(); 3295 do { 3296 err = nfs4_handle_exception(NFS_SERVER(dir), 3297 _nfs4_proc_mkdir(dir, dentry, sattr), 3298 &exception); 3299 } while (exception.retry); 3300 return err; 3301 } 3302 3303 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3304 u64 cookie, struct page **pages, unsigned int count, int plus) 3305 { 3306 struct inode *dir = dentry->d_inode; 3307 struct nfs4_readdir_arg args = { 3308 .fh = NFS_FH(dir), 3309 .pages = pages, 3310 .pgbase = 0, 3311 .count = count, 3312 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, 3313 .plus = plus, 3314 }; 3315 struct nfs4_readdir_res res; 3316 struct rpc_message msg = { 3317 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3318 .rpc_argp = &args, 3319 .rpc_resp = &res, 3320 .rpc_cred = cred, 3321 }; 3322 int status; 3323 3324 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__, 3325 dentry->d_parent->d_name.name, 3326 dentry->d_name.name, 3327 (unsigned long long)cookie); 3328 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3329 res.pgbase = args.pgbase; 3330 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3331 if (status >= 0) { 3332 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3333 status += args.pgbase; 3334 } 3335 3336 nfs_invalidate_atime(dir); 3337 3338 dprintk("%s: returns %d\n", __func__, status); 3339 return status; 3340 } 3341 3342 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3343 u64 cookie, struct page **pages, unsigned int count, int plus) 3344 { 3345 struct nfs4_exception exception = { }; 3346 int err; 3347 do { 3348 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), 3349 _nfs4_proc_readdir(dentry, cred, cookie, 3350 pages, count, plus), 3351 &exception); 3352 } while (exception.retry); 3353 return err; 3354 } 3355 3356 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3357 struct iattr *sattr, dev_t rdev) 3358 { 3359 struct nfs4_createdata *data; 3360 int mode = sattr->ia_mode; 3361 int status = -ENOMEM; 3362 3363 BUG_ON(!(sattr->ia_valid & ATTR_MODE)); 3364 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); 3365 3366 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3367 if (data == NULL) 3368 goto out; 3369 3370 if (S_ISFIFO(mode)) 3371 data->arg.ftype = NF4FIFO; 3372 else if (S_ISBLK(mode)) { 3373 data->arg.ftype = NF4BLK; 3374 data->arg.u.device.specdata1 = MAJOR(rdev); 3375 data->arg.u.device.specdata2 = MINOR(rdev); 3376 } 3377 else if (S_ISCHR(mode)) { 3378 data->arg.ftype = NF4CHR; 3379 data->arg.u.device.specdata1 = MAJOR(rdev); 3380 data->arg.u.device.specdata2 = MINOR(rdev); 3381 } 3382 3383 status = nfs4_do_create(dir, dentry, data); 3384 3385 nfs4_free_createdata(data); 3386 out: 3387 return status; 3388 } 3389 3390 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3391 struct iattr *sattr, dev_t rdev) 3392 { 3393 struct nfs4_exception exception = { }; 3394 int err; 3395 3396 sattr->ia_mode &= ~current_umask(); 3397 do { 3398 err = nfs4_handle_exception(NFS_SERVER(dir), 3399 _nfs4_proc_mknod(dir, dentry, sattr, rdev), 3400 &exception); 3401 } while (exception.retry); 3402 return err; 3403 } 3404 3405 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3406 struct nfs_fsstat *fsstat) 3407 { 3408 struct nfs4_statfs_arg args = { 3409 .fh = fhandle, 3410 
.bitmask = server->attr_bitmask, 3411 }; 3412 struct nfs4_statfs_res res = { 3413 .fsstat = fsstat, 3414 }; 3415 struct rpc_message msg = { 3416 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 3417 .rpc_argp = &args, 3418 .rpc_resp = &res, 3419 }; 3420 3421 nfs_fattr_init(fsstat->fattr); 3422 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3423 } 3424 3425 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 3426 { 3427 struct nfs4_exception exception = { }; 3428 int err; 3429 do { 3430 err = nfs4_handle_exception(server, 3431 _nfs4_proc_statfs(server, fhandle, fsstat), 3432 &exception); 3433 } while (exception.retry); 3434 return err; 3435 } 3436 3437 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 3438 struct nfs_fsinfo *fsinfo) 3439 { 3440 struct nfs4_fsinfo_arg args = { 3441 .fh = fhandle, 3442 .bitmask = server->attr_bitmask, 3443 }; 3444 struct nfs4_fsinfo_res res = { 3445 .fsinfo = fsinfo, 3446 }; 3447 struct rpc_message msg = { 3448 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 3449 .rpc_argp = &args, 3450 .rpc_resp = &res, 3451 }; 3452 3453 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3454 } 3455 3456 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3457 { 3458 struct nfs4_exception exception = { }; 3459 int err; 3460 3461 do { 3462 err = nfs4_handle_exception(server, 3463 _nfs4_do_fsinfo(server, fhandle, fsinfo), 3464 &exception); 3465 } while (exception.retry); 3466 return err; 3467 } 3468 3469 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3470 { 3471 int error; 3472 3473 nfs_fattr_init(fsinfo->fattr); 3474 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 3475 if (error == 0) { 3476 /* block layout checks this! 
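		 * (pnfs_blksize therefore has to be filled in before
		 * set_pnfs_layoutdriver() is called; the layout driver's
		 * initialization may want to look at it.)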
*/ 3477 server->pnfs_blksize = fsinfo->blksize; 3478 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 3479 } 3480 3481 return error; 3482 } 3483 3484 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3485 struct nfs_pathconf *pathconf) 3486 { 3487 struct nfs4_pathconf_arg args = { 3488 .fh = fhandle, 3489 .bitmask = server->attr_bitmask, 3490 }; 3491 struct nfs4_pathconf_res res = { 3492 .pathconf = pathconf, 3493 }; 3494 struct rpc_message msg = { 3495 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 3496 .rpc_argp = &args, 3497 .rpc_resp = &res, 3498 }; 3499 3500 /* None of the pathconf attributes are mandatory to implement */ 3501 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 3502 memset(pathconf, 0, sizeof(*pathconf)); 3503 return 0; 3504 } 3505 3506 nfs_fattr_init(pathconf->fattr); 3507 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3508 } 3509 3510 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3511 struct nfs_pathconf *pathconf) 3512 { 3513 struct nfs4_exception exception = { }; 3514 int err; 3515 3516 do { 3517 err = nfs4_handle_exception(server, 3518 _nfs4_proc_pathconf(server, fhandle, pathconf), 3519 &exception); 3520 } while (exception.retry); 3521 return err; 3522 } 3523 3524 void __nfs4_read_done_cb(struct nfs_read_data *data) 3525 { 3526 nfs_invalidate_atime(data->header->inode); 3527 } 3528 3529 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) 3530 { 3531 struct nfs_server *server = NFS_SERVER(data->header->inode); 3532 3533 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3534 rpc_restart_call_prepare(task); 3535 return -EAGAIN; 3536 } 3537 3538 __nfs4_read_done_cb(data); 3539 if (task->tk_status > 0) 3540 renew_lease(server, data->timestamp); 3541 return 0; 3542 } 3543 3544 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) 3545 { 3546 3547 dprintk("--> %s\n", __func__); 3548 3549 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3550 return -EAGAIN; 3551 3552 return data->read_done_cb ? data->read_done_cb(task, data) : 3553 nfs4_read_done_cb(task, data); 3554 } 3555 3556 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg) 3557 { 3558 data->timestamp = jiffies; 3559 data->read_done_cb = nfs4_read_done_cb; 3560 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 3561 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 3562 } 3563 3564 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) 3565 { 3566 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3567 &data->args.seq_args, 3568 &data->res.seq_res, 3569 task)) 3570 return; 3571 rpc_call_start(task); 3572 } 3573 3574 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3575 { 3576 struct inode *inode = data->header->inode; 3577 3578 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3579 rpc_restart_call_prepare(task); 3580 return -EAGAIN; 3581 } 3582 if (task->tk_status >= 0) { 3583 renew_lease(NFS_SERVER(inode), data->timestamp); 3584 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 3585 } 3586 return 0; 3587 } 3588 3589 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) 3590 { 3591 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3592 return -EAGAIN; 3593 return data->write_done_cb ? 
data->write_done_cb(task, data) : 3594 nfs4_write_done_cb(task, data); 3595 } 3596 3597 static 3598 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data) 3599 { 3600 const struct nfs_pgio_header *hdr = data->header; 3601 3602 /* Don't request attributes for pNFS or O_DIRECT writes */ 3603 if (data->ds_clp != NULL || hdr->dreq != NULL) 3604 return false; 3605 /* Otherwise, request attributes if and only if we don't hold 3606 * a delegation 3607 */ 3608 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 3609 } 3610 3611 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg) 3612 { 3613 struct nfs_server *server = NFS_SERVER(data->header->inode); 3614 3615 if (!nfs4_write_need_cache_consistency_data(data)) { 3616 data->args.bitmask = NULL; 3617 data->res.fattr = NULL; 3618 } else 3619 data->args.bitmask = server->cache_consistency_bitmask; 3620 3621 if (!data->write_done_cb) 3622 data->write_done_cb = nfs4_write_done_cb; 3623 data->res.server = server; 3624 data->timestamp = jiffies; 3625 3626 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 3627 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3628 } 3629 3630 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) 3631 { 3632 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3633 &data->args.seq_args, 3634 &data->res.seq_res, 3635 task)) 3636 return; 3637 rpc_call_start(task); 3638 } 3639 3640 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 3641 { 3642 if (nfs4_setup_sequence(NFS_SERVER(data->inode), 3643 &data->args.seq_args, 3644 &data->res.seq_res, 3645 task)) 3646 return; 3647 rpc_call_start(task); 3648 } 3649 3650 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 3651 { 3652 struct inode *inode = data->inode; 3653 3654 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3655 rpc_restart_call_prepare(task); 3656 return -EAGAIN; 3657 } 3658 return 0; 3659 } 3660 3661 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 3662 { 3663 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3664 return -EAGAIN; 3665 return data->commit_done_cb(task, data); 3666 } 3667 3668 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 3669 { 3670 struct nfs_server *server = NFS_SERVER(data->inode); 3671 3672 if (data->commit_done_cb == NULL) 3673 data->commit_done_cb = nfs4_commit_done_cb; 3674 data->res.server = server; 3675 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 3676 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3677 } 3678 3679 struct nfs4_renewdata { 3680 struct nfs_client *client; 3681 unsigned long timestamp; 3682 }; 3683 3684 /* 3685 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3686 * standalone procedure for queueing an asynchronous RENEW. 
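 * The nfs_client is pinned with atomic_inc_not_zero() for the
 * duration of the call and released again in nfs4_renew_release().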
3687 */ 3688 static void nfs4_renew_release(void *calldata) 3689 { 3690 struct nfs4_renewdata *data = calldata; 3691 struct nfs_client *clp = data->client; 3692 3693 if (atomic_read(&clp->cl_count) > 1) 3694 nfs4_schedule_state_renewal(clp); 3695 nfs_put_client(clp); 3696 kfree(data); 3697 } 3698 3699 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 3700 { 3701 struct nfs4_renewdata *data = calldata; 3702 struct nfs_client *clp = data->client; 3703 unsigned long timestamp = data->timestamp; 3704 3705 if (task->tk_status < 0) { 3706 /* Unless we're shutting down, schedule state recovery! */ 3707 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3708 return; 3709 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3710 nfs4_schedule_lease_recovery(clp); 3711 return; 3712 } 3713 nfs4_schedule_path_down_recovery(clp); 3714 } 3715 do_renew_lease(clp, timestamp); 3716 } 3717 3718 static const struct rpc_call_ops nfs4_renew_ops = { 3719 .rpc_call_done = nfs4_renew_done, 3720 .rpc_release = nfs4_renew_release, 3721 }; 3722 3723 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3724 { 3725 struct rpc_message msg = { 3726 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3727 .rpc_argp = clp, 3728 .rpc_cred = cred, 3729 }; 3730 struct nfs4_renewdata *data; 3731 3732 if (renew_flags == 0) 3733 return 0; 3734 if (!atomic_inc_not_zero(&clp->cl_count)) 3735 return -EIO; 3736 data = kmalloc(sizeof(*data), GFP_NOFS); 3737 if (data == NULL) 3738 return -ENOMEM; 3739 data->client = clp; 3740 data->timestamp = jiffies; 3741 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 3742 &nfs4_renew_ops, data); 3743 } 3744 3745 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3746 { 3747 struct rpc_message msg = { 3748 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3749 .rpc_argp = clp, 3750 .rpc_cred = cred, 3751 }; 3752 unsigned long now = jiffies; 3753 int status; 3754 3755 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3756 if (status < 0) 3757 return status; 3758 do_renew_lease(clp, now); 3759 return 0; 3760 } 3761 3762 static inline int nfs4_server_supports_acls(struct nfs_server *server) 3763 { 3764 return (server->caps & NFS_CAP_ACLS) 3765 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3766 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); 3767 } 3768 3769 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 3770 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 3771 * the stack. 
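 * (In other words, one page pointer per page of xattr data: the
 * pages[] arrays declared below occupy NFS4ACL_MAXPAGES *
 * sizeof(struct page *) bytes each.)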
3772 */ 3773 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 3774 3775 static int buf_to_pages_noslab(const void *buf, size_t buflen, 3776 struct page **pages, unsigned int *pgbase) 3777 { 3778 struct page *newpage, **spages; 3779 int rc = 0; 3780 size_t len; 3781 spages = pages; 3782 3783 do { 3784 len = min_t(size_t, PAGE_SIZE, buflen); 3785 newpage = alloc_page(GFP_KERNEL); 3786 3787 if (newpage == NULL) 3788 goto unwind; 3789 memcpy(page_address(newpage), buf, len); 3790 buf += len; 3791 buflen -= len; 3792 *pages++ = newpage; 3793 rc++; 3794 } while (buflen != 0); 3795 3796 return rc; 3797 3798 unwind: 3799 for(; rc > 0; rc--) 3800 __free_page(spages[rc-1]); 3801 return -ENOMEM; 3802 } 3803 3804 struct nfs4_cached_acl { 3805 int cached; 3806 size_t len; 3807 char data[0]; 3808 }; 3809 3810 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 3811 { 3812 struct nfs_inode *nfsi = NFS_I(inode); 3813 3814 spin_lock(&inode->i_lock); 3815 kfree(nfsi->nfs4_acl); 3816 nfsi->nfs4_acl = acl; 3817 spin_unlock(&inode->i_lock); 3818 } 3819 3820 static void nfs4_zap_acl_attr(struct inode *inode) 3821 { 3822 nfs4_set_cached_acl(inode, NULL); 3823 } 3824 3825 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 3826 { 3827 struct nfs_inode *nfsi = NFS_I(inode); 3828 struct nfs4_cached_acl *acl; 3829 int ret = -ENOENT; 3830 3831 spin_lock(&inode->i_lock); 3832 acl = nfsi->nfs4_acl; 3833 if (acl == NULL) 3834 goto out; 3835 if (buf == NULL) /* user is just asking for length */ 3836 goto out_len; 3837 if (acl->cached == 0) 3838 goto out; 3839 ret = -ERANGE; /* see getxattr(2) man page */ 3840 if (acl->len > buflen) 3841 goto out; 3842 memcpy(buf, acl->data, acl->len); 3843 out_len: 3844 ret = acl->len; 3845 out: 3846 spin_unlock(&inode->i_lock); 3847 return ret; 3848 } 3849 3850 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3851 { 3852 struct nfs4_cached_acl *acl; 3853 size_t buflen = sizeof(*acl) + acl_len; 3854 3855 if (buflen <= PAGE_SIZE) { 3856 acl = kmalloc(buflen, GFP_KERNEL); 3857 if (acl == NULL) 3858 goto out; 3859 acl->cached = 1; 3860 _copy_from_pages(acl->data, pages, pgbase, acl_len); 3861 } else { 3862 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 3863 if (acl == NULL) 3864 goto out; 3865 acl->cached = 0; 3866 } 3867 acl->len = acl_len; 3868 out: 3869 nfs4_set_cached_acl(inode, acl); 3870 } 3871 3872 /* 3873 * The getxattr API returns the required buffer length when called with a 3874 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 3875 * the required buf. On a NULL buf, we send a page of data to the server 3876 * guessing that the ACL request can be serviced by a page. If so, we cache 3877 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 3878 * the cache. If not so, we throw away the page, and cache the required 3879 * length. The next getxattr call will then produce another round trip to 3880 * the server, this time with the input buf of the required size. 
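 *
 * From user space the usual pattern is therefore two getxattr(2) calls,
 * roughly (sketch only, error handling omitted):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);	// probe length
 *	buf = malloc(len);
 *	len = getxattr(path, "system.nfs4_acl", buf, len);	// fetch ACL
 *
 * The first call costs at most one GETACL round trip; if the ACL fit in
 * the speculatively allocated page, the second call is satisfied from
 * nfs4_read_cached_acl() without going back to the server.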
3881 */ 3882 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3883 { 3884 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 3885 struct nfs_getaclargs args = { 3886 .fh = NFS_FH(inode), 3887 .acl_pages = pages, 3888 .acl_len = buflen, 3889 }; 3890 struct nfs_getaclres res = { 3891 .acl_len = buflen, 3892 }; 3893 struct rpc_message msg = { 3894 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 3895 .rpc_argp = &args, 3896 .rpc_resp = &res, 3897 }; 3898 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 3899 int ret = -ENOMEM, i; 3900 3901 /* As long as we're doing a round trip to the server anyway, 3902 * let's be prepared for a page of acl data. */ 3903 if (npages == 0) 3904 npages = 1; 3905 if (npages > ARRAY_SIZE(pages)) 3906 return -ERANGE; 3907 3908 for (i = 0; i < npages; i++) { 3909 pages[i] = alloc_page(GFP_KERNEL); 3910 if (!pages[i]) 3911 goto out_free; 3912 } 3913 3914 /* for decoding across pages */ 3915 res.acl_scratch = alloc_page(GFP_KERNEL); 3916 if (!res.acl_scratch) 3917 goto out_free; 3918 3919 args.acl_len = npages * PAGE_SIZE; 3920 args.acl_pgbase = 0; 3921 3922 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 3923 __func__, buf, buflen, npages, args.acl_len); 3924 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 3925 &msg, &args.seq_args, &res.seq_res, 0); 3926 if (ret) 3927 goto out_free; 3928 3929 /* Handle the case where the passed-in buffer is too short */ 3930 if (res.acl_flags & NFS4_ACL_TRUNC) { 3931 /* Did the user only issue a request for the acl length? */ 3932 if (buf == NULL) 3933 goto out_ok; 3934 ret = -ERANGE; 3935 goto out_free; 3936 } 3937 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 3938 if (buf) 3939 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 3940 out_ok: 3941 ret = res.acl_len; 3942 out_free: 3943 for (i = 0; i < npages; i++) 3944 if (pages[i]) 3945 __free_page(pages[i]); 3946 if (res.acl_scratch) 3947 __free_page(res.acl_scratch); 3948 return ret; 3949 } 3950 3951 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3952 { 3953 struct nfs4_exception exception = { }; 3954 ssize_t ret; 3955 do { 3956 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 3957 if (ret >= 0) 3958 break; 3959 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 3960 } while (exception.retry); 3961 return ret; 3962 } 3963 3964 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 3965 { 3966 struct nfs_server *server = NFS_SERVER(inode); 3967 int ret; 3968 3969 if (!nfs4_server_supports_acls(server)) 3970 return -EOPNOTSUPP; 3971 ret = nfs_revalidate_inode(server, inode); 3972 if (ret < 0) 3973 return ret; 3974 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 3975 nfs_zap_acl_cache(inode); 3976 ret = nfs4_read_cached_acl(inode, buf, buflen); 3977 if (ret != -ENOENT) 3978 /* -ENOENT is returned if there is no ACL or if there is an ACL 3979 * but no cached acl data, just the acl length */ 3980 return ret; 3981 return nfs4_get_acl_uncached(inode, buf, buflen); 3982 } 3983 3984 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 3985 { 3986 struct nfs_server *server = NFS_SERVER(inode); 3987 struct page *pages[NFS4ACL_MAXPAGES]; 3988 struct nfs_setaclargs arg = { 3989 .fh = NFS_FH(inode), 3990 .acl_pages = pages, 3991 .acl_len = buflen, 3992 }; 3993 struct nfs_setaclres res; 3994 struct rpc_message msg = { 3995 .rpc_proc = 
&nfs4_procedures[NFSPROC4_CLNT_SETACL], 3996 .rpc_argp = &arg, 3997 .rpc_resp = &res, 3998 }; 3999 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4000 int ret, i; 4001 4002 if (!nfs4_server_supports_acls(server)) 4003 return -EOPNOTSUPP; 4004 if (npages > ARRAY_SIZE(pages)) 4005 return -ERANGE; 4006 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 4007 if (i < 0) 4008 return i; 4009 nfs4_inode_return_delegation(inode); 4010 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4011 4012 /* 4013 * Free each page after tx, so the only ref left is 4014 * held by the network stack 4015 */ 4016 for (; i > 0; i--) 4017 put_page(pages[i-1]); 4018 4019 /* 4020 * Acl update can result in inode attribute update. 4021 * so mark the attribute cache invalid. 4022 */ 4023 spin_lock(&inode->i_lock); 4024 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4025 spin_unlock(&inode->i_lock); 4026 nfs_access_zap_cache(inode); 4027 nfs_zap_acl_cache(inode); 4028 return ret; 4029 } 4030 4031 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4032 { 4033 struct nfs4_exception exception = { }; 4034 int err; 4035 do { 4036 err = nfs4_handle_exception(NFS_SERVER(inode), 4037 __nfs4_proc_set_acl(inode, buf, buflen), 4038 &exception); 4039 } while (exception.retry); 4040 return err; 4041 } 4042 4043 static int 4044 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) 4045 { 4046 struct nfs_client *clp = server->nfs_client; 4047 4048 if (task->tk_status >= 0) 4049 return 0; 4050 switch(task->tk_status) { 4051 case -NFS4ERR_DELEG_REVOKED: 4052 case -NFS4ERR_ADMIN_REVOKED: 4053 case -NFS4ERR_BAD_STATEID: 4054 if (state == NULL) 4055 break; 4056 nfs_remove_bad_delegation(state->inode); 4057 case -NFS4ERR_OPENMODE: 4058 if (state == NULL) 4059 break; 4060 nfs4_schedule_stateid_recovery(server, state); 4061 goto wait_on_recovery; 4062 case -NFS4ERR_EXPIRED: 4063 if (state != NULL) 4064 nfs4_schedule_stateid_recovery(server, state); 4065 case -NFS4ERR_STALE_STATEID: 4066 case -NFS4ERR_STALE_CLIENTID: 4067 nfs4_schedule_lease_recovery(clp); 4068 goto wait_on_recovery; 4069 #if defined(CONFIG_NFS_V4_1) 4070 case -NFS4ERR_BADSESSION: 4071 case -NFS4ERR_BADSLOT: 4072 case -NFS4ERR_BAD_HIGH_SLOT: 4073 case -NFS4ERR_DEADSESSION: 4074 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4075 case -NFS4ERR_SEQ_FALSE_RETRY: 4076 case -NFS4ERR_SEQ_MISORDERED: 4077 dprintk("%s ERROR %d, Reset session\n", __func__, 4078 task->tk_status); 4079 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 4080 task->tk_status = 0; 4081 return -EAGAIN; 4082 #endif /* CONFIG_NFS_V4_1 */ 4083 case -NFS4ERR_DELAY: 4084 nfs_inc_server_stats(server, NFSIOS_DELAY); 4085 case -NFS4ERR_GRACE: 4086 case -EKEYEXPIRED: 4087 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4088 task->tk_status = 0; 4089 return -EAGAIN; 4090 case -NFS4ERR_RETRY_UNCACHED_REP: 4091 case -NFS4ERR_OLD_STATEID: 4092 task->tk_status = 0; 4093 return -EAGAIN; 4094 } 4095 task->tk_status = nfs4_map_errors(task->tk_status); 4096 return 0; 4097 wait_on_recovery: 4098 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 4099 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 4100 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 4101 task->tk_status = 0; 4102 return -EAGAIN; 4103 } 4104 4105 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 4106 nfs4_verifier *bootverf) 4107 { 4108 __be32 verf[2]; 4109 4110 if 
(test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4111 /* An impossible timestamp guarantees this value
4112 * will never match a generated boot time. */
4113 verf[0] = 0;
4114 verf[1] = (__be32)(NSEC_PER_SEC + 1);
4115 } else {
4116 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4117 verf[0] = (__be32)nn->boot_time.tv_sec;
4118 verf[1] = (__be32)nn->boot_time.tv_nsec;
4119 }
4120 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4121 }
4122
4123 static unsigned int
4124 nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
4125 char *buf, size_t len)
4126 {
4127 unsigned int result;
4128
4129 rcu_read_lock();
4130 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
4131 clp->cl_ipaddr,
4132 rpc_peeraddr2str(clp->cl_rpcclient,
4133 RPC_DISPLAY_ADDR),
4134 rpc_peeraddr2str(clp->cl_rpcclient,
4135 RPC_DISPLAY_PROTO));
4136 rcu_read_unlock();
4137 return result;
4138 }
4139
4140 static unsigned int
4141 nfs4_init_uniform_client_string(const struct nfs_client *clp,
4142 char *buf, size_t len)
4143 {
4144 char *nodename = clp->cl_rpcclient->cl_nodename;
4145
4146 if (nfs4_client_id_uniquifier[0] != '\0')
4147 nodename = nfs4_client_id_uniquifier;
4148 return scnprintf(buf, len, "Linux NFSv%u.%u %s",
4149 clp->rpc_ops->version, clp->cl_minorversion,
4150 nodename);
4151 }
4152
4153 /**
4154 * nfs4_proc_setclientid - Negotiate client ID
4155 * @clp: state data structure
4156 * @program: RPC program for NFSv4 callback service
4157 * @port: IP port number for NFS4 callback service
4158 * @cred: RPC credential to use for this call
4159 * @res: where to place the result
4160 *
4161 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4162 */
4163 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
4164 unsigned short port, struct rpc_cred *cred,
4165 struct nfs4_setclientid_res *res)
4166 {
4167 nfs4_verifier sc_verifier;
4168 struct nfs4_setclientid setclientid = {
4169 .sc_verifier = &sc_verifier,
4170 .sc_prog = program,
4171 .sc_cb_ident = clp->cl_cb_ident,
4172 };
4173 struct rpc_message msg = {
4174 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
4175 .rpc_argp = &setclientid,
4176 .rpc_resp = res,
4177 .rpc_cred = cred,
4178 };
4179 int status;
4180
4181 /* nfs_client_id4 */
4182 nfs4_init_boot_verifier(clp, &sc_verifier);
4183 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
4184 setclientid.sc_name_len =
4185 nfs4_init_uniform_client_string(clp,
4186 setclientid.sc_name,
4187 sizeof(setclientid.sc_name));
4188 else
4189 setclientid.sc_name_len =
4190 nfs4_init_nonuniform_client_string(clp,
4191 setclientid.sc_name,
4192 sizeof(setclientid.sc_name));
4193 /* cb_client4 */
4194 rcu_read_lock();
4195 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
4196 sizeof(setclientid.sc_netid),
4197 rpc_peeraddr2str(clp->cl_rpcclient,
4198 RPC_DISPLAY_NETID));
4199 rcu_read_unlock();
4200 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
4201 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
4202 clp->cl_ipaddr, port >> 8, port & 255);
4203
4204 dprintk("NFS call setclientid auth=%s, '%.*s'\n",
4205 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4206 setclientid.sc_name_len, setclientid.sc_name);
4207 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4208 dprintk("NFS reply setclientid: %d\n", status);
4209 return status;
4210 }
4211
4212 /**
4213 * nfs4_proc_setclientid_confirm - Confirm client ID
4214 * @clp: state data structure
4215 * @arg: result of a previous SETCLIENTID
4216 * @cred: RPC credential to use for
this call 4217 * 4218 * Returns zero, a negative errno, or a negative NFS4ERR status code. 4219 */ 4220 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 4221 struct nfs4_setclientid_res *arg, 4222 struct rpc_cred *cred) 4223 { 4224 struct nfs_fsinfo fsinfo; 4225 struct rpc_message msg = { 4226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 4227 .rpc_argp = arg, 4228 .rpc_resp = &fsinfo, 4229 .rpc_cred = cred, 4230 }; 4231 unsigned long now; 4232 int status; 4233 4234 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 4235 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4236 clp->cl_clientid); 4237 now = jiffies; 4238 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4239 if (status == 0) { 4240 spin_lock(&clp->cl_lock); 4241 clp->cl_lease_time = fsinfo.lease_time * HZ; 4242 clp->cl_last_renewal = now; 4243 spin_unlock(&clp->cl_lock); 4244 } 4245 dprintk("NFS reply setclientid_confirm: %d\n", status); 4246 return status; 4247 } 4248 4249 struct nfs4_delegreturndata { 4250 struct nfs4_delegreturnargs args; 4251 struct nfs4_delegreturnres res; 4252 struct nfs_fh fh; 4253 nfs4_stateid stateid; 4254 unsigned long timestamp; 4255 struct nfs_fattr fattr; 4256 int rpc_status; 4257 }; 4258 4259 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 4260 { 4261 struct nfs4_delegreturndata *data = calldata; 4262 4263 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4264 return; 4265 4266 switch (task->tk_status) { 4267 case -NFS4ERR_STALE_STATEID: 4268 case -NFS4ERR_EXPIRED: 4269 case 0: 4270 renew_lease(data->res.server, data->timestamp); 4271 break; 4272 default: 4273 if (nfs4_async_handle_error(task, data->res.server, NULL) == 4274 -EAGAIN) { 4275 rpc_restart_call_prepare(task); 4276 return; 4277 } 4278 } 4279 data->rpc_status = task->tk_status; 4280 } 4281 4282 static void nfs4_delegreturn_release(void *calldata) 4283 { 4284 kfree(calldata); 4285 } 4286 4287 #if defined(CONFIG_NFS_V4_1) 4288 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 4289 { 4290 struct nfs4_delegreturndata *d_data; 4291 4292 d_data = (struct nfs4_delegreturndata *)data; 4293 4294 if (nfs4_setup_sequence(d_data->res.server, 4295 &d_data->args.seq_args, 4296 &d_data->res.seq_res, task)) 4297 return; 4298 rpc_call_start(task); 4299 } 4300 #endif /* CONFIG_NFS_V4_1 */ 4301 4302 static const struct rpc_call_ops nfs4_delegreturn_ops = { 4303 #if defined(CONFIG_NFS_V4_1) 4304 .rpc_call_prepare = nfs4_delegreturn_prepare, 4305 #endif /* CONFIG_NFS_V4_1 */ 4306 .rpc_call_done = nfs4_delegreturn_done, 4307 .rpc_release = nfs4_delegreturn_release, 4308 }; 4309 4310 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4311 { 4312 struct nfs4_delegreturndata *data; 4313 struct nfs_server *server = NFS_SERVER(inode); 4314 struct rpc_task *task; 4315 struct rpc_message msg = { 4316 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 4317 .rpc_cred = cred, 4318 }; 4319 struct rpc_task_setup task_setup_data = { 4320 .rpc_client = server->client, 4321 .rpc_message = &msg, 4322 .callback_ops = &nfs4_delegreturn_ops, 4323 .flags = RPC_TASK_ASYNC, 4324 }; 4325 int status = 0; 4326 4327 data = kzalloc(sizeof(*data), GFP_NOFS); 4328 if (data == NULL) 4329 return -ENOMEM; 4330 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4331 data->args.fhandle = &data->fh; 4332 data->args.stateid = &data->stateid; 4333 data->args.bitmask = server->cache_consistency_bitmask; 
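/* Setting the cache consistency bitmask makes the DELEGRETURN compound
 * carry a GETATTR as well; once the delegation is returned the client can
 * no longer trust attributes it cached under that delegation, so the
 * returned fattr is used below to refresh the inode.
 */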
4334 nfs_copy_fh(&data->fh, NFS_FH(inode)); 4335 nfs4_stateid_copy(&data->stateid, stateid); 4336 data->res.fattr = &data->fattr; 4337 data->res.server = server; 4338 nfs_fattr_init(data->res.fattr); 4339 data->timestamp = jiffies; 4340 data->rpc_status = 0; 4341 4342 task_setup_data.callback_data = data; 4343 msg.rpc_argp = &data->args; 4344 msg.rpc_resp = &data->res; 4345 task = rpc_run_task(&task_setup_data); 4346 if (IS_ERR(task)) 4347 return PTR_ERR(task); 4348 if (!issync) 4349 goto out; 4350 status = nfs4_wait_for_completion_rpc_task(task); 4351 if (status != 0) 4352 goto out; 4353 status = data->rpc_status; 4354 if (status == 0) 4355 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 4356 else 4357 nfs_refresh_inode(inode, &data->fattr); 4358 out: 4359 rpc_put_task(task); 4360 return status; 4361 } 4362 4363 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4364 { 4365 struct nfs_server *server = NFS_SERVER(inode); 4366 struct nfs4_exception exception = { }; 4367 int err; 4368 do { 4369 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 4370 switch (err) { 4371 case -NFS4ERR_STALE_STATEID: 4372 case -NFS4ERR_EXPIRED: 4373 case 0: 4374 return 0; 4375 } 4376 err = nfs4_handle_exception(server, err, &exception); 4377 } while (exception.retry); 4378 return err; 4379 } 4380 4381 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 4382 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 4383 4384 /* 4385 * sleep, with exponential backoff, and retry the LOCK operation. 4386 */ 4387 static unsigned long 4388 nfs4_set_lock_task_retry(unsigned long timeout) 4389 { 4390 freezable_schedule_timeout_killable(timeout); 4391 timeout <<= 1; 4392 if (timeout > NFS4_LOCK_MAXTIMEOUT) 4393 return NFS4_LOCK_MAXTIMEOUT; 4394 return timeout; 4395 } 4396 4397 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4398 { 4399 struct inode *inode = state->inode; 4400 struct nfs_server *server = NFS_SERVER(inode); 4401 struct nfs_client *clp = server->nfs_client; 4402 struct nfs_lockt_args arg = { 4403 .fh = NFS_FH(inode), 4404 .fl = request, 4405 }; 4406 struct nfs_lockt_res res = { 4407 .denied = request, 4408 }; 4409 struct rpc_message msg = { 4410 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 4411 .rpc_argp = &arg, 4412 .rpc_resp = &res, 4413 .rpc_cred = state->owner->so_cred, 4414 }; 4415 struct nfs4_lock_state *lsp; 4416 int status; 4417 4418 arg.lock_owner.clientid = clp->cl_clientid; 4419 status = nfs4_set_lock_state(state, request); 4420 if (status != 0) 4421 goto out; 4422 lsp = request->fl_u.nfs4_fl.owner; 4423 arg.lock_owner.id = lsp->ls_seqid.owner_id; 4424 arg.lock_owner.s_dev = server->s_dev; 4425 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4426 switch (status) { 4427 case 0: 4428 request->fl_type = F_UNLCK; 4429 break; 4430 case -NFS4ERR_DENIED: 4431 status = 0; 4432 } 4433 request->fl_ops->fl_release_private(request); 4434 out: 4435 return status; 4436 } 4437 4438 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4439 { 4440 struct nfs4_exception exception = { }; 4441 int err; 4442 4443 do { 4444 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4445 _nfs4_proc_getlk(state, cmd, request), 4446 &exception); 4447 } while (exception.retry); 4448 return err; 4449 } 4450 4451 static int do_vfs_lock(struct file *file, struct file_lock *fl) 4452 { 4453 int res = 0; 4454 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 4455 case 
FL_POSIX: 4456 res = posix_lock_file_wait(file, fl); 4457 break; 4458 case FL_FLOCK: 4459 res = flock_lock_file_wait(file, fl); 4460 break; 4461 default: 4462 BUG(); 4463 } 4464 return res; 4465 } 4466 4467 struct nfs4_unlockdata { 4468 struct nfs_locku_args arg; 4469 struct nfs_locku_res res; 4470 struct nfs4_lock_state *lsp; 4471 struct nfs_open_context *ctx; 4472 struct file_lock fl; 4473 const struct nfs_server *server; 4474 unsigned long timestamp; 4475 }; 4476 4477 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 4478 struct nfs_open_context *ctx, 4479 struct nfs4_lock_state *lsp, 4480 struct nfs_seqid *seqid) 4481 { 4482 struct nfs4_unlockdata *p; 4483 struct inode *inode = lsp->ls_state->inode; 4484 4485 p = kzalloc(sizeof(*p), GFP_NOFS); 4486 if (p == NULL) 4487 return NULL; 4488 p->arg.fh = NFS_FH(inode); 4489 p->arg.fl = &p->fl; 4490 p->arg.seqid = seqid; 4491 p->res.seqid = seqid; 4492 p->arg.stateid = &lsp->ls_stateid; 4493 p->lsp = lsp; 4494 atomic_inc(&lsp->ls_count); 4495 /* Ensure we don't close file until we're done freeing locks! */ 4496 p->ctx = get_nfs_open_context(ctx); 4497 memcpy(&p->fl, fl, sizeof(p->fl)); 4498 p->server = NFS_SERVER(inode); 4499 return p; 4500 } 4501 4502 static void nfs4_locku_release_calldata(void *data) 4503 { 4504 struct nfs4_unlockdata *calldata = data; 4505 nfs_free_seqid(calldata->arg.seqid); 4506 nfs4_put_lock_state(calldata->lsp); 4507 put_nfs_open_context(calldata->ctx); 4508 kfree(calldata); 4509 } 4510 4511 static void nfs4_locku_done(struct rpc_task *task, void *data) 4512 { 4513 struct nfs4_unlockdata *calldata = data; 4514 4515 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 4516 return; 4517 switch (task->tk_status) { 4518 case 0: 4519 nfs4_stateid_copy(&calldata->lsp->ls_stateid, 4520 &calldata->res.stateid); 4521 renew_lease(calldata->server, calldata->timestamp); 4522 break; 4523 case -NFS4ERR_BAD_STATEID: 4524 case -NFS4ERR_OLD_STATEID: 4525 case -NFS4ERR_STALE_STATEID: 4526 case -NFS4ERR_EXPIRED: 4527 break; 4528 default: 4529 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 4530 rpc_restart_call_prepare(task); 4531 } 4532 } 4533 4534 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 4535 { 4536 struct nfs4_unlockdata *calldata = data; 4537 4538 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 4539 return; 4540 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 4541 /* Note: exit _without_ running nfs4_locku_done */ 4542 task->tk_action = NULL; 4543 return; 4544 } 4545 calldata->timestamp = jiffies; 4546 if (nfs4_setup_sequence(calldata->server, 4547 &calldata->arg.seq_args, 4548 &calldata->res.seq_res, task)) 4549 return; 4550 rpc_call_start(task); 4551 } 4552 4553 static const struct rpc_call_ops nfs4_locku_ops = { 4554 .rpc_call_prepare = nfs4_locku_prepare, 4555 .rpc_call_done = nfs4_locku_done, 4556 .rpc_release = nfs4_locku_release_calldata, 4557 }; 4558 4559 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 4560 struct nfs_open_context *ctx, 4561 struct nfs4_lock_state *lsp, 4562 struct nfs_seqid *seqid) 4563 { 4564 struct nfs4_unlockdata *data; 4565 struct rpc_message msg = { 4566 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 4567 .rpc_cred = ctx->cred, 4568 }; 4569 struct rpc_task_setup task_setup_data = { 4570 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 4571 .rpc_message = &msg, 4572 .callback_ops = &nfs4_locku_ops, 4573 .workqueue = nfsiod_workqueue, 4574 .flags = RPC_TASK_ASYNC, 4575 }; 4576 4577 /* 
Ensure this is an unlock - when canceling a lock, the 4578 * canceled lock is passed in, and it won't be an unlock. 4579 */ 4580 fl->fl_type = F_UNLCK; 4581 4582 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 4583 if (data == NULL) { 4584 nfs_free_seqid(seqid); 4585 return ERR_PTR(-ENOMEM); 4586 } 4587 4588 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4589 msg.rpc_argp = &data->arg; 4590 msg.rpc_resp = &data->res; 4591 task_setup_data.callback_data = data; 4592 return rpc_run_task(&task_setup_data); 4593 } 4594 4595 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 4596 { 4597 struct nfs_inode *nfsi = NFS_I(state->inode); 4598 struct nfs_seqid *seqid; 4599 struct nfs4_lock_state *lsp; 4600 struct rpc_task *task; 4601 int status = 0; 4602 unsigned char fl_flags = request->fl_flags; 4603 4604 status = nfs4_set_lock_state(state, request); 4605 /* Unlock _before_ we do the RPC call */ 4606 request->fl_flags |= FL_EXISTS; 4607 down_read(&nfsi->rwsem); 4608 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 4609 up_read(&nfsi->rwsem); 4610 goto out; 4611 } 4612 up_read(&nfsi->rwsem); 4613 if (status != 0) 4614 goto out; 4615 /* Is this a delegated lock? */ 4616 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) 4617 goto out; 4618 lsp = request->fl_u.nfs4_fl.owner; 4619 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 4620 status = -ENOMEM; 4621 if (seqid == NULL) 4622 goto out; 4623 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 4624 status = PTR_ERR(task); 4625 if (IS_ERR(task)) 4626 goto out; 4627 status = nfs4_wait_for_completion_rpc_task(task); 4628 rpc_put_task(task); 4629 out: 4630 request->fl_flags = fl_flags; 4631 return status; 4632 } 4633 4634 struct nfs4_lockdata { 4635 struct nfs_lock_args arg; 4636 struct nfs_lock_res res; 4637 struct nfs4_lock_state *lsp; 4638 struct nfs_open_context *ctx; 4639 struct file_lock fl; 4640 unsigned long timestamp; 4641 int rpc_status; 4642 int cancelled; 4643 struct nfs_server *server; 4644 }; 4645 4646 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 4647 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 4648 gfp_t gfp_mask) 4649 { 4650 struct nfs4_lockdata *p; 4651 struct inode *inode = lsp->ls_state->inode; 4652 struct nfs_server *server = NFS_SERVER(inode); 4653 4654 p = kzalloc(sizeof(*p), gfp_mask); 4655 if (p == NULL) 4656 return NULL; 4657 4658 p->arg.fh = NFS_FH(inode); 4659 p->arg.fl = &p->fl; 4660 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 4661 if (p->arg.open_seqid == NULL) 4662 goto out_free; 4663 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); 4664 if (p->arg.lock_seqid == NULL) 4665 goto out_free_seqid; 4666 p->arg.lock_stateid = &lsp->ls_stateid; 4667 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 4668 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 4669 p->arg.lock_owner.s_dev = server->s_dev; 4670 p->res.lock_seqid = p->arg.lock_seqid; 4671 p->lsp = lsp; 4672 p->server = server; 4673 atomic_inc(&lsp->ls_count); 4674 p->ctx = get_nfs_open_context(ctx); 4675 memcpy(&p->fl, fl, sizeof(p->fl)); 4676 return p; 4677 out_free_seqid: 4678 nfs_free_seqid(p->arg.open_seqid); 4679 out_free: 4680 kfree(p); 4681 return NULL; 4682 } 4683 4684 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 4685 { 4686 struct nfs4_lockdata *data = calldata; 4687 struct nfs4_state *state = data->lsp->ls_state; 4688 4689 dprintk("%s: 
begin!\n", __func__); 4690 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 4691 return; 4692 /* Do we need to do an open_to_lock_owner? */ 4693 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { 4694 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) 4695 return; 4696 data->arg.open_stateid = &state->stateid; 4697 data->arg.new_lock_owner = 1; 4698 data->res.open_seqid = data->arg.open_seqid; 4699 } else 4700 data->arg.new_lock_owner = 0; 4701 data->timestamp = jiffies; 4702 if (nfs4_setup_sequence(data->server, 4703 &data->arg.seq_args, 4704 &data->res.seq_res, task)) 4705 return; 4706 rpc_call_start(task); 4707 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 4708 } 4709 4710 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) 4711 { 4712 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 4713 nfs4_lock_prepare(task, calldata); 4714 } 4715 4716 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 4717 { 4718 struct nfs4_lockdata *data = calldata; 4719 4720 dprintk("%s: begin!\n", __func__); 4721 4722 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4723 return; 4724 4725 data->rpc_status = task->tk_status; 4726 if (data->arg.new_lock_owner != 0) { 4727 if (data->rpc_status == 0) 4728 nfs_confirm_seqid(&data->lsp->ls_seqid, 0); 4729 else 4730 goto out; 4731 } 4732 if (data->rpc_status == 0) { 4733 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); 4734 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags); 4735 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 4736 } 4737 out: 4738 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 4739 } 4740 4741 static void nfs4_lock_release(void *calldata) 4742 { 4743 struct nfs4_lockdata *data = calldata; 4744 4745 dprintk("%s: begin!\n", __func__); 4746 nfs_free_seqid(data->arg.open_seqid); 4747 if (data->cancelled != 0) { 4748 struct rpc_task *task; 4749 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4750 data->arg.lock_seqid); 4751 if (!IS_ERR(task)) 4752 rpc_put_task_async(task); 4753 dprintk("%s: cancelling lock!\n", __func__); 4754 } else 4755 nfs_free_seqid(data->arg.lock_seqid); 4756 nfs4_put_lock_state(data->lsp); 4757 put_nfs_open_context(data->ctx); 4758 kfree(data); 4759 dprintk("%s: done!\n", __func__); 4760 } 4761 4762 static const struct rpc_call_ops nfs4_lock_ops = { 4763 .rpc_call_prepare = nfs4_lock_prepare, 4764 .rpc_call_done = nfs4_lock_done, 4765 .rpc_release = nfs4_lock_release, 4766 }; 4767 4768 static const struct rpc_call_ops nfs4_recover_lock_ops = { 4769 .rpc_call_prepare = nfs4_recover_lock_prepare, 4770 .rpc_call_done = nfs4_lock_done, 4771 .rpc_release = nfs4_lock_release, 4772 }; 4773 4774 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4775 { 4776 switch (error) { 4777 case -NFS4ERR_ADMIN_REVOKED: 4778 case -NFS4ERR_BAD_STATEID: 4779 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4780 if (new_lock_owner != 0 || 4781 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 4782 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 4783 break; 4784 case -NFS4ERR_STALE_STATEID: 4785 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4786 case -NFS4ERR_EXPIRED: 4787 nfs4_schedule_lease_recovery(server->nfs_client); 4788 }; 4789 } 4790 4791 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 4792 { 4793 struct nfs4_lockdata *data; 4794 struct rpc_task *task; 4795 struct 
rpc_message msg = { 4796 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 4797 .rpc_cred = state->owner->so_cred, 4798 }; 4799 struct rpc_task_setup task_setup_data = { 4800 .rpc_client = NFS_CLIENT(state->inode), 4801 .rpc_message = &msg, 4802 .callback_ops = &nfs4_lock_ops, 4803 .workqueue = nfsiod_workqueue, 4804 .flags = RPC_TASK_ASYNC, 4805 }; 4806 int ret; 4807 4808 dprintk("%s: begin!\n", __func__); 4809 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 4810 fl->fl_u.nfs4_fl.owner, 4811 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 4812 if (data == NULL) 4813 return -ENOMEM; 4814 if (IS_SETLKW(cmd)) 4815 data->arg.block = 1; 4816 if (recovery_type > NFS_LOCK_NEW) { 4817 if (recovery_type == NFS_LOCK_RECLAIM) 4818 data->arg.reclaim = NFS_LOCK_RECLAIM; 4819 task_setup_data.callback_ops = &nfs4_recover_lock_ops; 4820 } 4821 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4822 msg.rpc_argp = &data->arg; 4823 msg.rpc_resp = &data->res; 4824 task_setup_data.callback_data = data; 4825 task = rpc_run_task(&task_setup_data); 4826 if (IS_ERR(task)) 4827 return PTR_ERR(task); 4828 ret = nfs4_wait_for_completion_rpc_task(task); 4829 if (ret == 0) { 4830 ret = data->rpc_status; 4831 if (ret) 4832 nfs4_handle_setlk_error(data->server, data->lsp, 4833 data->arg.new_lock_owner, ret); 4834 } else 4835 data->cancelled = 1; 4836 rpc_put_task(task); 4837 dprintk("%s: done, ret = %d!\n", __func__, ret); 4838 return ret; 4839 } 4840 4841 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 4842 { 4843 struct nfs_server *server = NFS_SERVER(state->inode); 4844 struct nfs4_exception exception = { 4845 .inode = state->inode, 4846 }; 4847 int err; 4848 4849 do { 4850 /* Cache the lock if possible... */ 4851 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4852 return 0; 4853 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 4854 if (err != -NFS4ERR_DELAY) 4855 break; 4856 nfs4_handle_exception(server, err, &exception); 4857 } while (exception.retry); 4858 return err; 4859 } 4860 4861 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 4862 { 4863 struct nfs_server *server = NFS_SERVER(state->inode); 4864 struct nfs4_exception exception = { 4865 .inode = state->inode, 4866 }; 4867 int err; 4868 4869 err = nfs4_set_lock_state(state, request); 4870 if (err != 0) 4871 return err; 4872 do { 4873 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4874 return 0; 4875 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 4876 switch (err) { 4877 default: 4878 goto out; 4879 case -NFS4ERR_GRACE: 4880 case -NFS4ERR_DELAY: 4881 nfs4_handle_exception(server, err, &exception); 4882 err = 0; 4883 } 4884 } while (exception.retry); 4885 out: 4886 return err; 4887 } 4888 4889 #if defined(CONFIG_NFS_V4_1) 4890 /** 4891 * nfs41_check_expired_locks - possibly free a lock stateid 4892 * 4893 * @state: NFSv4 state for an inode 4894 * 4895 * Returns NFS_OK if recovery for this stateid is now finished. 4896 * Otherwise a negative NFS4ERR value is returned. 
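 *
 * Each lock stateid we still believe to be valid (NFS_LOCK_INITIALIZED)
 * is probed with TEST_STATEID.  If the server no longer accepts it, the
 * stateid is released with FREE_STATEID (unless the server already
 * reports it as unknown, i.e. NFS4ERR_BAD_STATEID) and the lock is left
 * to be re-established through the regular expired-lock path.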
4897 */ 4898 static int nfs41_check_expired_locks(struct nfs4_state *state) 4899 { 4900 int status, ret = -NFS4ERR_BAD_STATEID; 4901 struct nfs4_lock_state *lsp; 4902 struct nfs_server *server = NFS_SERVER(state->inode); 4903 4904 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 4905 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 4906 status = nfs41_test_stateid(server, &lsp->ls_stateid); 4907 if (status != NFS_OK) { 4908 /* Free the stateid unless the server 4909 * informs us the stateid is unrecognized. */ 4910 if (status != -NFS4ERR_BAD_STATEID) 4911 nfs41_free_stateid(server, 4912 &lsp->ls_stateid); 4913 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 4914 ret = status; 4915 } 4916 } 4917 }; 4918 4919 return ret; 4920 } 4921 4922 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 4923 { 4924 int status = NFS_OK; 4925 4926 if (test_bit(LK_STATE_IN_USE, &state->flags)) 4927 status = nfs41_check_expired_locks(state); 4928 if (status != NFS_OK) 4929 status = nfs4_lock_expired(state, request); 4930 return status; 4931 } 4932 #endif 4933 4934 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4935 { 4936 struct nfs_inode *nfsi = NFS_I(state->inode); 4937 unsigned char fl_flags = request->fl_flags; 4938 int status = -ENOLCK; 4939 4940 if ((fl_flags & FL_POSIX) && 4941 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 4942 goto out; 4943 /* Is this a delegated open? */ 4944 status = nfs4_set_lock_state(state, request); 4945 if (status != 0) 4946 goto out; 4947 request->fl_flags |= FL_ACCESS; 4948 status = do_vfs_lock(request->fl_file, request); 4949 if (status < 0) 4950 goto out; 4951 down_read(&nfsi->rwsem); 4952 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 4953 /* Yes: cache locks! */ 4954 /* ...but avoid races with delegation recall... */ 4955 request->fl_flags = fl_flags & ~FL_SLEEP; 4956 status = do_vfs_lock(request->fl_file, request); 4957 goto out_unlock; 4958 } 4959 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 4960 if (status != 0) 4961 goto out_unlock; 4962 /* Note: we always want to sleep here! 
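 * The server has already granted this lock, so the corresponding local
 * VFS lock must be recorded even if the caller asked for a non-blocking
 * lock; forcing FL_SLEEP lets do_vfs_lock() wait out a transient local
 * conflict instead of failing and leaving the VFS view of the file out
 * of sync with the server (hence the warning below if it still fails).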
*/ 4963 request->fl_flags = fl_flags | FL_SLEEP; 4964 if (do_vfs_lock(request->fl_file, request) < 0) 4965 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " 4966 "manager!\n", __func__); 4967 out_unlock: 4968 up_read(&nfsi->rwsem); 4969 out: 4970 request->fl_flags = fl_flags; 4971 return status; 4972 } 4973 4974 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4975 { 4976 struct nfs4_exception exception = { 4977 .state = state, 4978 .inode = state->inode, 4979 }; 4980 int err; 4981 4982 do { 4983 err = _nfs4_proc_setlk(state, cmd, request); 4984 if (err == -NFS4ERR_DENIED) 4985 err = -EAGAIN; 4986 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4987 err, &exception); 4988 } while (exception.retry); 4989 return err; 4990 } 4991 4992 static int 4993 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 4994 { 4995 struct nfs_open_context *ctx; 4996 struct nfs4_state *state; 4997 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 4998 int status; 4999 5000 /* verify open state */ 5001 ctx = nfs_file_open_context(filp); 5002 state = ctx->state; 5003 5004 if (request->fl_start < 0 || request->fl_end < 0) 5005 return -EINVAL; 5006 5007 if (IS_GETLK(cmd)) { 5008 if (state != NULL) 5009 return nfs4_proc_getlk(state, F_GETLK, request); 5010 return 0; 5011 } 5012 5013 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 5014 return -EINVAL; 5015 5016 if (request->fl_type == F_UNLCK) { 5017 if (state != NULL) 5018 return nfs4_proc_unlck(state, cmd, request); 5019 return 0; 5020 } 5021 5022 if (state == NULL) 5023 return -ENOLCK; 5024 /* 5025 * Don't rely on the VFS having checked the file open mode, 5026 * since it won't do this for flock() locks. 5027 */ 5028 switch (request->fl_type) { 5029 case F_RDLCK: 5030 if (!(filp->f_mode & FMODE_READ)) 5031 return -EBADF; 5032 break; 5033 case F_WRLCK: 5034 if (!(filp->f_mode & FMODE_WRITE)) 5035 return -EBADF; 5036 } 5037 5038 do { 5039 status = nfs4_proc_setlk(state, cmd, request); 5040 if ((status != -EAGAIN) || IS_SETLK(cmd)) 5041 break; 5042 timeout = nfs4_set_lock_task_retry(timeout); 5043 status = -ERESTARTSYS; 5044 if (signalled()) 5045 break; 5046 } while(status < 0); 5047 return status; 5048 } 5049 5050 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) 5051 { 5052 struct nfs_server *server = NFS_SERVER(state->inode); 5053 struct nfs4_exception exception = { }; 5054 int err; 5055 5056 err = nfs4_set_lock_state(state, fl); 5057 if (err != 0) 5058 goto out; 5059 do { 5060 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 5061 switch (err) { 5062 default: 5063 printk(KERN_ERR "NFS: %s: unhandled error " 5064 "%d.\n", __func__, err); 5065 case 0: 5066 case -ESTALE: 5067 goto out; 5068 case -NFS4ERR_EXPIRED: 5069 nfs4_schedule_stateid_recovery(server, state); 5070 case -NFS4ERR_STALE_CLIENTID: 5071 case -NFS4ERR_STALE_STATEID: 5072 nfs4_schedule_lease_recovery(server->nfs_client); 5073 goto out; 5074 case -NFS4ERR_BADSESSION: 5075 case -NFS4ERR_BADSLOT: 5076 case -NFS4ERR_BAD_HIGH_SLOT: 5077 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 5078 case -NFS4ERR_DEADSESSION: 5079 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 5080 goto out; 5081 case -ERESTARTSYS: 5082 /* 5083 * The show must go on: exit, but mark the 5084 * stateid as needing recovery. 
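 * We are running on behalf of a delegation recall, so failure to
 * reacquire the lock must not stall returning the delegation;
 * scheduling stateid recovery lets the state manager retry the lock
 * later, while the recall itself is reported as successful (err is
 * cleared below).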
5085 */ 5086 case -NFS4ERR_DELEG_REVOKED: 5087 case -NFS4ERR_ADMIN_REVOKED: 5088 case -NFS4ERR_BAD_STATEID: 5089 case -NFS4ERR_OPENMODE: 5090 nfs4_schedule_stateid_recovery(server, state); 5091 err = 0; 5092 goto out; 5093 case -EKEYEXPIRED: 5094 /* 5095 * User RPCSEC_GSS context has expired. 5096 * We cannot recover this stateid now, so 5097 * skip it and allow recovery thread to 5098 * proceed. 5099 */ 5100 err = 0; 5101 goto out; 5102 case -ENOMEM: 5103 case -NFS4ERR_DENIED: 5104 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 5105 err = 0; 5106 goto out; 5107 case -NFS4ERR_DELAY: 5108 break; 5109 } 5110 err = nfs4_handle_exception(server, err, &exception); 5111 } while (exception.retry); 5112 out: 5113 return err; 5114 } 5115 5116 struct nfs_release_lockowner_data { 5117 struct nfs4_lock_state *lsp; 5118 struct nfs_server *server; 5119 struct nfs_release_lockowner_args args; 5120 }; 5121 5122 static void nfs4_release_lockowner_release(void *calldata) 5123 { 5124 struct nfs_release_lockowner_data *data = calldata; 5125 nfs4_free_lock_state(data->server, data->lsp); 5126 kfree(calldata); 5127 } 5128 5129 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 5130 .rpc_release = nfs4_release_lockowner_release, 5131 }; 5132 5133 int nfs4_release_lockowner(struct nfs4_lock_state *lsp) 5134 { 5135 struct nfs_server *server = lsp->ls_state->owner->so_server; 5136 struct nfs_release_lockowner_data *data; 5137 struct rpc_message msg = { 5138 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 5139 }; 5140 5141 if (server->nfs_client->cl_mvops->minor_version != 0) 5142 return -EINVAL; 5143 data = kmalloc(sizeof(*data), GFP_NOFS); 5144 if (!data) 5145 return -ENOMEM; 5146 data->lsp = lsp; 5147 data->server = server; 5148 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 5149 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 5150 data->args.lock_owner.s_dev = server->s_dev; 5151 msg.rpc_argp = &data->args; 5152 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5153 return 0; 5154 } 5155 5156 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 5157 5158 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 5159 const void *buf, size_t buflen, 5160 int flags, int type) 5161 { 5162 if (strcmp(key, "") != 0) 5163 return -EINVAL; 5164 5165 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); 5166 } 5167 5168 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 5169 void *buf, size_t buflen, int type) 5170 { 5171 if (strcmp(key, "") != 0) 5172 return -EINVAL; 5173 5174 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); 5175 } 5176 5177 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 5178 size_t list_len, const char *name, 5179 size_t name_len, int type) 5180 { 5181 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 5182 5183 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 5184 return 0; 5185 5186 if (list && len <= list_len) 5187 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 5188 return len; 5189 } 5190 5191 /* 5192 * nfs_fhget will use either the mounted_on_fileid or the fileid 5193 */ 5194 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 5195 { 5196 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 5197 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 5198 (fattr->valid & NFS_ATTR_FATTR_FSID) && 5199 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 5200 return; 5201 5202 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 5203 NFS_ATTR_FATTR_NLINK 
| NFS_ATTR_FATTR_V4_REFERRAL; 5204 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 5205 fattr->nlink = 2; 5206 } 5207 5208 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5209 const struct qstr *name, 5210 struct nfs4_fs_locations *fs_locations, 5211 struct page *page) 5212 { 5213 struct nfs_server *server = NFS_SERVER(dir); 5214 u32 bitmask[2] = { 5215 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 5216 }; 5217 struct nfs4_fs_locations_arg args = { 5218 .dir_fh = NFS_FH(dir), 5219 .name = name, 5220 .page = page, 5221 .bitmask = bitmask, 5222 }; 5223 struct nfs4_fs_locations_res res = { 5224 .fs_locations = fs_locations, 5225 }; 5226 struct rpc_message msg = { 5227 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 5228 .rpc_argp = &args, 5229 .rpc_resp = &res, 5230 }; 5231 int status; 5232 5233 dprintk("%s: start\n", __func__); 5234 5235 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 5236 * is not supported */ 5237 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 5238 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 5239 else 5240 bitmask[0] |= FATTR4_WORD0_FILEID; 5241 5242 nfs_fattr_init(&fs_locations->fattr); 5243 fs_locations->server = server; 5244 fs_locations->nlocations = 0; 5245 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 5246 dprintk("%s: returned status = %d\n", __func__, status); 5247 return status; 5248 } 5249 5250 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5251 const struct qstr *name, 5252 struct nfs4_fs_locations *fs_locations, 5253 struct page *page) 5254 { 5255 struct nfs4_exception exception = { }; 5256 int err; 5257 do { 5258 err = nfs4_handle_exception(NFS_SERVER(dir), 5259 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page), 5260 &exception); 5261 } while (exception.retry); 5262 return err; 5263 } 5264 5265 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) 5266 { 5267 int status; 5268 struct nfs4_secinfo_arg args = { 5269 .dir_fh = NFS_FH(dir), 5270 .name = name, 5271 }; 5272 struct nfs4_secinfo_res res = { 5273 .flavors = flavors, 5274 }; 5275 struct rpc_message msg = { 5276 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 5277 .rpc_argp = &args, 5278 .rpc_resp = &res, 5279 }; 5280 5281 dprintk("NFS call secinfo %s\n", name->name); 5282 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 5283 dprintk("NFS reply secinfo: %d\n", status); 5284 return status; 5285 } 5286 5287 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 5288 struct nfs4_secinfo_flavors *flavors) 5289 { 5290 struct nfs4_exception exception = { }; 5291 int err; 5292 do { 5293 err = nfs4_handle_exception(NFS_SERVER(dir), 5294 _nfs4_proc_secinfo(dir, name, flavors), 5295 &exception); 5296 } while (exception.retry); 5297 return err; 5298 } 5299 5300 #ifdef CONFIG_NFS_V4_1 5301 /* 5302 * Check the exchange flags returned by the server for invalid flags, having 5303 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 5304 * DS flags set. 
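 * Put differently: the EXCHANGE_ID reply must set at least one of the
 * pNFS role bits, must not claim to be both a pNFS metadata server and
 * a non-pNFS server at the same time, and must not set any bit outside
 * EXCHGID4_FLAG_MASK_R.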
5305 */ 5306 static int nfs4_check_cl_exchange_flags(u32 flags) 5307 { 5308 if (flags & ~EXCHGID4_FLAG_MASK_R) 5309 goto out_inval; 5310 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 5311 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 5312 goto out_inval; 5313 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 5314 goto out_inval; 5315 return NFS_OK; 5316 out_inval: 5317 return -NFS4ERR_INVAL; 5318 } 5319 5320 static bool 5321 nfs41_same_server_scope(struct nfs41_server_scope *a, 5322 struct nfs41_server_scope *b) 5323 { 5324 if (a->server_scope_sz == b->server_scope_sz && 5325 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 5326 return true; 5327 5328 return false; 5329 } 5330 5331 /* 5332 * nfs4_proc_bind_conn_to_session() 5333 * 5334 * The 4.1 client currently uses the same TCP connection for the 5335 * fore and backchannel. 5336 */ 5337 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 5338 { 5339 int status; 5340 struct nfs41_bind_conn_to_session_res res; 5341 struct rpc_message msg = { 5342 .rpc_proc = 5343 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 5344 .rpc_argp = clp, 5345 .rpc_resp = &res, 5346 .rpc_cred = cred, 5347 }; 5348 5349 dprintk("--> %s\n", __func__); 5350 BUG_ON(clp == NULL); 5351 5352 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5353 if (unlikely(res.session == NULL)) { 5354 status = -ENOMEM; 5355 goto out; 5356 } 5357 5358 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5359 if (status == 0) { 5360 if (memcmp(res.session->sess_id.data, 5361 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 5362 dprintk("NFS: %s: Session ID mismatch\n", __func__); 5363 status = -EIO; 5364 goto out_session; 5365 } 5366 if (res.dir != NFS4_CDFS4_BOTH) { 5367 dprintk("NFS: %s: Unexpected direction from server\n", 5368 __func__); 5369 status = -EIO; 5370 goto out_session; 5371 } 5372 if (res.use_conn_in_rdma_mode) { 5373 dprintk("NFS: %s: Server returned RDMA mode = true\n", 5374 __func__); 5375 status = -EIO; 5376 goto out_session; 5377 } 5378 } 5379 out_session: 5380 kfree(res.session); 5381 out: 5382 dprintk("<-- %s status= %d\n", __func__, status); 5383 return status; 5384 } 5385 5386 /* 5387 * nfs4_proc_exchange_id() 5388 * 5389 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5390 * 5391 * Since the clientid has expired, all compounds using sessions 5392 * associated with the stale clientid will be returning 5393 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 5394 * be in some phase of session reset. 
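 *
 * On success the routine below records the new clientid and, unless the
 * server reported EXCHGID4_FLAG_CONFIRMED_R, the sequence id to use for
 * the following CREATE_SESSION; it also takes over the returned
 * server_owner and implementation id, and compares the returned
 * server_scope with the one cached earlier so that a change of server
 * identity can be flagged via NFS4CLNT_SERVER_SCOPE_MISMATCH.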
5395 */ 5396 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 5397 { 5398 nfs4_verifier verifier; 5399 struct nfs41_exchange_id_args args = { 5400 .verifier = &verifier, 5401 .client = clp, 5402 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, 5403 }; 5404 struct nfs41_exchange_id_res res = { 5405 0 5406 }; 5407 int status; 5408 struct rpc_message msg = { 5409 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 5410 .rpc_argp = &args, 5411 .rpc_resp = &res, 5412 .rpc_cred = cred, 5413 }; 5414 5415 nfs4_init_boot_verifier(clp, &verifier); 5416 args.id_len = nfs4_init_uniform_client_string(clp, args.id, 5417 sizeof(args.id)); 5418 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 5419 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5420 args.id_len, args.id); 5421 5422 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 5423 GFP_NOFS); 5424 if (unlikely(res.server_owner == NULL)) { 5425 status = -ENOMEM; 5426 goto out; 5427 } 5428 5429 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 5430 GFP_NOFS); 5431 if (unlikely(res.server_scope == NULL)) { 5432 status = -ENOMEM; 5433 goto out_server_owner; 5434 } 5435 5436 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 5437 if (unlikely(res.impl_id == NULL)) { 5438 status = -ENOMEM; 5439 goto out_server_scope; 5440 } 5441 5442 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5443 if (status == 0) 5444 status = nfs4_check_cl_exchange_flags(res.flags); 5445 5446 if (status == 0) { 5447 clp->cl_clientid = res.clientid; 5448 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 5449 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 5450 clp->cl_seqid = res.seqid; 5451 5452 kfree(clp->cl_serverowner); 5453 clp->cl_serverowner = res.server_owner; 5454 res.server_owner = NULL; 5455 5456 /* use the most recent implementation id */ 5457 kfree(clp->cl_implid); 5458 clp->cl_implid = res.impl_id; 5459 5460 if (clp->cl_serverscope != NULL && 5461 !nfs41_same_server_scope(clp->cl_serverscope, 5462 res.server_scope)) { 5463 dprintk("%s: server_scope mismatch detected\n", 5464 __func__); 5465 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 5466 kfree(clp->cl_serverscope); 5467 clp->cl_serverscope = NULL; 5468 } 5469 5470 if (clp->cl_serverscope == NULL) { 5471 clp->cl_serverscope = res.server_scope; 5472 goto out; 5473 } 5474 } else 5475 kfree(res.impl_id); 5476 5477 out_server_owner: 5478 kfree(res.server_owner); 5479 out_server_scope: 5480 kfree(res.server_scope); 5481 out: 5482 if (clp->cl_implid != NULL) 5483 dprintk("NFS reply exchange_id: Server Implementation ID: " 5484 "domain: %s, name: %s, date: %llu,%u\n", 5485 clp->cl_implid->domain, clp->cl_implid->name, 5486 clp->cl_implid->date.seconds, 5487 clp->cl_implid->date.nseconds); 5488 dprintk("NFS reply exchange_id: %d\n", status); 5489 return status; 5490 } 5491 5492 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 5493 struct rpc_cred *cred) 5494 { 5495 struct rpc_message msg = { 5496 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 5497 .rpc_argp = clp, 5498 .rpc_cred = cred, 5499 }; 5500 int status; 5501 5502 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5503 if (status) 5504 dprintk("NFS: Got error %d from the server %s on " 5505 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5506 return status; 5507 } 5508 5509 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 5510 struct rpc_cred *cred) 5511 { 5512 unsigned int loop; 5513 int ret; 5514 5515 for (loop = 
NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 5516 ret = _nfs4_proc_destroy_clientid(clp, cred); 5517 switch (ret) { 5518 case -NFS4ERR_DELAY: 5519 case -NFS4ERR_CLIENTID_BUSY: 5520 ssleep(1); 5521 break; 5522 default: 5523 return ret; 5524 } 5525 } 5526 return 0; 5527 } 5528 5529 int nfs4_destroy_clientid(struct nfs_client *clp) 5530 { 5531 struct rpc_cred *cred; 5532 int ret = 0; 5533 5534 if (clp->cl_mvops->minor_version < 1) 5535 goto out; 5536 if (clp->cl_exchange_flags == 0) 5537 goto out; 5538 if (clp->cl_preserve_clid) 5539 goto out; 5540 cred = nfs4_get_exchange_id_cred(clp); 5541 ret = nfs4_proc_destroy_clientid(clp, cred); 5542 if (cred) 5543 put_rpccred(cred); 5544 switch (ret) { 5545 case 0: 5546 case -NFS4ERR_STALE_CLIENTID: 5547 clp->cl_exchange_flags = 0; 5548 } 5549 out: 5550 return ret; 5551 } 5552 5553 struct nfs4_get_lease_time_data { 5554 struct nfs4_get_lease_time_args *args; 5555 struct nfs4_get_lease_time_res *res; 5556 struct nfs_client *clp; 5557 }; 5558 5559 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 5560 void *calldata) 5561 { 5562 int ret; 5563 struct nfs4_get_lease_time_data *data = 5564 (struct nfs4_get_lease_time_data *)calldata; 5565 5566 dprintk("--> %s\n", __func__); 5567 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 5568 /* just setup sequence, do not trigger session recovery 5569 since we're invoked within one */ 5570 ret = nfs41_setup_sequence(data->clp->cl_session, 5571 &data->args->la_seq_args, 5572 &data->res->lr_seq_res, task); 5573 5574 BUG_ON(ret == -EAGAIN); 5575 rpc_call_start(task); 5576 dprintk("<-- %s\n", __func__); 5577 } 5578 5579 /* 5580 * Called from nfs4_state_manager thread for session setup, so don't recover 5581 * from sequence operation or clientid errors. 5582 */ 5583 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 5584 { 5585 struct nfs4_get_lease_time_data *data = 5586 (struct nfs4_get_lease_time_data *)calldata; 5587 5588 dprintk("--> %s\n", __func__); 5589 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 5590 return; 5591 switch (task->tk_status) { 5592 case -NFS4ERR_DELAY: 5593 case -NFS4ERR_GRACE: 5594 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 5595 rpc_delay(task, NFS4_POLL_RETRY_MIN); 5596 task->tk_status = 0; 5597 /* fall through */ 5598 case -NFS4ERR_RETRY_UNCACHED_REP: 5599 rpc_restart_call_prepare(task); 5600 return; 5601 } 5602 dprintk("<-- %s\n", __func__); 5603 } 5604 5605 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 5606 .rpc_call_prepare = nfs4_get_lease_time_prepare, 5607 .rpc_call_done = nfs4_get_lease_time_done, 5608 }; 5609 5610 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 5611 { 5612 struct rpc_task *task; 5613 struct nfs4_get_lease_time_args args; 5614 struct nfs4_get_lease_time_res res = { 5615 .lr_fsinfo = fsinfo, 5616 }; 5617 struct nfs4_get_lease_time_data data = { 5618 .args = &args, 5619 .res = &res, 5620 .clp = clp, 5621 }; 5622 struct rpc_message msg = { 5623 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 5624 .rpc_argp = &args, 5625 .rpc_resp = &res, 5626 }; 5627 struct rpc_task_setup task_setup = { 5628 .rpc_client = clp->cl_rpcclient, 5629 .rpc_message = &msg, 5630 .callback_ops = &nfs4_get_lease_time_ops, 5631 .callback_data = &data, 5632 .flags = RPC_TASK_TIMEOUT, 5633 }; 5634 int status; 5635 5636 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 5637 dprintk("--> %s\n", __func__); 5638 task = rpc_run_task(&task_setup); 5639 5640 if 
(IS_ERR(task)) 5641 status = PTR_ERR(task); 5642 else { 5643 status = task->tk_status; 5644 rpc_put_task(task); 5645 } 5646 dprintk("<-- %s return %d\n", __func__, status); 5647 5648 return status; 5649 } 5650 5651 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) 5652 { 5653 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); 5654 } 5655 5656 static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, 5657 struct nfs4_slot *new, 5658 u32 max_slots, 5659 u32 ivalue) 5660 { 5661 struct nfs4_slot *old = NULL; 5662 u32 i; 5663 5664 spin_lock(&tbl->slot_tbl_lock); 5665 if (new) { 5666 old = tbl->slots; 5667 tbl->slots = new; 5668 tbl->max_slots = max_slots; 5669 } 5670 tbl->highest_used_slotid = -1; /* no slot is currently used */ 5671 for (i = 0; i < tbl->max_slots; i++) 5672 tbl->slots[i].seq_nr = ivalue; 5673 spin_unlock(&tbl->slot_tbl_lock); 5674 kfree(old); 5675 } 5676 5677 /* 5678 * (re)Initialise a slot table 5679 */ 5680 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, 5681 u32 ivalue) 5682 { 5683 struct nfs4_slot *new = NULL; 5684 int ret = -ENOMEM; 5685 5686 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, 5687 max_reqs, tbl->max_slots); 5688 5689 /* Does the newly negotiated max_reqs match the existing slot table? */ 5690 if (max_reqs != tbl->max_slots) { 5691 new = nfs4_alloc_slots(max_reqs, GFP_NOFS); 5692 if (!new) 5693 goto out; 5694 } 5695 ret = 0; 5696 5697 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); 5698 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, 5699 tbl, tbl->slots, tbl->max_slots); 5700 out: 5701 dprintk("<-- %s: return %d\n", __func__, ret); 5702 return ret; 5703 } 5704 5705 /* Destroy the slot table */ 5706 static void nfs4_destroy_slot_tables(struct nfs4_session *session) 5707 { 5708 if (session->fc_slot_table.slots != NULL) { 5709 kfree(session->fc_slot_table.slots); 5710 session->fc_slot_table.slots = NULL; 5711 } 5712 if (session->bc_slot_table.slots != NULL) { 5713 kfree(session->bc_slot_table.slots); 5714 session->bc_slot_table.slots = NULL; 5715 } 5716 return; 5717 } 5718 5719 /* 5720 * Initialize or reset the forechannel and backchannel tables 5721 */ 5722 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) 5723 { 5724 struct nfs4_slot_table *tbl; 5725 int status; 5726 5727 dprintk("--> %s\n", __func__); 5728 /* Fore channel */ 5729 tbl = &ses->fc_slot_table; 5730 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); 5731 if (status) /* -ENOMEM */ 5732 return status; 5733 /* Back channel */ 5734 tbl = &ses->bc_slot_table; 5735 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); 5736 if (status && tbl->slots == NULL) 5737 /* Fore and back channel share a connection so get 5738 * both slot tables or neither */ 5739 nfs4_destroy_slot_tables(ses); 5740 return status; 5741 } 5742 5743 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) 5744 { 5745 struct nfs4_session *session; 5746 struct nfs4_slot_table *tbl; 5747 5748 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5749 if (!session) 5750 return NULL; 5751 5752 tbl = &session->fc_slot_table; 5753 tbl->highest_used_slotid = NFS4_NO_SLOT; 5754 spin_lock_init(&tbl->slot_tbl_lock); 5755 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); 5756 init_completion(&tbl->complete); 5757 5758 tbl = &session->bc_slot_table; 5759 tbl->highest_used_slotid = NFS4_NO_SLOT; 5760 spin_lock_init(&tbl->slot_tbl_lock); 5761 
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	tbl = &session->fc_slot_table;
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
	init_completion(&tbl->complete);

	tbl = &session->bc_slot_table;
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
	init_completion(&tbl->complete);

	session->session_state = 1<<NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}

void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;
	struct rpc_cred *cred;

	cred = nfs4_get_exchange_id_cred(session->clp);
	nfs4_proc_destroy_session(session, cred);
	if (cred)
		put_rpccred(cred);

	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_slot_tables(session);
	kfree(session);
}

/*
 * Initialize the values to be used by the client in CREATE_SESSION
 * If nfs4_init_session set the fore channel request and response sizes,
 * use them.
 *
 * Set the back channel max_resp_sz_cached to zero to force the client to
 * always set csa_cachethis to FALSE because the current implementation
 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
 */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
	struct nfs4_session *session = args->client->cl_session;
	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
		     mxresp_sz = session->fc_attrs.max_resp_sz;

	if (mxrqst_sz == 0)
		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
	if (mxresp_sz == 0)
		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
	/* Fore channel attributes */
	args->fc_attrs.max_rqst_sz = mxrqst_sz;
	args->fc_attrs.max_resp_sz = mxresp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	args->fc_attrs.max_reqs = max_session_slots;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	args->bc_attrs.max_reqs = 1;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
		__func__,
		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
		args->bc_attrs.max_reqs);
}
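/*
 * Editor's example (illustrative only): with an unconfigured mount the
 * fore-channel request/response sizes above both default to
 * NFS_MAX_FILE_IO_SIZE, while a mount that has already run
 * nfs4_init_session() with rsize=65536/wsize=65536 instead requests
 * roughly
 *
 *	args->fc_attrs.max_rqst_sz = 65536 + nfs41_maxwrite_overhead;
 *	args->fc_attrs.max_resp_sz = 65536 + nfs41_maxread_overhead;
 *
 * The back channel is kept intentionally small (one PAGE_SIZE request and
 * response, a single slot) since callback compounds are tiny, and its DRC
 * never caches anything beyond CB_SEQUENCE.
 */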
static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
	struct nfs4_channel_attrs *sent = &args->fc_attrs;
	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;

	if (rcvd->max_resp_sz > sent->max_resp_sz)
		return -EINVAL;
	/*
	 * Our requested max_ops is the minimum we need; we're not
	 * prepared to break up compounds into smaller pieces than that.
	 * So, no point even trying to continue if the server won't
	 * cooperate:
	 */
	if (rcvd->max_ops < sent->max_ops)
		return -EINVAL;
	if (rcvd->max_reqs == 0)
		return -EINVAL;
	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
	return 0;
}

static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
	struct nfs4_channel_attrs *sent = &args->bc_attrs;
	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;

	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz < sent->max_resp_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
		return -EINVAL;
	/* These would render the backchannel useless: */
	if (rcvd->max_ops != sent->max_ops)
		return -EINVAL;
	if (rcvd->max_reqs != sent->max_reqs)
		return -EINVAL;
	return 0;
}

static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
				     struct nfs4_session *session)
{
	int ret;

	ret = nfs4_verify_fore_channel_attrs(args, session);
	if (ret)
		return ret;
	return nfs4_verify_back_channel_attrs(args, session);
}

static int _nfs4_proc_create_session(struct nfs_client *clp,
		struct rpc_cred *cred)
{
	struct nfs4_session *session = clp->cl_session;
	struct nfs41_create_session_args args = {
		.client = clp,
		.cb_program = NFS4_CALLBACK,
	};
	struct nfs41_create_session_res res = {
		.client = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	nfs4_init_channel_attrs(&args);
	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);

	if (!status)
		/* Verify the session's negotiated channel_attrs values */
		status = nfs4_verify_channel_attrs(&args, session);
	if (!status) {
		/* Increment the clientid slot sequence id */
		clp->cl_seqid++;
	}

	return status;
}

/*
 * Issues a CREATE_SESSION operation to the server.
 * It is the responsibility of the caller to verify the session is
 * expired before calling this routine.
 */
int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status;
	unsigned *ptr;
	struct nfs4_session *session = clp->cl_session;

	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);

	status = _nfs4_proc_create_session(clp, cred);
	if (status)
		goto out;

	/* Init or reset the session slot tables */
	status = nfs4_setup_session_slot_tables(session);
	dprintk("slot table setup returned %d\n", status);
	if (status)
		goto out;

	ptr = (unsigned *)&session->sess_id.data[0];
	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
	dprintk("<-- %s\n", __func__);
	return status;
}
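/*
 * Editor's sketch (illustrative, simplified): the state manager is the
 * expected caller and, under assumptions about its surrounding error
 * handling, establishes a session roughly as follows:
 *
 *	cred = nfs4_get_exchange_id_cred(clp);
 *	status = nfs4_proc_exchange_id(clp, cred);
 *	if (status == 0)
 *		status = nfs4_proc_create_session(clp, cred);
 *	if (status == 0)
 *		status = nfs4_proc_get_lease_time(clp, &fsinfo);
 *
 * i.e. EXCHANGE_ID negotiates the client ID, CREATE_SESSION binds it to a
 * session and (re)builds the slot tables, and GET_LEASE_TIME refreshes the
 * lease period used by the renewal timer.
 */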
/*
 * Issue the over-the-wire RPC DESTROY_SESSION.
 * The caller must serialize access to this routine.
 */
int nfs4_proc_destroy_session(struct nfs4_session *session,
		struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
		.rpc_argp = session,
		.rpc_cred = cred,
	};
	int status = 0;

	dprintk("--> nfs4_proc_destroy_session\n");

	/* session is still being setup */
	if (session->clp->cl_cons_state != NFS_CS_READY)
		return status;

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);

	if (status)
		dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
			"Session has been destroyed regardless...\n", status);

	dprintk("<-- nfs4_proc_destroy_session\n");
	return status;
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_session *session;
	unsigned int rsize, wsize;

	if (!nfs4_has_session(clp))
		return 0;

	session = clp->cl_session;
	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {

		rsize = server->rsize;
		if (rsize == 0)
			rsize = NFS_MAX_FILE_IO_SIZE;
		wsize = server->wsize;
		if (wsize == 0)
			wsize = NFS_MAX_FILE_IO_SIZE;

		session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
		session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
	}
	spin_unlock(&clp->cl_lock);

	return nfs41_check_session_ready(clp);
}

int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
		 * DS lease to be equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
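/*
 * Editor's note (illustrative): a pNFS data-server client skips the
 * GET_LEASE_TIME probe and simply inherits the MDS lease, e.g. from a
 * layout driver (sketch only; names of the caller's variables are
 * assumptions):
 *
 *	err = nfs4_init_ds_session(ds_clp,
 *				   mds_server->nfs_client->cl_lease_time);
 *	if (err)
 *		goto out_put_client;
 *
 * which also verifies that the server actually advertised the DS role in
 * its EXCHANGE_ID flags (is_ds_client()).
 */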
/*
 * Renew the cl_session lease.
 */
struct nfs4_sequence_data {
	struct nfs_client *clp;
	struct nfs4_sequence_args args;
	struct nfs4_sequence_res res;
};

static void nfs41_sequence_release(void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(calldata);
}

static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}

static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
		return;

	if (task->tk_status < 0) {
		dprintk("%s ERROR %d\n", __func__, task->tk_status);
		if (atomic_read(&clp->cl_count) == 1)
			goto out;

		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
	dprintk("<-- %s\n", __func__);
}

static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_args *args;
	struct nfs4_sequence_res *res;

	args = task->tk_msg.rpc_argp;
	res = task->tk_msg.rpc_resp;

	if (nfs41_setup_sequence(clp->cl_session, args, res, task))
		return;
	rpc_call_start(task);
}

static const struct rpc_call_ops nfs41_sequence_ops = {
	.rpc_call_done = nfs41_sequence_call_done,
	.rpc_call_prepare = nfs41_sequence_prepare,
	.rpc_release = nfs41_sequence_release,
};

static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_sequence_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs41_sequence_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
	};

	if (!atomic_inc_not_zero(&clp->cl_count))
		return ERR_PTR(-EIO);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL) {
		nfs_put_client(clp);
		return ERR_PTR(-ENOMEM);
	}
	nfs41_init_sequence(&calldata->args, &calldata->res, 0);
	msg.rpc_argp = &calldata->args;
	msg.rpc_resp = &calldata->res;
	calldata->clp = clp;
	task_setup_data.callback_data = calldata;

	return rpc_run_task(&task_setup_data);
}

static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
	struct rpc_task *task;
	int ret = 0;

	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
		return 0;
	task = _nfs41_proc_sequence(clp, cred);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else
		rpc_put_task_async(task);
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
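/*
 * Editor's note (illustrative): nfs41_proc_async_sequence() above is wired
 * up as the v4.1 ->sched_state_renewal hook (see nfs41_state_renewal_ops
 * near the end of this file), so the periodic lease-renewal code keeps the
 * lease alive simply by firing an asynchronous SEQUENCE, roughly:
 *
 *	ops = clp->cl_mvops->state_renewal_ops;
 *	cred = ops->get_state_renewal_cred_locked(clp);
 *	ops->sched_state_renewal(clp, cred, renew_flags);
 *
 * (sketch under assumptions about the caller; the real logic lives in the
 * state-renewal code, not in this file).
 */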
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_task *task;
	int ret;

	task = _nfs41_proc_sequence(clp, cred);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	ret = rpc_wait_for_completion_task(task);
	if (!ret) {
		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;

		if (task->tk_status == 0)
			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

struct nfs4_reclaim_complete_data {
	struct nfs_client *clp;
	struct nfs41_reclaim_complete_args arg;
	struct nfs41_reclaim_complete_res res;
};

static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	if (nfs41_setup_sequence(calldata->clp->cl_session,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, task))
		return;

	rpc_call_start(task);
}

static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}

static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, res))
		return;

	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_free_reclaim_complete_data(void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	kfree(calldata);
}

static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};
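/*
 * Editor's note (illustrative): RECLAIM_COMPLETE is issued once per client,
 * after reboot recovery has reclaimed (or given up on) all state, through
 * the ->reclaim_complete hook in nfs41_reboot_recovery_ops below.  Until it
 * is sent, the server may keep the client in its grace period and refuse
 * non-reclaim opens with NFS4ERR_GRACE.
 */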
/*
 * Issue a global reclaim complete.
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	dprintk("--> %s\n", __func__);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	calldata->arg.one_fs = 0;

	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out;
	}
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);
	return 0;
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
	 */
	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
				&lgp->res.seq_res, task))
		return;
	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
					  NFS_I(lgp->args.inode)->layout,
					  lgp->args.ctx->state)) {
		rpc_exit(task, NFS4_OK);
		return;
	}
	rpc_call_start(task);
}
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct inode *inode = lgp->args.inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;
	struct nfs4_state *state = NULL;

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
		goto out;

	switch (task->tk_status) {
	case 0:
		goto out;
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		task->tk_status = -NFS4ERR_DELAY;
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (!lo || list_empty(&lo->plh_segs)) {
			spin_unlock(&inode->i_lock);
			/* If the open stateid was bad, then recover it. */
			state = lgp->args.ctx->state;
		} else {
			LIST_HEAD(head);

			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
			spin_unlock(&inode->i_lock);
			/* Mark the bad layout state as invalid, then
			 * retry using the open stateid. */
			pnfs_free_lseg_list(&head);
		}
	}
	if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
		rpc_restart_call_prepare(task);
out:
	dprintk("<-- %s\n", __func__);
}

static size_t max_response_pages(struct nfs_server *server)
{
	u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	return nfs_page_array_len(0, max_resp_sz);
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
	int i;

	if (!pages)
		return;

	for (i = 0; i < size; i++) {
		if (!pages[i])
			break;
		__free_page(pages[i]);
	}
	kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
	struct page **pages;
	int i;

	pages = kcalloc(size, sizeof(struct page *), gfp_flags);
	if (!pages) {
		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			dprintk("%s: failed to allocate page\n", __func__);
			nfs4_free_pages(pages, size);
			return NULL;
		}
	}

	return pages;
}

static void nfs4_layoutget_release(void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	size_t max_pages = max_response_pages(server);

	dprintk("--> %s\n", __func__);
	nfs4_free_pages(lgp->args.layout.pages, max_pages);
	put_nfs_open_context(lgp->args.ctx);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}

static const struct rpc_call_ops nfs4_layoutget_call_ops = {
	.rpc_call_prepare = nfs4_layoutget_prepare,
	.rpc_call_done = nfs4_layoutget_done,
	.rpc_release = nfs4_layoutget_release,
};

struct pnfs_layout_segment *
nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	size_t max_pages = max_response_pages(server);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
		.rpc_argp = &lgp->args,
		.rpc_resp = &lgp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutget_call_ops,
		.callback_data = lgp,
		.flags = RPC_TASK_ASYNC,
	};
	struct pnfs_layout_segment *lseg = NULL;
	int status = 0;

	dprintk("--> %s\n", __func__);

	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
	if (!lgp->args.layout.pages) {
		nfs4_layoutget_release(lgp);
		return ERR_PTR(-ENOMEM);
	}
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;

	lgp->res.layoutp = &lgp->args.layout;
	lgp->res.seq_res.sr_slot = NULL;
	nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return ERR_CAST(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	if (status == 0)
		lseg = pnfs_layout_process(lgp);
	rpc_put_task(task);
	dprintk("<-- %s status=%d\n", __func__, status);
	if (status)
		return ERR_PTR(status);
	return lseg;
}
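/*
 * Editor's example (illustrative arithmetic): the LAYOUTGET reply is
 * decoded into a page array sized for the fore channel's maximum response.
 * With a negotiated max_resp_sz of, say, 1 MB and 4 KB pages,
 * max_response_pages() yields on the order of 256 pages, so the request
 * advertises lgp->args.layout.pglen = 256 * PAGE_SIZE before the call is
 * sent.
 */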
static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;

	dprintk("--> %s\n", __func__);
	if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
				&lrp->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct nfs_server *server;

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lrp->res.seq_res))
		return;

	server = NFS_SERVER(lrp->args.inode);
	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_layoutreturn_release(void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct pnfs_layout_hdr *lo = lrp->args.layout;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (lrp->res.lrs_present)
		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
	lo->plh_block_lgets--;
	spin_unlock(&lo->plh_inode->i_lock);
	pnfs_put_layout_hdr(lrp->args.layout);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}

static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
	.rpc_call_prepare = nfs4_layoutreturn_prepare,
	.rpc_call_done = nfs4_layoutreturn_done,
	.rpc_release = nfs4_layoutreturn_release,
};

int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = lrp->clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
	};
	int status;

	dprintk("--> %s\n", __func__);
	nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}
/*
 * Retrieve the list of Data Server devices from the MDS.
 */
static int _nfs4_getdevicelist(struct nfs_server *server,
				    const struct nfs_fh *fh,
				    struct pnfs_devicelist *devlist)
{
	struct nfs4_getdevicelist_args args = {
		.fh = fh,
		.layoutclass = server->pnfs_curr_ld->id,
	};
	struct nfs4_getdevicelist_res res = {
		.devlist = devlist,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

int nfs4_proc_getdevicelist(struct nfs_server *server,
			    const struct nfs_fh *fh,
			    struct pnfs_devicelist *devlist)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_getdevicelist(server, fh, devlist),
				&exception);
	} while (exception.retry);

	dprintk("%s: err=%d, num_devs=%u\n", __func__,
		err, devlist->num_devs);

	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);

static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}

int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
					_nfs4_proc_getdeviceinfo(server, pdev),
					&exception);
	} while (exception.retry);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);

static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
	case -NFS4ERR_BADIOMODE:     /* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT:     /* no layout */
	case -NFS4ERR_GRACE:	     /* loca_reclaim is always false */
		task->tk_status = 0;
		break;
	case 0:
		nfs_post_op_update_inode_force_wcc(data->args.inode,
						   data->res.fattr);
		break;
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
}
static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct pnfs_layout_segment *lseg, *tmp;
	unsigned long *bitlock = &NFS_I(data->args.inode)->flags;

	pnfs_cleanup_layoutcommit(data);
	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
				       &lseg->pls_flags))
			pnfs_put_lseg(lseg);
	}

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);

	put_rpccred(data->cred);
	kfree(data);
}

static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};

int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(data->args.inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutcommit_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;
	int status = 0;

	dprintk("NFS: %4d initiating layoutcommit call. sync %d "
		"lbw: %llu inode %lu\n",
		data->task.tk_pid, sync,
		data->args.lastbytewritten,
		data->args.inode->i_ino);

	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (sync == false)
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0)
		goto out;
	status = task->tk_status;
out:
	dprintk("%s: status %d\n", __func__, status);
	rpc_put_task(task);
	return status;
}

static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs41_secinfo_no_name_args args = {
		.style = SECINFO_STYLE_CURRENT_FH,
	};
	struct nfs4_secinfo_res res = {
		.flavors = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
		switch (err) {
		case 0:
		case -NFS4ERR_WRONGSEC:
		case -NFS4ERR_NOTSUPP:
			goto out;
		default:
			err = nfs4_handle_exception(server, err, &exception);
		}
	} while (exception.retry);
out:
	return err;
}
static int
nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		    struct nfs_fsinfo *info)
{
	int err;
	struct page *page;
	rpc_authflavor_t flavor;
	struct nfs4_secinfo_flavors *flavors;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	flavors = page_address(page);
	err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);

	/*
	 * Fall back on "guess and check" method if
	 * the server doesn't support SECINFO_NO_NAME
	 */
	if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
		err = nfs4_find_root_sec(server, fhandle, info);
		goto out_freepage;
	}
	if (err)
		goto out_freepage;

	flavor = nfs_find_best_sec(flavors);
	if (err == 0)
		err = nfs4_lookup_root_sec(server, fhandle, info, flavor);

out_freepage:
	put_page(page);
	if (err == -EACCES)
		return -EPERM;
out:
	return err;
}

static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	int status;
	struct nfs41_test_stateid_args args = {
		.stateid = stateid,
	};
	struct nfs41_test_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	dprintk("NFS call test_stateid %p\n", stateid);
	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
	status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status != NFS_OK) {
		dprintk("NFS reply test_stateid: failed, %d\n", status);
		return status;
	}
	dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
	return -res.status;
}

/**
 * nfs41_test_stateid - perform a TEST_STATEID operation
 *
 * @server: server / transport on which to perform the operation
 * @stateid: state ID to test
 *
 * Returns NFS_OK if the server recognizes that "stateid" is valid.
 * Otherwise a negative NFS4ERR value is returned if the operation
 * failed or the state ID is not currently valid.
 */
static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs41_test_stateid(server, stateid);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
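/*
 * Editor's note (illustrative): the expired-state recovery paths use
 * TEST_STATEID to decide whether a delegation, open or lock stateid
 * survived on the server, and FREE_STATEID (below) to dispose of a
 * stateid the client no longer wants, e.g. (sketch under assumptions
 * about the caller):
 *
 *	if (nfs41_test_stateid(server, stateid) != NFS_OK) {
 *		nfs41_free_stateid(server, stateid);
 *		nfs4_schedule_stateid_recovery(server, state);
 *	}
 */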
static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	struct nfs41_free_stateid_args args = {
		.stateid = stateid,
	};
	struct nfs41_free_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("NFS call free_stateid %p\n", stateid);
	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
	status = nfs4_call_sync_sequence(server->client, server, &msg,
					 &args.seq_args, &res.seq_res, 1);
	dprintk("NFS reply free_stateid: %d\n", status);
	return status;
}

/**
 * nfs41_free_stateid - perform a FREE_STATEID operation
 *
 * @server: server / transport on which to perform the operation
 * @stateid: state ID to release
 *
 * Returns NFS_OK if the server freed "stateid".  Otherwise a
 * negative NFS4ERR value is returned.
 */
static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_free_stateid(server, stateid);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static bool nfs41_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
		return false;

	if (s1->seqid == s2->seqid)
		return true;
	if (s1->seqid == 0 || s2->seqid == 0)
		return true;

	return false;
}

#endif /* CONFIG_NFS_V4_1 */

static bool nfs4_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	return nfs4_stateid_match(s1, s2);
}

static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
	.detect_trunking = nfs40_discover_server_trunking,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred	= nfs4_get_exchange_id_cred,
	.reclaim_complete = nfs41_proc_reclaim_complete,
	.detect_trunking = nfs41_discover_server_trunking,
};
#endif /* CONFIG_NFS_V4_1 */

static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs4_open_expired,
	.recover_lock	= nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs41_open_expired,
	.recover_lock	= nfs41_lock_expired,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred	= nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */

static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
	.renew_lease = nfs4_proc_renew,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
	.sched_state_renewal = nfs41_proc_async_sequence,
	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
	.renew_lease = nfs4_proc_sequence,
};
#endif
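/*
 * Editor's note (illustrative): the recovery and renewal tables above are
 * bundled into a per-minor-version ops vector below; generic code selects
 * the right behaviour through clp->cl_mvops instead of testing the minor
 * version inline, e.g. (sketch only, not a quote of a real call site):
 *
 *	if (server->nfs_client->cl_mvops->match_stateid(&s1, &s2))
 *		;	/* both stateids refer to the same state */
 */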
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.call_sync = _nfs4_call_sync,
	.match_stateid = nfs4_match_stateid,
	.find_root_sec = nfs4_find_root_sec,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version = 1,
	.call_sync = _nfs4_call_sync_session,
	.match_stateid = nfs41_match_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
};
#endif

const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
};

const struct inode_operations nfs4_dir_inode_operations = {
	.create		= nfs_create,
	.lookup		= nfs_lookup,
	.atomic_open	= nfs_atomic_open,
	.link		= nfs_link,
	.unlink		= nfs_unlink,
	.symlink	= nfs_symlink,
	.mkdir		= nfs_mkdir,
	.rmdir		= nfs_rmdir,
	.mknod		= nfs_mknod,
	.rename		= nfs_rename,
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.getxattr	= generic_getxattr,
	.setxattr	= generic_setxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
};

static const struct inode_operations nfs4_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.getxattr	= generic_getxattr,
	.setxattr	= generic_setxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
};
const struct nfs_rpc_ops nfs_v4_clientops = {
	.version	= 4,			/* protocol version */
	.dentry_ops	= &nfs4_dentry_operations,
	.dir_inode_ops	= &nfs4_dir_inode_operations,
	.file_inode_ops	= &nfs4_file_inode_operations,
	.file_ops	= &nfs4_file_operations,
	.getroot	= nfs4_proc_get_root,
	.submount	= nfs4_submount,
	.try_mount	= nfs4_try_mount,
	.getattr	= nfs4_proc_getattr,
	.setattr	= nfs4_proc_setattr,
	.lookup		= nfs4_proc_lookup,
	.access		= nfs4_proc_access,
	.readlink	= nfs4_proc_readlink,
	.create		= nfs4_proc_create,
	.remove		= nfs4_proc_remove,
	.unlink_setup	= nfs4_proc_unlink_setup,
	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
	.unlink_done	= nfs4_proc_unlink_done,
	.rename		= nfs4_proc_rename,
	.rename_setup	= nfs4_proc_rename_setup,
	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
	.rename_done	= nfs4_proc_rename_done,
	.link		= nfs4_proc_link,
	.symlink	= nfs4_proc_symlink,
	.mkdir		= nfs4_proc_mkdir,
	.rmdir		= nfs4_proc_remove,
	.readdir	= nfs4_proc_readdir,
	.mknod		= nfs4_proc_mknod,
	.statfs		= nfs4_proc_statfs,
	.fsinfo		= nfs4_proc_fsinfo,
	.pathconf	= nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent	= nfs4_decode_dirent,
	.read_setup	= nfs4_proc_read_setup,
	.read_pageio_init = pnfs_pageio_init_read,
	.read_rpc_prepare = nfs4_proc_read_rpc_prepare,
	.read_done	= nfs4_read_done,
	.write_setup	= nfs4_proc_write_setup,
	.write_pageio_init = pnfs_pageio_init_write,
	.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
	.write_done	= nfs4_write_done,
	.commit_setup	= nfs4_proc_commit_setup,
	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
	.commit_done	= nfs4_commit_done,
	.lock		= nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context	= nfs4_close_context,
	.open_context	= nfs4_atomic_open,
	.have_delegation = nfs4_have_delegation,
	.return_delegation = nfs4_inode_return_delegation,
	.alloc_client	= nfs4_alloc_client,
	.init_client	= nfs4_init_client,
	.free_client	= nfs4_free_client,
	.create_server	= nfs4_create_server,
	.clone_server	= nfs_clone_server,
};

static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.prefix	= XATTR_NAME_NFSV4_ACL,
	.list	= nfs4_xattr_list_nfs4_acl,
	.get	= nfs4_xattr_get_nfs4_acl,
	.set	= nfs4_xattr_set_nfs4_acl,
};

const struct xattr_handler *nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
	NULL
};

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */