/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson   <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

#define NFS4_MAX_LOOP_ON_RECOVER (10)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
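/* Ask the server whether it still recognizes a stateid (TEST_STATEID), and
 * release one it no longer needs (FREE_STATEID); used by state recovery. */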
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
};

const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);		/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);			/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
	int res;

	might_sleep();

	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (res)
		return res;

	if (clp->cl_cons_state < 0)
		return clp->cl_cons_state;
	return 0;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	freezable_schedule_timeout_killable(*timeout);
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	*timeout <<= 1;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
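/*
 * Overview of the cases handled below: lost, revoked or stale stateids
 * schedule stateid and/or lease recovery before the retry; NFSv4.1 session
 * errors schedule a session reset; GRACE, DELAY and EKEYEXPIRED back off
 * via nfs4_delay() and retry; BADOWNER/BADNAME re-enable the idmapper and
 * retry.
 */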
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_OPENMODE:
		if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
			nfs4_inode_return_delegation(inode);
			exception->retry = 1;
			return 0;
		}
		if (state == NULL)
			break;
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL)
			nfs4_schedule_stateid_recovery(server, state);
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR: %d Reset session\n", __func__,
			errorcode);
		nfs4_schedule_session_recovery(clp->cl_session, errorcode);
		exception->retry = 1;
		break;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
	case -NFS4ERR_GRACE:
	case -NFS4ERR_DELAY:
	case -EKEYEXPIRED:
		ret = nfs4_delay(server->client, &exception->timeout);
		if (ret != 0)
			break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}


static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}

#if defined(CONFIG_NFS_V4_1)

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
		if (slotid < tbl->max_slots)
			tbl->highest_used_slotid = slotid;
		else
			tbl->highest_used_slotid = NFS4_NO_SLOT;
	}
	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
		slotid, tbl->highest_used_slotid);
}

bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	return true;
}

/*
 * Signal state manager thread if session fore channel is drained
 */
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
				nfs4_set_task_privileged, NULL);
		return;
	}

	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;

	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
	complete(&ses->fc_slot_table.complete);
}

/*
 * Signal state manager thread if session back channel is drained
 */
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;
	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
	complete(&ses->bc_slot_table.complete);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot_table *tbl;

	tbl = &res->sr_session->fc_slot_table;
	if (!res->sr_slot) {
		/* just wake up the next guy waiting since
		 * we may have not consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
		return;
	}

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
	nfs4_check_drain_fc_complete(res->sr_session);
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
}

static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation.
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_session == NULL)
		return 1;
	return nfs41_sequence_done(task, res);
}

/*
 * nfs4_find_slot - efficiently look for a free slot
 *
 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and respectively set up the sequence operation args.
 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
static u32
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
	u32 slotid;
	u32 ret_id = NFS4_NO_SLOT;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slots);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
			tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret_id = slotid;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
	return ret_id;
}

static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_session = NULL;
	args->sa_cache_this = 0;
	if (cache_reply)
		args->sa_cache_this = 1;
	res->sr_session = NULL;
	res->sr_slot = NULL;
}

int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u32 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		return 0;

	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* The state manager will wait until the slot table is empty */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s session is draining\n", __func__);
		return -EAGAIN;
	}

	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
		return -EAGAIN;
	}

	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_NO_SLOT) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
		return -EAGAIN;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_slot = slot;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

int nfs4_setup_sequence(const struct nfs_server *server,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (session == NULL)
		goto out;

	dprintk("--> %s clp %p session %p sr_slot %td\n",
		__func__, session->clp, session, res->sr_slot ?
		res->sr_slot - session->fc_slot_table.slots : -1);

	ret = nfs41_setup_sequence(session, args, res, task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
				data->seq_res, task))
		return;
	rpc_call_start(task);
}

static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res,
				   int privileged)
{
	int ret;
	struct rpc_task *task;
	struct nfs41_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &nfs41_call_sync_ops,
		.callback_data = &data
	};

	if (privileged)
		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
			    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
}

#else
static inline
void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	return 1;
}
#endif /* CONFIG_NFS_V4_1 */

int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
		    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return rpc_call_sync(clnt, msg, 0);
}

static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						args, res, cache_reply);
}

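/*
 * Apply the change_info4 returned by a directory-modifying operation to the
 * cached directory attributes.  If the server could not apply the change
 * atomically, or our cached pre-operation change attribute is stale, force
 * a lookup revalidation of the directory.
 */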
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	int rpc_status;
	int cancelled;
};


static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (attrs != NULL && attrs->ia_valid != 0) {
		__be32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
		return 0;
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

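/*
 * Prefer the delegation stateid when the inode still holds a delegation
 * covering the requested open mode (and, if a particular delegation stateid
 * was passed in, only when it matches).  Otherwise fall back to the open
 * stateid.  Returns 1 if the state was updated, 0 otherwise.
 */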
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (nfsi->delegation != deleg_cur ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}

	return ret;
}


static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		if (can_open_cached(state, fmode, open_mode)) {
			spin_lock(&state->owner->so_lock);
			if (can_open_cached(state, fmode, open_mode)) {
				update_open_stateflags(state, fmode);
				spin_unlock(&state->owner->so_lock);
				goto out_return_state;
			}
			spin_unlock(&state->owner->so_lock);
		}
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
		if (ret != 0)
			goto out;
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	struct nfs_delegation *delegation;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0) {
		struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
		int delegation_flags = 0;

		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(inode)->delegation);
		if (delegation)
			delegation_flags = delegation->flags;
		rcu_read_unlock();
		if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
			pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
					"returning a delegation for "
					"OPEN(CLAIM_DELEGATE_CUR)\n",
					clp->cl_hostname);
		} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
			nfs_inode_set_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
		else
			nfs_inode_reclaim_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
	}

	update_open_stateid(state, &data->o_res.stateid, NULL,
			data->o_arg.fmode);
	iput(inode);
out:
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);
	*res = newstate;
	return 0;
}

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 *	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
	opendata->o_arg.fh = NFS_FH(state->inode);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	ret = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return ret;
}

int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	struct nfs_server *server = NFS_SERVER(state->inode);
	int err;
	do {
		err = _nfs4_open_delegation_recall(ctx, state, stateid);
		switch (err) {
		case 0:
		case -ENOENT:
		case -ESTALE:
			goto out;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
			nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_EXPIRED:
			/* Don't recall a delegation if it was lost */
			nfs4_schedule_lease_recovery(server->nfs_client);
			goto out;
		case -ERESTARTSYS:
			/*
			 * The show must go on: exit, but mark the
			 * stateid as needing recovery.
			 */
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			nfs_inode_find_state_and_recover(state->inode,
					stateid);
			nfs4_schedule_stateid_recovery(server, state);
		case -EKEYEXPIRED:
			/*
			 * User RPCSEC_GSS context has expired.
			 * We cannot recover this stateid now, so
			 * skip it and allow recovery thread to
			 * proceed.
			 */
		case -ENOMEM:
			err = 0;
			goto out;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (!data->rpc_done)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};

/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}

static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		return;
	/*
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	 */
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
			goto out_no_action;
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
		rcu_read_unlock();
	}
	/* Update client id. */
	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res, task))
		return;
	rpc_call_start(task);
	return;
unlock_no_action:
	rcu_read_unlock();
out_no_action:
	task->tk_action = NULL;

}

static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_open_prepare(task, calldata);
}

static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;

	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
		return;

	if (task->tk_status == 0) {
		switch (data->o_res.f_attr->mode & S_IFMT) {
		case S_IFREG:
			break;
		case S_IFLNK:
			data->rpc_status = -ELOOP;
			break;
		case S_IFDIR:
			data->rpc_status = -EISDIR;
			break;
		default:
			data->rpc_status = -ENOTDIR;
		}
		renew_lease(data->o_res.server, data->timestamp);
		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
			nfs_confirm_seqid(&data->owner->so_seqid, 0);
	}
	data->rpc_done = 1;
}

static void nfs4_open_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (data->rpc_status != 0 || !data->rpc_done)
		goto out_free;
	/* In case we need an open_confirm, no cleanup! */
	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_ops = {
	.rpc_call_prepare = nfs4_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

static const struct rpc_call_ops nfs4_recover_open_ops = {
	.rpc_call_prepare = nfs4_recover_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
		.rpc_argp = o_arg,
		.rpc_resp = o_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->cancelled = 0;
	if (isrecover)
		task_setup_data.callback_ops = &nfs4_recover_open_ops;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);

	return status;
}

static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 1);
	if (status != 0 || !data->rpc_done)
		return status;

	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);

	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}

	return status;
}

/*
 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
 */
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 0);
	if (!data->rpc_done)
		return status;
	if (status != 0) {
		if (status == -NFS4ERR_BADNAME &&
		    !(o_arg->open_flags & O_CREAT))
			return -ENOENT;
		return status;
	}

	nfs_fattr_map_and_free_names(server, &data->f_attr);

	if (o_arg->open_flags & O_CREAT)
		update_changeattr(dir, &o_res->cinfo);
	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
		server->caps &= ~NFS_CAP_POSIX_LOCK;
	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}
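	/* The server did not return the file attributes with the OPEN reply;
	 * fetch them with an explicit GETATTR before declaring success. */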
	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
	return 0;
}

static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = nfs4_wait_clnt_recover(clp);
		if (ret != 0)
			break;
		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
		    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
			break;
		nfs4_schedule_state_manager(clp);
		ret = -EIO;
	}
	return ret;
}

static int nfs4_recover_expired_lease(struct nfs_server *server)
{
	return nfs4_client_recover_expired_lease(server->nfs_client);
}

/*
 * OPEN_EXPIRED:
 *	reclaim state on the server after a network partition.
 *	Assumes caller holds the appropriate lock
 */
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	ret = nfs4_open_recover(opendata, state);
	if (ret == -ESTALE)
		d_drop(ctx->dentry);
	nfs4_opendata_put(opendata);
	return ret;
}

static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_open_expired(ctx, state);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}

static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = nfs4_do_open_expired(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

#if defined(CONFIG_NFS_V4_1)
static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->stateid;
	int status;

	/* If a state reset has been done, test_stateid is unneeded */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		return;

	status = nfs41_test_stateid(server, stateid);
	if (status != NFS_OK) {
		/* Free the stateid unless the server explicitly
		 * informs us the stateid is unrecognized. */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, stateid);

		clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
}

/**
 * nfs41_check_open_stateid - possibly free an open stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
 */
static int nfs41_check_open_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->stateid;
	int status;

	/* If a state reset has been done, test_stateid is unneeded */
	if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
		return -NFS4ERR_BAD_STATEID;

	status = nfs41_test_stateid(server, stateid);
	if (status != NFS_OK) {
		/* Free the stateid unless the server explicitly
		 * informs us the stateid is unrecognized. */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, stateid);

		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	return status;
}

static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int status;

	nfs41_clear_delegation_stateid(state);
	status = nfs41_check_open_stateid(state);
	if (status != NFS_OK)
		status = nfs4_open_expired(sp, state);
	return status;
}
#endif

/*
 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
 * fields corresponding to attributes that were used to store the verifier.
 * Make sure we clobber those fields in the later setattr call
 */
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
{
	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
	    !(sattr->ia_valid & ATTR_ATIME_SET))
		sattr->ia_valid |= ATTR_ATIME;

	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
	    !(sattr->ia_valid & ATTR_MTIME_SET))
		sattr->ia_valid |= ATTR_MTIME;
}

/*
 * Returns a referenced nfs4_state
 */
static int _nfs4_do_open(struct inode *dir,
			struct dentry *dentry,
			fmode_t fmode,
			int flags,
			struct iattr *sattr,
			struct rpc_cred *cred,
			struct nfs4_state **res,
			struct nfs4_threshold **ctx_th)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state = NULL;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *opendata;
	int status;

	/* Protect against reboot recovery conflicts */
	status = -ENOMEM;
	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
	if (sp == NULL) {
		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
		goto out_err;
	}
	status = nfs4_recover_expired_lease(server);
	if (status != 0)
		goto err_put_state_owner;
	if (dentry->d_inode != NULL)
		nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
	status = -ENOMEM;
	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
	if (opendata == NULL)
		goto err_put_state_owner;

	if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
		opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
		if (!opendata->f_attr.mdsthreshold)
			goto err_opendata_put;
		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
	}
	if (dentry->d_inode != NULL)
		opendata->state = nfs4_get_open_state(dentry->d_inode, sp);

	status = _nfs4_proc_open(opendata);
	if (status != 0)
		goto err_opendata_put;

nfs4_opendata_to_nfs4_state(opendata); 1893 status = PTR_ERR(state); 1894 if (IS_ERR(state)) 1895 goto err_opendata_put; 1896 if (server->caps & NFS_CAP_POSIX_LOCK) 1897 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 1898 1899 if (opendata->o_arg.open_flags & O_EXCL) { 1900 nfs4_exclusive_attrset(opendata, sattr); 1901 1902 nfs_fattr_init(opendata->o_res.f_attr); 1903 status = nfs4_do_setattr(state->inode, cred, 1904 opendata->o_res.f_attr, sattr, 1905 state); 1906 if (status == 0) 1907 nfs_setattr_update_inode(state->inode, sattr); 1908 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); 1909 } 1910 1911 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 1912 *ctx_th = opendata->f_attr.mdsthreshold; 1913 else 1914 kfree(opendata->f_attr.mdsthreshold); 1915 opendata->f_attr.mdsthreshold = NULL; 1916 1917 nfs4_opendata_put(opendata); 1918 nfs4_put_state_owner(sp); 1919 *res = state; 1920 return 0; 1921 err_opendata_put: 1922 kfree(opendata->f_attr.mdsthreshold); 1923 nfs4_opendata_put(opendata); 1924 err_put_state_owner: 1925 nfs4_put_state_owner(sp); 1926 out_err: 1927 *res = NULL; 1928 return status; 1929 } 1930 1931 1932 static struct nfs4_state *nfs4_do_open(struct inode *dir, 1933 struct dentry *dentry, 1934 fmode_t fmode, 1935 int flags, 1936 struct iattr *sattr, 1937 struct rpc_cred *cred, 1938 struct nfs4_threshold **ctx_th) 1939 { 1940 struct nfs4_exception exception = { }; 1941 struct nfs4_state *res; 1942 int status; 1943 1944 fmode &= FMODE_READ|FMODE_WRITE; 1945 do { 1946 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, 1947 &res, ctx_th); 1948 if (status == 0) 1949 break; 1950 /* NOTE: BAD_SEQID means the server and client disagree about the 1951 * book-keeping w.r.t. state-changing operations 1952 * (OPEN/CLOSE/LOCK/LOCKU...) 1953 * It is actually a sign of a bug on the client or on the server. 1954 * 1955 * If we receive a BAD_SEQID error in the particular case of 1956 * doing an OPEN, we assume that nfs_increment_open_seqid() will 1957 * have unhashed the old state_owner for us, and that we can 1958 * therefore safely retry using a new one. We should still warn 1959 * the user though... 1960 */ 1961 if (status == -NFS4ERR_BAD_SEQID) { 1962 pr_warn_ratelimited("NFS: v4 server %s " 1963 " returned a bad sequence-id error!\n", 1964 NFS_SERVER(dir)->nfs_client->cl_hostname); 1965 exception.retry = 1; 1966 continue; 1967 } 1968 /* 1969 * BAD_STATEID on OPEN means that the server cancelled our 1970 * state before it received the OPEN_CONFIRM. 1971 * Recover by retrying the request as per the discussion 1972 * on Page 181 of RFC3530. 
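 *
 * In both the BAD_SEQID and BAD_STATEID cases, recovery simply means
 * setting exception.retry and letting the surrounding loop call
 * _nfs4_do_open() again.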
1973 */ 1974 if (status == -NFS4ERR_BAD_STATEID) { 1975 exception.retry = 1; 1976 continue; 1977 } 1978 if (status == -EAGAIN) { 1979 /* We must have found a delegation */ 1980 exception.retry = 1; 1981 continue; 1982 } 1983 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 1984 status, &exception)); 1985 } while (exception.retry); 1986 return res; 1987 } 1988 1989 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 1990 struct nfs_fattr *fattr, struct iattr *sattr, 1991 struct nfs4_state *state) 1992 { 1993 struct nfs_server *server = NFS_SERVER(inode); 1994 struct nfs_setattrargs arg = { 1995 .fh = NFS_FH(inode), 1996 .iap = sattr, 1997 .server = server, 1998 .bitmask = server->attr_bitmask, 1999 }; 2000 struct nfs_setattrres res = { 2001 .fattr = fattr, 2002 .server = server, 2003 }; 2004 struct rpc_message msg = { 2005 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2006 .rpc_argp = &arg, 2007 .rpc_resp = &res, 2008 .rpc_cred = cred, 2009 }; 2010 unsigned long timestamp = jiffies; 2011 int status; 2012 2013 nfs_fattr_init(fattr); 2014 2015 if (state != NULL) { 2016 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2017 current->files, current->tgid); 2018 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, 2019 FMODE_WRITE)) { 2020 /* Use that stateid */ 2021 } else 2022 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2023 2024 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2025 if (status == 0 && state != NULL) 2026 renew_lease(server, timestamp); 2027 return status; 2028 } 2029 2030 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2031 struct nfs_fattr *fattr, struct iattr *sattr, 2032 struct nfs4_state *state) 2033 { 2034 struct nfs_server *server = NFS_SERVER(inode); 2035 struct nfs4_exception exception = { 2036 .state = state, 2037 .inode = inode, 2038 }; 2039 int err; 2040 do { 2041 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); 2042 switch (err) { 2043 case -NFS4ERR_OPENMODE: 2044 if (state && !(state->state & FMODE_WRITE)) { 2045 err = -EBADF; 2046 if (sattr->ia_valid & ATTR_OPEN) 2047 err = -EACCES; 2048 goto out; 2049 } 2050 } 2051 err = nfs4_handle_exception(server, err, &exception); 2052 } while (exception.retry); 2053 out: 2054 return err; 2055 } 2056 2057 struct nfs4_closedata { 2058 struct inode *inode; 2059 struct nfs4_state *state; 2060 struct nfs_closeargs arg; 2061 struct nfs_closeres res; 2062 struct nfs_fattr fattr; 2063 unsigned long timestamp; 2064 bool roc; 2065 u32 roc_barrier; 2066 }; 2067 2068 static void nfs4_free_closedata(void *data) 2069 { 2070 struct nfs4_closedata *calldata = data; 2071 struct nfs4_state_owner *sp = calldata->state->owner; 2072 struct super_block *sb = calldata->state->inode->i_sb; 2073 2074 if (calldata->roc) 2075 pnfs_roc_release(calldata->state->inode); 2076 nfs4_put_open_state(calldata->state); 2077 nfs_free_seqid(calldata->arg.seqid); 2078 nfs4_put_state_owner(sp); 2079 nfs_sb_deactive(sb); 2080 kfree(calldata); 2081 } 2082 2083 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, 2084 fmode_t fmode) 2085 { 2086 spin_lock(&state->owner->so_lock); 2087 if (!(fmode & FMODE_READ)) 2088 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2089 if (!(fmode & FMODE_WRITE)) 2090 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2091 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2092 spin_unlock(&state->owner->so_lock); 2093 } 2094 2095 static void nfs4_close_done(struct rpc_task *task, void *data) 2096 { 2097 struct 
nfs4_closedata *calldata = data; 2098 struct nfs4_state *state = calldata->state; 2099 struct nfs_server *server = NFS_SERVER(calldata->inode); 2100 2101 dprintk("%s: begin!\n", __func__); 2102 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2103 return; 2104 /* hmm. we are done with the inode, and in the process of freeing 2105 * the state_owner. we keep this around to process errors 2106 */ 2107 switch (task->tk_status) { 2108 case 0: 2109 if (calldata->roc) 2110 pnfs_roc_set_barrier(state->inode, 2111 calldata->roc_barrier); 2112 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2113 renew_lease(server, calldata->timestamp); 2114 nfs4_close_clear_stateid_flags(state, 2115 calldata->arg.fmode); 2116 break; 2117 case -NFS4ERR_STALE_STATEID: 2118 case -NFS4ERR_OLD_STATEID: 2119 case -NFS4ERR_BAD_STATEID: 2120 case -NFS4ERR_EXPIRED: 2121 if (calldata->arg.fmode == 0) 2122 break; 2123 default: 2124 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2125 rpc_restart_call_prepare(task); 2126 } 2127 nfs_release_seqid(calldata->arg.seqid); 2128 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2129 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2130 } 2131 2132 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2133 { 2134 struct nfs4_closedata *calldata = data; 2135 struct nfs4_state *state = calldata->state; 2136 int call_close = 0; 2137 2138 dprintk("%s: begin!\n", __func__); 2139 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2140 return; 2141 2142 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2143 calldata->arg.fmode = FMODE_READ|FMODE_WRITE; 2144 spin_lock(&state->owner->so_lock); 2145 /* Calculate the change in open mode */ 2146 if (state->n_rdwr == 0) { 2147 if (state->n_rdonly == 0) { 2148 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 2149 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2150 calldata->arg.fmode &= ~FMODE_READ; 2151 } 2152 if (state->n_wronly == 0) { 2153 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 2154 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2155 calldata->arg.fmode &= ~FMODE_WRITE; 2156 } 2157 } 2158 spin_unlock(&state->owner->so_lock); 2159 2160 if (!call_close) { 2161 /* Note: exit _without_ calling nfs4_close_done */ 2162 task->tk_action = NULL; 2163 goto out; 2164 } 2165 2166 if (calldata->arg.fmode == 0) { 2167 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2168 if (calldata->roc && 2169 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) { 2170 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq, 2171 task, NULL); 2172 goto out; 2173 } 2174 } 2175 2176 nfs_fattr_init(calldata->res.fattr); 2177 calldata->timestamp = jiffies; 2178 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode), 2179 &calldata->arg.seq_args, 2180 &calldata->res.seq_res, 2181 task)) 2182 goto out; 2183 rpc_call_start(task); 2184 out: 2185 dprintk("%s: done!\n", __func__); 2186 } 2187 2188 static const struct rpc_call_ops nfs4_close_ops = { 2189 .rpc_call_prepare = nfs4_close_prepare, 2190 .rpc_call_done = nfs4_close_done, 2191 .rpc_release = nfs4_free_closedata, 2192 }; 2193 2194 /* 2195 * It is possible for data to be read/written from a mem-mapped file 2196 * after the sys_close call (which hits the vfs layer as a flush). 2197 * This means that we can't safely call nfsv4 close on a file until 2198 * the inode is cleared. 
This in turn means that we are not good 2199 * NFSv4 citizens - we do not indicate to the server to update the file's 2200 * share state even when we are done with one of the three share 2201 * stateid's in the inode. 2202 * 2203 * NOTE: Caller must be holding the sp->so_owner semaphore! 2204 */ 2205 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) 2206 { 2207 struct nfs_server *server = NFS_SERVER(state->inode); 2208 struct nfs4_closedata *calldata; 2209 struct nfs4_state_owner *sp = state->owner; 2210 struct rpc_task *task; 2211 struct rpc_message msg = { 2212 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2213 .rpc_cred = state->owner->so_cred, 2214 }; 2215 struct rpc_task_setup task_setup_data = { 2216 .rpc_client = server->client, 2217 .rpc_message = &msg, 2218 .callback_ops = &nfs4_close_ops, 2219 .workqueue = nfsiod_workqueue, 2220 .flags = RPC_TASK_ASYNC, 2221 }; 2222 int status = -ENOMEM; 2223 2224 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2225 if (calldata == NULL) 2226 goto out; 2227 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2228 calldata->inode = state->inode; 2229 calldata->state = state; 2230 calldata->arg.fh = NFS_FH(state->inode); 2231 calldata->arg.stateid = &state->open_stateid; 2232 /* Serialization for the sequence id */ 2233 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); 2234 if (calldata->arg.seqid == NULL) 2235 goto out_free_calldata; 2236 calldata->arg.fmode = 0; 2237 calldata->arg.bitmask = server->cache_consistency_bitmask; 2238 calldata->res.fattr = &calldata->fattr; 2239 calldata->res.seqid = calldata->arg.seqid; 2240 calldata->res.server = server; 2241 calldata->roc = roc; 2242 nfs_sb_active(calldata->inode->i_sb); 2243 2244 msg.rpc_argp = &calldata->arg; 2245 msg.rpc_resp = &calldata->res; 2246 task_setup_data.callback_data = calldata; 2247 task = rpc_run_task(&task_setup_data); 2248 if (IS_ERR(task)) 2249 return PTR_ERR(task); 2250 status = 0; 2251 if (wait) 2252 status = rpc_wait_for_completion_task(task); 2253 rpc_put_task(task); 2254 return status; 2255 out_free_calldata: 2256 kfree(calldata); 2257 out: 2258 if (roc) 2259 pnfs_roc_release(state->inode); 2260 nfs4_put_open_state(state); 2261 nfs4_put_state_owner(sp); 2262 return status; 2263 } 2264 2265 static struct inode * 2266 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2267 { 2268 struct nfs4_state *state; 2269 2270 /* Protect against concurrent sillydeletes */ 2271 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, 2272 ctx->cred, &ctx->mdsthreshold); 2273 if (IS_ERR(state)) 2274 return ERR_CAST(state); 2275 ctx->state = state; 2276 return igrab(state->inode); 2277 } 2278 2279 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2280 { 2281 if (ctx->state == NULL) 2282 return; 2283 if (is_sync) 2284 nfs4_close_sync(ctx->state, ctx->mode); 2285 else 2286 nfs4_close_state(ctx->state, ctx->mode); 2287 } 2288 2289 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2290 { 2291 struct nfs4_server_caps_arg args = { 2292 .fhandle = fhandle, 2293 }; 2294 struct nfs4_server_caps_res res = {}; 2295 struct rpc_message msg = { 2296 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2297 .rpc_argp = &args, 2298 .rpc_resp = &res, 2299 }; 2300 int status; 2301 2302 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2303 if (status == 0) { 2304 
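/* Cache the attribute bitmask advertised by the server, then
 * recompute the NFS_CAP_* capability flags from it. */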
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2305 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2306 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2307 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2308 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2309 NFS_CAP_CTIME|NFS_CAP_MTIME); 2310 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) 2311 server->caps |= NFS_CAP_ACLS; 2312 if (res.has_links != 0) 2313 server->caps |= NFS_CAP_HARDLINKS; 2314 if (res.has_symlinks != 0) 2315 server->caps |= NFS_CAP_SYMLINKS; 2316 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2317 server->caps |= NFS_CAP_FILEID; 2318 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2319 server->caps |= NFS_CAP_MODE; 2320 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2321 server->caps |= NFS_CAP_NLINK; 2322 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2323 server->caps |= NFS_CAP_OWNER; 2324 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2325 server->caps |= NFS_CAP_OWNER_GROUP; 2326 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2327 server->caps |= NFS_CAP_ATIME; 2328 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2329 server->caps |= NFS_CAP_CTIME; 2330 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2331 server->caps |= NFS_CAP_MTIME; 2332 2333 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2334 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2335 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2336 server->acl_bitmask = res.acl_bitmask; 2337 server->fh_expire_type = res.fh_expire_type; 2338 } 2339 2340 return status; 2341 } 2342 2343 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2344 { 2345 struct nfs4_exception exception = { }; 2346 int err; 2347 do { 2348 err = nfs4_handle_exception(server, 2349 _nfs4_server_capabilities(server, fhandle), 2350 &exception); 2351 } while (exception.retry); 2352 return err; 2353 } 2354 2355 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2356 struct nfs_fsinfo *info) 2357 { 2358 struct nfs4_lookup_root_arg args = { 2359 .bitmask = nfs4_fattr_bitmap, 2360 }; 2361 struct nfs4_lookup_res res = { 2362 .server = server, 2363 .fattr = info->fattr, 2364 .fh = fhandle, 2365 }; 2366 struct rpc_message msg = { 2367 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2368 .rpc_argp = &args, 2369 .rpc_resp = &res, 2370 }; 2371 2372 nfs_fattr_init(info->fattr); 2373 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2374 } 2375 2376 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2377 struct nfs_fsinfo *info) 2378 { 2379 struct nfs4_exception exception = { }; 2380 int err; 2381 do { 2382 err = _nfs4_lookup_root(server, fhandle, info); 2383 switch (err) { 2384 case 0: 2385 case -NFS4ERR_WRONGSEC: 2386 goto out; 2387 default: 2388 err = nfs4_handle_exception(server, err, &exception); 2389 } 2390 } while (exception.retry); 2391 out: 2392 return err; 2393 } 2394 2395 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2396 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 2397 { 2398 struct rpc_auth *auth; 2399 int ret; 2400 2401 auth = rpcauth_create(flavor, server->client); 2402 if (!auth) { 2403 ret = -EIO; 2404 goto out; 2405 } 2406 ret = nfs4_lookup_root(server, fhandle, info); 2407 out: 2408 return ret; 2409 } 2410 2411 static int nfs4_find_root_sec(struct nfs_server *server, 
struct nfs_fh *fhandle, 2412 struct nfs_fsinfo *info) 2413 { 2414 int i, len, status = 0; 2415 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS]; 2416 2417 len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array)); 2418 BUG_ON(len < 0); 2419 2420 for (i = 0; i < len; i++) { 2421 /* AUTH_UNIX is the default flavor if none was specified, 2422 * thus has already been tried. */ 2423 if (flav_array[i] == RPC_AUTH_UNIX) 2424 continue; 2425 2426 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); 2427 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 2428 continue; 2429 break; 2430 } 2431 /* 2432 * -EACCES could mean that the user doesn't have correct permissions 2433 * to access the mount. It could also mean that we tried to mount 2434 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 2435 * existing mount programs don't handle -EACCES very well so it should 2436 * be mapped to -EPERM instead. 2437 */ 2438 if (status == -EACCES) 2439 status = -EPERM; 2440 return status; 2441 } 2442 2443 /* 2444 * get the file handle for the "/" directory on the server 2445 */ 2446 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 2447 struct nfs_fsinfo *info) 2448 { 2449 int minor_version = server->nfs_client->cl_minorversion; 2450 int status = nfs4_lookup_root(server, fhandle, info); 2451 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) 2452 /* 2453 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM 2454 * by nfs4_map_errors() as this function exits. 2455 */ 2456 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info); 2457 if (status == 0) 2458 status = nfs4_server_capabilities(server, fhandle); 2459 if (status == 0) 2460 status = nfs4_do_fsinfo(server, fhandle, info); 2461 return nfs4_map_errors(status); 2462 } 2463 2464 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 2465 struct nfs_fsinfo *info) 2466 { 2467 int error; 2468 struct nfs_fattr *fattr = info->fattr; 2469 2470 error = nfs4_server_capabilities(server, mntfh); 2471 if (error < 0) { 2472 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 2473 return error; 2474 } 2475 2476 error = nfs4_proc_getattr(server, mntfh, fattr); 2477 if (error < 0) { 2478 dprintk("nfs4_get_root: getattr error = %d\n", -error); 2479 return error; 2480 } 2481 2482 if (fattr->valid & NFS_ATTR_FATTR_FSID && 2483 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 2484 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 2485 2486 return error; 2487 } 2488 2489 /* 2490 * Get locations and (maybe) other attributes of a referral.
2491 * Note that we'll actually follow the referral later when 2492 * we detect fsid mismatch in inode revalidation 2493 */ 2494 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 2495 const struct qstr *name, struct nfs_fattr *fattr, 2496 struct nfs_fh *fhandle) 2497 { 2498 int status = -ENOMEM; 2499 struct page *page = NULL; 2500 struct nfs4_fs_locations *locations = NULL; 2501 2502 page = alloc_page(GFP_KERNEL); 2503 if (page == NULL) 2504 goto out; 2505 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 2506 if (locations == NULL) 2507 goto out; 2508 2509 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 2510 if (status != 0) 2511 goto out; 2512 /* Make sure server returned a different fsid for the referral */ 2513 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 2514 dprintk("%s: server did not return a different fsid for" 2515 " a referral at %s\n", __func__, name->name); 2516 status = -EIO; 2517 goto out; 2518 } 2519 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 2520 nfs_fixup_referral_attributes(&locations->fattr); 2521 2522 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 2523 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 2524 memset(fhandle, 0, sizeof(struct nfs_fh)); 2525 out: 2526 if (page) 2527 __free_page(page); 2528 kfree(locations); 2529 return status; 2530 } 2531 2532 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2533 { 2534 struct nfs4_getattr_arg args = { 2535 .fh = fhandle, 2536 .bitmask = server->attr_bitmask, 2537 }; 2538 struct nfs4_getattr_res res = { 2539 .fattr = fattr, 2540 .server = server, 2541 }; 2542 struct rpc_message msg = { 2543 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 2544 .rpc_argp = &args, 2545 .rpc_resp = &res, 2546 }; 2547 2548 nfs_fattr_init(fattr); 2549 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2550 } 2551 2552 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2553 { 2554 struct nfs4_exception exception = { }; 2555 int err; 2556 do { 2557 err = nfs4_handle_exception(server, 2558 _nfs4_proc_getattr(server, fhandle, fattr), 2559 &exception); 2560 } while (exception.retry); 2561 return err; 2562 } 2563 2564 /* 2565 * The file is not closed if it is opened due to a request to change 2566 * the size of the file. The open call will not be needed once the 2567 * VFS layer lookup-intents are implemented. 2568 * 2569 * Close is called when the inode is destroyed. 2570 * If we haven't opened the file for O_WRONLY, we 2571 * still need to open it in the size_change case to obtain a stateid. 2572 * 2573 * Got race? 2574 * Because OPEN is always done by name in nfsv4, it is 2575 * possible that we opened a different file by the same 2576 * name. We can recognize this race condition, but we 2577 * can't do anything about it besides returning an error. 2578 * 2579 * This will be fixed with VFS changes (lookup-intent).
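 *
 * The SETATTR itself is sent with whatever write stateid can be found:
 * the open stateid from an O_WRITE context if the caller supplied one,
 * otherwise a delegation stateid, otherwise the zero stateid (see
 * _nfs4_do_setattr() above).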
2580 */ 2581 static int 2582 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 2583 struct iattr *sattr) 2584 { 2585 struct inode *inode = dentry->d_inode; 2586 struct rpc_cred *cred = NULL; 2587 struct nfs4_state *state = NULL; 2588 int status; 2589 2590 if (pnfs_ld_layoutret_on_setattr(inode)) 2591 pnfs_return_layout(inode); 2592 2593 nfs_fattr_init(fattr); 2594 2595 /* Deal with open(O_TRUNC) */ 2596 if (sattr->ia_valid & ATTR_OPEN) 2597 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2598 2599 /* Optimization: if the end result is no change, don't RPC */ 2600 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) 2601 return 0; 2602 2603 /* Search for an existing open(O_WRITE) file */ 2604 if (sattr->ia_valid & ATTR_FILE) { 2605 struct nfs_open_context *ctx; 2606 2607 ctx = nfs_file_open_context(sattr->ia_file); 2608 if (ctx) { 2609 cred = ctx->cred; 2610 state = ctx->state; 2611 } 2612 } 2613 2614 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2615 if (status == 0) 2616 nfs_setattr_update_inode(inode, sattr); 2617 return status; 2618 } 2619 2620 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 2621 const struct qstr *name, struct nfs_fh *fhandle, 2622 struct nfs_fattr *fattr) 2623 { 2624 struct nfs_server *server = NFS_SERVER(dir); 2625 int status; 2626 struct nfs4_lookup_arg args = { 2627 .bitmask = server->attr_bitmask, 2628 .dir_fh = NFS_FH(dir), 2629 .name = name, 2630 }; 2631 struct nfs4_lookup_res res = { 2632 .server = server, 2633 .fattr = fattr, 2634 .fh = fhandle, 2635 }; 2636 struct rpc_message msg = { 2637 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 2638 .rpc_argp = &args, 2639 .rpc_resp = &res, 2640 }; 2641 2642 nfs_fattr_init(fattr); 2643 2644 dprintk("NFS call lookup %s\n", name->name); 2645 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 2646 dprintk("NFS reply lookup: %d\n", status); 2647 return status; 2648 } 2649 2650 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 2651 { 2652 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 2653 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 2654 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 2655 fattr->nlink = 2; 2656 } 2657 2658 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 2659 struct qstr *name, struct nfs_fh *fhandle, 2660 struct nfs_fattr *fattr) 2661 { 2662 struct nfs4_exception exception = { }; 2663 struct rpc_clnt *client = *clnt; 2664 int err; 2665 do { 2666 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr); 2667 switch (err) { 2668 case -NFS4ERR_BADNAME: 2669 err = -ENOENT; 2670 goto out; 2671 case -NFS4ERR_MOVED: 2672 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 2673 goto out; 2674 case -NFS4ERR_WRONGSEC: 2675 err = -EPERM; 2676 if (client != *clnt) 2677 goto out; 2678 2679 client = nfs4_create_sec_client(client, dir, name); 2680 if (IS_ERR(client)) 2681 return PTR_ERR(client); 2682 2683 exception.retry = 1; 2684 break; 2685 default: 2686 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 2687 } 2688 } while (exception.retry); 2689 2690 out: 2691 if (err == 0) 2692 *clnt = client; 2693 else if (client != *clnt) 2694 rpc_shutdown_client(client); 2695 2696 return err; 2697 } 2698 2699 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 2700 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2701 { 2702 int status; 2703 struct rpc_clnt *client = NFS_CLIENT(dir); 2704 2705 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, 
fattr); 2706 if (client != NFS_CLIENT(dir)) { 2707 rpc_shutdown_client(client); 2708 nfs_fixup_secinfo_attributes(fattr); 2709 } 2710 return status; 2711 } 2712 2713 struct rpc_clnt * 2714 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 2715 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2716 { 2717 int status; 2718 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); 2719 2720 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); 2721 if (status < 0) { 2722 rpc_shutdown_client(client); 2723 return ERR_PTR(status); 2724 } 2725 return client; 2726 } 2727 2728 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2729 { 2730 struct nfs_server *server = NFS_SERVER(inode); 2731 struct nfs4_accessargs args = { 2732 .fh = NFS_FH(inode), 2733 .bitmask = server->cache_consistency_bitmask, 2734 }; 2735 struct nfs4_accessres res = { 2736 .server = server, 2737 }; 2738 struct rpc_message msg = { 2739 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 2740 .rpc_argp = &args, 2741 .rpc_resp = &res, 2742 .rpc_cred = entry->cred, 2743 }; 2744 int mode = entry->mask; 2745 int status; 2746 2747 /* 2748 * Determine which access bits we want to ask for... 2749 */ 2750 if (mode & MAY_READ) 2751 args.access |= NFS4_ACCESS_READ; 2752 if (S_ISDIR(inode->i_mode)) { 2753 if (mode & MAY_WRITE) 2754 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 2755 if (mode & MAY_EXEC) 2756 args.access |= NFS4_ACCESS_LOOKUP; 2757 } else { 2758 if (mode & MAY_WRITE) 2759 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 2760 if (mode & MAY_EXEC) 2761 args.access |= NFS4_ACCESS_EXECUTE; 2762 } 2763 2764 res.fattr = nfs_alloc_fattr(); 2765 if (res.fattr == NULL) 2766 return -ENOMEM; 2767 2768 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2769 if (!status) { 2770 entry->mask = 0; 2771 if (res.access & NFS4_ACCESS_READ) 2772 entry->mask |= MAY_READ; 2773 if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE)) 2774 entry->mask |= MAY_WRITE; 2775 if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE)) 2776 entry->mask |= MAY_EXEC; 2777 nfs_refresh_inode(inode, res.fattr); 2778 } 2779 nfs_free_fattr(res.fattr); 2780 return status; 2781 } 2782 2783 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2784 { 2785 struct nfs4_exception exception = { }; 2786 int err; 2787 do { 2788 err = nfs4_handle_exception(NFS_SERVER(inode), 2789 _nfs4_proc_access(inode, entry), 2790 &exception); 2791 } while (exception.retry); 2792 return err; 2793 } 2794 2795 /* 2796 * TODO: For the time being, we don't try to get any attributes 2797 * along with any of the zero-copy operations READ, READDIR, 2798 * READLINK, WRITE. 2799 * 2800 * In the case of the first three, we want to put the GETATTR 2801 * after the read-type operation -- this is because it is hard 2802 * to predict the length of a GETATTR response in v4, and thus 2803 * align the READ data correctly. This means that the GETATTR 2804 * may end up partially falling into the page cache, and we should 2805 * shift it into the 'tail' of the xdr_buf before processing. 2806 * To do this efficiently, we need to know the total length 2807 * of data received, which doesn't seem to be available outside 2808 * of the RPC layer. 
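 * (This is why _nfs4_proc_readlink() below issues a bare READLINK, and
 * why the read and readdir paths merely invalidate the cached atime
 * rather than requesting post-op attributes.)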
2809 * 2810 * In the case of WRITE, we also want to put the GETATTR after 2811 * the operation -- in this case because we want to make sure 2812 * we get the post-operation mtime and size. 2813 * 2814 * Both of these changes to the XDR layer would in fact be quite 2815 * minor, but I decided to leave them for a subsequent patch. 2816 */ 2817 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 2818 unsigned int pgbase, unsigned int pglen) 2819 { 2820 struct nfs4_readlink args = { 2821 .fh = NFS_FH(inode), 2822 .pgbase = pgbase, 2823 .pglen = pglen, 2824 .pages = &page, 2825 }; 2826 struct nfs4_readlink_res res; 2827 struct rpc_message msg = { 2828 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 2829 .rpc_argp = &args, 2830 .rpc_resp = &res, 2831 }; 2832 2833 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 2834 } 2835 2836 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 2837 unsigned int pgbase, unsigned int pglen) 2838 { 2839 struct nfs4_exception exception = { }; 2840 int err; 2841 do { 2842 err = nfs4_handle_exception(NFS_SERVER(inode), 2843 _nfs4_proc_readlink(inode, page, pgbase, pglen), 2844 &exception); 2845 } while (exception.retry); 2846 return err; 2847 } 2848 2849 /* 2850 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 2851 */ 2852 static int 2853 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 2854 int flags) 2855 { 2856 struct nfs_open_context *ctx; 2857 struct nfs4_state *state; 2858 int status = 0; 2859 2860 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 2861 if (IS_ERR(ctx)) 2862 return PTR_ERR(ctx); 2863 2864 sattr->ia_mode &= ~current_umask(); 2865 state = nfs4_do_open(dir, dentry, ctx->mode, 2866 flags, sattr, ctx->cred, 2867 &ctx->mdsthreshold); 2868 d_drop(dentry); 2869 if (IS_ERR(state)) { 2870 status = PTR_ERR(state); 2871 goto out; 2872 } 2873 d_add(dentry, igrab(state->inode)); 2874 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 2875 ctx->state = state; 2876 out: 2877 put_nfs_open_context(ctx); 2878 return status; 2879 } 2880 2881 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 2882 { 2883 struct nfs_server *server = NFS_SERVER(dir); 2884 struct nfs_removeargs args = { 2885 .fh = NFS_FH(dir), 2886 .name = *name, 2887 }; 2888 struct nfs_removeres res = { 2889 .server = server, 2890 }; 2891 struct rpc_message msg = { 2892 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 2893 .rpc_argp = &args, 2894 .rpc_resp = &res, 2895 }; 2896 int status; 2897 2898 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 2899 if (status == 0) 2900 update_changeattr(dir, &res.cinfo); 2901 return status; 2902 } 2903 2904 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 2905 { 2906 struct nfs4_exception exception = { }; 2907 int err; 2908 do { 2909 err = nfs4_handle_exception(NFS_SERVER(dir), 2910 _nfs4_proc_remove(dir, name), 2911 &exception); 2912 } while (exception.retry); 2913 return err; 2914 } 2915 2916 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 2917 { 2918 struct nfs_server *server = NFS_SERVER(dir); 2919 struct nfs_removeargs *args = msg->rpc_argp; 2920 struct nfs_removeres *res = msg->rpc_resp; 2921 2922 res->server = server; 2923 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 2924 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); 2925 } 2926 2927 static void 
nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 2928 { 2929 if (nfs4_setup_sequence(NFS_SERVER(data->dir), 2930 &data->args.seq_args, 2931 &data->res.seq_res, 2932 task)) 2933 return; 2934 rpc_call_start(task); 2935 } 2936 2937 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 2938 { 2939 struct nfs_removeres *res = task->tk_msg.rpc_resp; 2940 2941 if (!nfs4_sequence_done(task, &res->seq_res)) 2942 return 0; 2943 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2944 return 0; 2945 update_changeattr(dir, &res->cinfo); 2946 return 1; 2947 } 2948 2949 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 2950 { 2951 struct nfs_server *server = NFS_SERVER(dir); 2952 struct nfs_renameargs *arg = msg->rpc_argp; 2953 struct nfs_renameres *res = msg->rpc_resp; 2954 2955 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 2956 res->server = server; 2957 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); 2958 } 2959 2960 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 2961 { 2962 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir), 2963 &data->args.seq_args, 2964 &data->res.seq_res, 2965 task)) 2966 return; 2967 rpc_call_start(task); 2968 } 2969 2970 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 2971 struct inode *new_dir) 2972 { 2973 struct nfs_renameres *res = task->tk_msg.rpc_resp; 2974 2975 if (!nfs4_sequence_done(task, &res->seq_res)) 2976 return 0; 2977 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2978 return 0; 2979 2980 update_changeattr(old_dir, &res->old_cinfo); 2981 update_changeattr(new_dir, &res->new_cinfo); 2982 return 1; 2983 } 2984 2985 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 2986 struct inode *new_dir, struct qstr *new_name) 2987 { 2988 struct nfs_server *server = NFS_SERVER(old_dir); 2989 struct nfs_renameargs arg = { 2990 .old_dir = NFS_FH(old_dir), 2991 .new_dir = NFS_FH(new_dir), 2992 .old_name = old_name, 2993 .new_name = new_name, 2994 }; 2995 struct nfs_renameres res = { 2996 .server = server, 2997 }; 2998 struct rpc_message msg = { 2999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], 3000 .rpc_argp = &arg, 3001 .rpc_resp = &res, 3002 }; 3003 int status = -ENOMEM; 3004 3005 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3006 if (!status) { 3007 update_changeattr(old_dir, &res.old_cinfo); 3008 update_changeattr(new_dir, &res.new_cinfo); 3009 } 3010 return status; 3011 } 3012 3013 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3014 struct inode *new_dir, struct qstr *new_name) 3015 { 3016 struct nfs4_exception exception = { }; 3017 int err; 3018 do { 3019 err = nfs4_handle_exception(NFS_SERVER(old_dir), 3020 _nfs4_proc_rename(old_dir, old_name, 3021 new_dir, new_name), 3022 &exception); 3023 } while (exception.retry); 3024 return err; 3025 } 3026 3027 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3028 { 3029 struct nfs_server *server = NFS_SERVER(inode); 3030 struct nfs4_link_arg arg = { 3031 .fh = NFS_FH(inode), 3032 .dir_fh = NFS_FH(dir), 3033 .name = name, 3034 .bitmask = server->attr_bitmask, 3035 }; 3036 struct nfs4_link_res res = { 3037 .server = server, 3038 }; 3039 struct rpc_message msg = { 3040 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3041 .rpc_argp = &arg, 3042 .rpc_resp = &res, 3043 }; 3044 int status = -ENOMEM; 
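/* LINK returns post-op attributes for the linked inode; allocate an
 * nfs_fattr to receive them for nfs_post_op_update_inode() below. */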
3045 3046 res.fattr = nfs_alloc_fattr(); 3047 if (res.fattr == NULL) 3048 goto out; 3049 3050 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3051 if (!status) { 3052 update_changeattr(dir, &res.cinfo); 3053 nfs_post_op_update_inode(inode, res.fattr); 3054 } 3055 out: 3056 nfs_free_fattr(res.fattr); 3057 return status; 3058 } 3059 3060 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3061 { 3062 struct nfs4_exception exception = { }; 3063 int err; 3064 do { 3065 err = nfs4_handle_exception(NFS_SERVER(inode), 3066 _nfs4_proc_link(inode, dir, name), 3067 &exception); 3068 } while (exception.retry); 3069 return err; 3070 } 3071 3072 struct nfs4_createdata { 3073 struct rpc_message msg; 3074 struct nfs4_create_arg arg; 3075 struct nfs4_create_res res; 3076 struct nfs_fh fh; 3077 struct nfs_fattr fattr; 3078 }; 3079 3080 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3081 struct qstr *name, struct iattr *sattr, u32 ftype) 3082 { 3083 struct nfs4_createdata *data; 3084 3085 data = kzalloc(sizeof(*data), GFP_KERNEL); 3086 if (data != NULL) { 3087 struct nfs_server *server = NFS_SERVER(dir); 3088 3089 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3090 data->msg.rpc_argp = &data->arg; 3091 data->msg.rpc_resp = &data->res; 3092 data->arg.dir_fh = NFS_FH(dir); 3093 data->arg.server = server; 3094 data->arg.name = name; 3095 data->arg.attrs = sattr; 3096 data->arg.ftype = ftype; 3097 data->arg.bitmask = server->attr_bitmask; 3098 data->res.server = server; 3099 data->res.fh = &data->fh; 3100 data->res.fattr = &data->fattr; 3101 nfs_fattr_init(data->res.fattr); 3102 } 3103 return data; 3104 } 3105 3106 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3107 { 3108 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3109 &data->arg.seq_args, &data->res.seq_res, 1); 3110 if (status == 0) { 3111 update_changeattr(dir, &data->res.dir_cinfo); 3112 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 3113 } 3114 return status; 3115 } 3116 3117 static void nfs4_free_createdata(struct nfs4_createdata *data) 3118 { 3119 kfree(data); 3120 } 3121 3122 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3123 struct page *page, unsigned int len, struct iattr *sattr) 3124 { 3125 struct nfs4_createdata *data; 3126 int status = -ENAMETOOLONG; 3127 3128 if (len > NFS4_MAXPATHLEN) 3129 goto out; 3130 3131 status = -ENOMEM; 3132 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3133 if (data == NULL) 3134 goto out; 3135 3136 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3137 data->arg.u.symlink.pages = &page; 3138 data->arg.u.symlink.len = len; 3139 3140 status = nfs4_do_create(dir, dentry, data); 3141 3142 nfs4_free_createdata(data); 3143 out: 3144 return status; 3145 } 3146 3147 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3148 struct page *page, unsigned int len, struct iattr *sattr) 3149 { 3150 struct nfs4_exception exception = { }; 3151 int err; 3152 do { 3153 err = nfs4_handle_exception(NFS_SERVER(dir), 3154 _nfs4_proc_symlink(dir, dentry, page, 3155 len, sattr), 3156 &exception); 3157 } while (exception.retry); 3158 return err; 3159 } 3160 3161 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3162 struct iattr *sattr) 3163 { 3164 struct nfs4_createdata *data; 3165 int status = -ENOMEM; 3166 3167 data = 
nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3168 if (data == NULL) 3169 goto out; 3170 3171 status = nfs4_do_create(dir, dentry, data); 3172 3173 nfs4_free_createdata(data); 3174 out: 3175 return status; 3176 } 3177 3178 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3179 struct iattr *sattr) 3180 { 3181 struct nfs4_exception exception = { }; 3182 int err; 3183 3184 sattr->ia_mode &= ~current_umask(); 3185 do { 3186 err = nfs4_handle_exception(NFS_SERVER(dir), 3187 _nfs4_proc_mkdir(dir, dentry, sattr), 3188 &exception); 3189 } while (exception.retry); 3190 return err; 3191 } 3192 3193 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3194 u64 cookie, struct page **pages, unsigned int count, int plus) 3195 { 3196 struct inode *dir = dentry->d_inode; 3197 struct nfs4_readdir_arg args = { 3198 .fh = NFS_FH(dir), 3199 .pages = pages, 3200 .pgbase = 0, 3201 .count = count, 3202 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, 3203 .plus = plus, 3204 }; 3205 struct nfs4_readdir_res res; 3206 struct rpc_message msg = { 3207 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3208 .rpc_argp = &args, 3209 .rpc_resp = &res, 3210 .rpc_cred = cred, 3211 }; 3212 int status; 3213 3214 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__, 3215 dentry->d_parent->d_name.name, 3216 dentry->d_name.name, 3217 (unsigned long long)cookie); 3218 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); 3219 res.pgbase = args.pgbase; 3220 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3221 if (status >= 0) { 3222 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); 3223 status += args.pgbase; 3224 } 3225 3226 nfs_invalidate_atime(dir); 3227 3228 dprintk("%s: returns %d\n", __func__, status); 3229 return status; 3230 } 3231 3232 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3233 u64 cookie, struct page **pages, unsigned int count, int plus) 3234 { 3235 struct nfs4_exception exception = { }; 3236 int err; 3237 do { 3238 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), 3239 _nfs4_proc_readdir(dentry, cred, cookie, 3240 pages, count, plus), 3241 &exception); 3242 } while (exception.retry); 3243 return err; 3244 } 3245 3246 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3247 struct iattr *sattr, dev_t rdev) 3248 { 3249 struct nfs4_createdata *data; 3250 int mode = sattr->ia_mode; 3251 int status = -ENOMEM; 3252 3253 BUG_ON(!(sattr->ia_valid & ATTR_MODE)); 3254 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); 3255 3256 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3257 if (data == NULL) 3258 goto out; 3259 3260 if (S_ISFIFO(mode)) 3261 data->arg.ftype = NF4FIFO; 3262 else if (S_ISBLK(mode)) { 3263 data->arg.ftype = NF4BLK; 3264 data->arg.u.device.specdata1 = MAJOR(rdev); 3265 data->arg.u.device.specdata2 = MINOR(rdev); 3266 } 3267 else if (S_ISCHR(mode)) { 3268 data->arg.ftype = NF4CHR; 3269 data->arg.u.device.specdata1 = MAJOR(rdev); 3270 data->arg.u.device.specdata2 = MINOR(rdev); 3271 } 3272 3273 status = nfs4_do_create(dir, dentry, data); 3274 3275 nfs4_free_createdata(data); 3276 out: 3277 return status; 3278 } 3279 3280 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3281 struct iattr *sattr, dev_t rdev) 3282 { 3283 struct nfs4_exception exception = { }; 3284 int err; 3285 3286 sattr->ia_mode &= ~current_umask(); 3287 do { 3288 err 
= nfs4_handle_exception(NFS_SERVER(dir), 3289 _nfs4_proc_mknod(dir, dentry, sattr, rdev), 3290 &exception); 3291 } while (exception.retry); 3292 return err; 3293 } 3294 3295 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3296 struct nfs_fsstat *fsstat) 3297 { 3298 struct nfs4_statfs_arg args = { 3299 .fh = fhandle, 3300 .bitmask = server->attr_bitmask, 3301 }; 3302 struct nfs4_statfs_res res = { 3303 .fsstat = fsstat, 3304 }; 3305 struct rpc_message msg = { 3306 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 3307 .rpc_argp = &args, 3308 .rpc_resp = &res, 3309 }; 3310 3311 nfs_fattr_init(fsstat->fattr); 3312 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3313 } 3314 3315 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 3316 { 3317 struct nfs4_exception exception = { }; 3318 int err; 3319 do { 3320 err = nfs4_handle_exception(server, 3321 _nfs4_proc_statfs(server, fhandle, fsstat), 3322 &exception); 3323 } while (exception.retry); 3324 return err; 3325 } 3326 3327 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 3328 struct nfs_fsinfo *fsinfo) 3329 { 3330 struct nfs4_fsinfo_arg args = { 3331 .fh = fhandle, 3332 .bitmask = server->attr_bitmask, 3333 }; 3334 struct nfs4_fsinfo_res res = { 3335 .fsinfo = fsinfo, 3336 }; 3337 struct rpc_message msg = { 3338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 3339 .rpc_argp = &args, 3340 .rpc_resp = &res, 3341 }; 3342 3343 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3344 } 3345 3346 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3347 { 3348 struct nfs4_exception exception = { }; 3349 int err; 3350 3351 do { 3352 err = nfs4_handle_exception(server, 3353 _nfs4_do_fsinfo(server, fhandle, fsinfo), 3354 &exception); 3355 } while (exception.retry); 3356 return err; 3357 } 3358 3359 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3360 { 3361 int error; 3362 3363 nfs_fattr_init(fsinfo->fattr); 3364 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 3365 if (error == 0) 3366 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 3367 3368 return error; 3369 } 3370 3371 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3372 struct nfs_pathconf *pathconf) 3373 { 3374 struct nfs4_pathconf_arg args = { 3375 .fh = fhandle, 3376 .bitmask = server->attr_bitmask, 3377 }; 3378 struct nfs4_pathconf_res res = { 3379 .pathconf = pathconf, 3380 }; 3381 struct rpc_message msg = { 3382 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 3383 .rpc_argp = &args, 3384 .rpc_resp = &res, 3385 }; 3386 3387 /* None of the pathconf attributes are mandatory to implement */ 3388 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 3389 memset(pathconf, 0, sizeof(*pathconf)); 3390 return 0; 3391 } 3392 3393 nfs_fattr_init(pathconf->fattr); 3394 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3395 } 3396 3397 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3398 struct nfs_pathconf *pathconf) 3399 { 3400 struct nfs4_exception exception = { }; 3401 int err; 3402 3403 do { 3404 err = nfs4_handle_exception(server, 3405 _nfs4_proc_pathconf(server, fhandle, pathconf), 3406 &exception); 3407 } while (exception.retry); 3408 return err; 3409 } 3410 3411 void 
__nfs4_read_done_cb(struct nfs_read_data *data) 3412 { 3413 nfs_invalidate_atime(data->header->inode); 3414 } 3415 3416 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) 3417 { 3418 struct nfs_server *server = NFS_SERVER(data->header->inode); 3419 3420 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3421 rpc_restart_call_prepare(task); 3422 return -EAGAIN; 3423 } 3424 3425 __nfs4_read_done_cb(data); 3426 if (task->tk_status > 0) 3427 renew_lease(server, data->timestamp); 3428 return 0; 3429 } 3430 3431 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) 3432 { 3433 3434 dprintk("--> %s\n", __func__); 3435 3436 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3437 return -EAGAIN; 3438 3439 return data->read_done_cb ? data->read_done_cb(task, data) : 3440 nfs4_read_done_cb(task, data); 3441 } 3442 3443 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg) 3444 { 3445 data->timestamp = jiffies; 3446 data->read_done_cb = nfs4_read_done_cb; 3447 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 3448 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 3449 } 3450 3451 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) 3452 { 3453 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3454 &data->args.seq_args, 3455 &data->res.seq_res, 3456 task)) 3457 return; 3458 rpc_call_start(task); 3459 } 3460 3461 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3462 { 3463 struct inode *inode = data->header->inode; 3464 3465 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3466 rpc_restart_call_prepare(task); 3467 return -EAGAIN; 3468 } 3469 if (task->tk_status >= 0) { 3470 renew_lease(NFS_SERVER(inode), data->timestamp); 3471 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 3472 } 3473 return 0; 3474 } 3475 3476 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) 3477 { 3478 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3479 return -EAGAIN; 3480 return data->write_done_cb ? 
data->write_done_cb(task, data) : 3481 nfs4_write_done_cb(task, data); 3482 } 3483 3484 static 3485 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data) 3486 { 3487 const struct nfs_pgio_header *hdr = data->header; 3488 3489 /* Don't request attributes for pNFS or O_DIRECT writes */ 3490 if (data->ds_clp != NULL || hdr->dreq != NULL) 3491 return false; 3492 /* Otherwise, request attributes if and only if we don't hold 3493 * a delegation 3494 */ 3495 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 3496 } 3497 3498 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg) 3499 { 3500 struct nfs_server *server = NFS_SERVER(data->header->inode); 3501 3502 if (!nfs4_write_need_cache_consistency_data(data)) { 3503 data->args.bitmask = NULL; 3504 data->res.fattr = NULL; 3505 } else 3506 data->args.bitmask = server->cache_consistency_bitmask; 3507 3508 if (!data->write_done_cb) 3509 data->write_done_cb = nfs4_write_done_cb; 3510 data->res.server = server; 3511 data->timestamp = jiffies; 3512 3513 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 3514 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3515 } 3516 3517 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) 3518 { 3519 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3520 &data->args.seq_args, 3521 &data->res.seq_res, 3522 task)) 3523 return; 3524 rpc_call_start(task); 3525 } 3526 3527 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 3528 { 3529 if (nfs4_setup_sequence(NFS_SERVER(data->inode), 3530 &data->args.seq_args, 3531 &data->res.seq_res, 3532 task)) 3533 return; 3534 rpc_call_start(task); 3535 } 3536 3537 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 3538 { 3539 struct inode *inode = data->inode; 3540 3541 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3542 rpc_restart_call_prepare(task); 3543 return -EAGAIN; 3544 } 3545 return 0; 3546 } 3547 3548 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 3549 { 3550 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3551 return -EAGAIN; 3552 return data->commit_done_cb(task, data); 3553 } 3554 3555 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 3556 { 3557 struct nfs_server *server = NFS_SERVER(data->inode); 3558 3559 if (data->commit_done_cb == NULL) 3560 data->commit_done_cb = nfs4_commit_done_cb; 3561 data->res.server = server; 3562 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 3563 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3564 } 3565 3566 struct nfs4_renewdata { 3567 struct nfs_client *client; 3568 unsigned long timestamp; 3569 }; 3570 3571 /* 3572 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3573 * standalone procedure for queueing an asynchronous RENEW. 
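 *
 * nfs4_proc_async_renew() takes its own reference to the nfs_client;
 * nfs4_renew_release() drops it and, if the client is still in use,
 * schedules the next renewal.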
3574 */ 3575 static void nfs4_renew_release(void *calldata) 3576 { 3577 struct nfs4_renewdata *data = calldata; 3578 struct nfs_client *clp = data->client; 3579 3580 if (atomic_read(&clp->cl_count) > 1) 3581 nfs4_schedule_state_renewal(clp); 3582 nfs_put_client(clp); 3583 kfree(data); 3584 } 3585 3586 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 3587 { 3588 struct nfs4_renewdata *data = calldata; 3589 struct nfs_client *clp = data->client; 3590 unsigned long timestamp = data->timestamp; 3591 3592 if (task->tk_status < 0) { 3593 /* Unless we're shutting down, schedule state recovery! */ 3594 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3595 return; 3596 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3597 nfs4_schedule_lease_recovery(clp); 3598 return; 3599 } 3600 nfs4_schedule_path_down_recovery(clp); 3601 } 3602 do_renew_lease(clp, timestamp); 3603 } 3604 3605 static const struct rpc_call_ops nfs4_renew_ops = { 3606 .rpc_call_done = nfs4_renew_done, 3607 .rpc_release = nfs4_renew_release, 3608 }; 3609 3610 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3611 { 3612 struct rpc_message msg = { 3613 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3614 .rpc_argp = clp, 3615 .rpc_cred = cred, 3616 }; 3617 struct nfs4_renewdata *data; 3618 3619 if (renew_flags == 0) 3620 return 0; 3621 if (!atomic_inc_not_zero(&clp->cl_count)) 3622 return -EIO; 3623 data = kmalloc(sizeof(*data), GFP_NOFS); 3624 if (data == NULL) 3625 return -ENOMEM; 3626 data->client = clp; 3627 data->timestamp = jiffies; 3628 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 3629 &nfs4_renew_ops, data); 3630 } 3631 3632 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3633 { 3634 struct rpc_message msg = { 3635 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3636 .rpc_argp = clp, 3637 .rpc_cred = cred, 3638 }; 3639 unsigned long now = jiffies; 3640 int status; 3641 3642 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3643 if (status < 0) 3644 return status; 3645 do_renew_lease(clp, now); 3646 return 0; 3647 } 3648 3649 static inline int nfs4_server_supports_acls(struct nfs_server *server) 3650 { 3651 return (server->caps & NFS_CAP_ACLS) 3652 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3653 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); 3654 } 3655 3656 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that 3657 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on 3658 * the stack. 
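 * (That is one struct page pointer per page of ACL data, at most
 * NFS4ACL_MAXPAGES entries.)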
3659 */ 3660 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT) 3661 3662 static int buf_to_pages_noslab(const void *buf, size_t buflen, 3663 struct page **pages, unsigned int *pgbase) 3664 { 3665 struct page *newpage, **spages; 3666 int rc = 0; 3667 size_t len; 3668 spages = pages; 3669 3670 do { 3671 len = min_t(size_t, PAGE_CACHE_SIZE, buflen); 3672 newpage = alloc_page(GFP_KERNEL); 3673 3674 if (newpage == NULL) 3675 goto unwind; 3676 memcpy(page_address(newpage), buf, len); 3677 buf += len; 3678 buflen -= len; 3679 *pages++ = newpage; 3680 rc++; 3681 } while (buflen != 0); 3682 3683 return rc; 3684 3685 unwind: 3686 for(; rc > 0; rc--) 3687 __free_page(spages[rc-1]); 3688 return -ENOMEM; 3689 } 3690 3691 struct nfs4_cached_acl { 3692 int cached; 3693 size_t len; 3694 char data[0]; 3695 }; 3696 3697 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 3698 { 3699 struct nfs_inode *nfsi = NFS_I(inode); 3700 3701 spin_lock(&inode->i_lock); 3702 kfree(nfsi->nfs4_acl); 3703 nfsi->nfs4_acl = acl; 3704 spin_unlock(&inode->i_lock); 3705 } 3706 3707 static void nfs4_zap_acl_attr(struct inode *inode) 3708 { 3709 nfs4_set_cached_acl(inode, NULL); 3710 } 3711 3712 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 3713 { 3714 struct nfs_inode *nfsi = NFS_I(inode); 3715 struct nfs4_cached_acl *acl; 3716 int ret = -ENOENT; 3717 3718 spin_lock(&inode->i_lock); 3719 acl = nfsi->nfs4_acl; 3720 if (acl == NULL) 3721 goto out; 3722 if (buf == NULL) /* user is just asking for length */ 3723 goto out_len; 3724 if (acl->cached == 0) 3725 goto out; 3726 ret = -ERANGE; /* see getxattr(2) man page */ 3727 if (acl->len > buflen) 3728 goto out; 3729 memcpy(buf, acl->data, acl->len); 3730 out_len: 3731 ret = acl->len; 3732 out: 3733 spin_unlock(&inode->i_lock); 3734 return ret; 3735 } 3736 3737 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3738 { 3739 struct nfs4_cached_acl *acl; 3740 3741 if (pages && acl_len <= PAGE_SIZE) { 3742 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); 3743 if (acl == NULL) 3744 goto out; 3745 acl->cached = 1; 3746 _copy_from_pages(acl->data, pages, pgbase, acl_len); 3747 } else { 3748 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 3749 if (acl == NULL) 3750 goto out; 3751 acl->cached = 0; 3752 } 3753 acl->len = acl_len; 3754 out: 3755 nfs4_set_cached_acl(inode, acl); 3756 } 3757 3758 /* 3759 * The getxattr API returns the required buffer length when called with a 3760 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 3761 * the required buf. On a NULL buf, we send a page of data to the server 3762 * guessing that the ACL request can be serviced by a page. If so, we cache 3763 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 3764 * the cache. If not so, we throw away the page, and cache the required 3765 * length. The next getxattr call will then produce another round trip to 3766 * the server, this time with the input buf of the required size. 
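 *
 * Example: getxattr(path, "system.nfs4_acl", NULL, 0) sends a one-page
 * GETACL; if the ACL fits, it is cached, and the second getxattr() call
 * with a buffer of the returned length is served from the cache without
 * another round trip.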
3767 */
3768 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3769 {
3770 struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, }; /* +1 slot for the extra bitmap page added below */
3771 struct nfs_getaclargs args = {
3772 .fh = NFS_FH(inode),
3773 .acl_pages = pages,
3774 .acl_len = buflen,
3775 };
3776 struct nfs_getaclres res = {
3777 .acl_len = buflen,
3778 };
3779 struct rpc_message msg = {
3780 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3781 .rpc_argp = &args,
3782 .rpc_resp = &res,
3783 };
3784 int ret = -ENOMEM, npages, i;
3785 size_t acl_len = 0;
3786
3787 npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3788 /* As long as we're doing a round trip to the server anyway,
3789 * let's be prepared for a page of acl data. */
3790 if (npages == 0)
3791 npages = 1;
3792
3793 /* Add an extra page to handle the bitmap returned */
3794 npages++;
3795
3796 for (i = 0; i < npages; i++) {
3797 pages[i] = alloc_page(GFP_KERNEL);
3798 if (!pages[i])
3799 goto out_free;
3800 }
3801
3802 /* for decoding across pages */
3803 res.acl_scratch = alloc_page(GFP_KERNEL);
3804 if (!res.acl_scratch)
3805 goto out_free;
3806
3807 args.acl_len = npages * PAGE_SIZE;
3808 args.acl_pgbase = 0;
3809
3810 /* Let decode_getfacl know not to fail if the ACL data is larger than
3811 * the page we send as a guess */
3812 if (buf == NULL)
3813 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3814
3815 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3816 __func__, buf, buflen, npages, args.acl_len);
3817 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3818 &msg, &args.seq_args, &res.seq_res, 0);
3819 if (ret)
3820 goto out_free;
3821
3822 acl_len = res.acl_len - res.acl_data_offset;
3823 if (acl_len > args.acl_len)
3824 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3825 else
3826 nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3827 acl_len);
3828 if (buf) {
3829 ret = -ERANGE;
3830 if (acl_len > buflen)
3831 goto out_free;
3832 _copy_from_pages(buf, pages, res.acl_data_offset,
3833 acl_len);
3834 }
3835 ret = acl_len;
3836 out_free:
3837 for (i = 0; i < npages; i++)
3838 if (pages[i])
3839 __free_page(pages[i]);
3840 if (res.acl_scratch)
3841 __free_page(res.acl_scratch);
3842 return ret;
3843 }
3844
3845 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3846 {
3847 struct nfs4_exception exception = { };
3848 ssize_t ret;
3849 do {
3850 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3851 if (ret >= 0)
3852 break;
3853 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3854 } while (exception.retry);
3855 return ret;
3856 }
3857
3858 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3859 {
3860 struct nfs_server *server = NFS_SERVER(inode);
3861 int ret;
3862
3863 if (!nfs4_server_supports_acls(server))
3864 return -EOPNOTSUPP;
3865 ret = nfs_revalidate_inode(server, inode);
3866 if (ret < 0)
3867 return ret;
3868 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3869 nfs_zap_acl_cache(inode);
3870 ret = nfs4_read_cached_acl(inode, buf, buflen);
3871 if (ret != -ENOENT)
3872 /* -ENOENT is returned if there is no ACL or if there is an ACL
3873 * but no cached acl data, just the acl length */
3874 return ret;
3875 return nfs4_get_acl_uncached(inode, buf, buflen);
3876 }
3877
3878 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3879 {
3880 struct nfs_server *server = NFS_SERVER(inode);
3881 struct page *pages[NFS4ACL_MAXPAGES];
3882 struct nfs_setaclargs arg = {
3883 .fh =
NFS_FH(inode), 3884 .acl_pages = pages, 3885 .acl_len = buflen, 3886 }; 3887 struct nfs_setaclres res; 3888 struct rpc_message msg = { 3889 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 3890 .rpc_argp = &arg, 3891 .rpc_resp = &res, 3892 }; 3893 int ret, i; 3894 3895 if (!nfs4_server_supports_acls(server)) 3896 return -EOPNOTSUPP; 3897 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3898 if (i < 0) 3899 return i; 3900 nfs4_inode_return_delegation(inode); 3901 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3902 3903 /* 3904 * Free each page after tx, so the only ref left is 3905 * held by the network stack 3906 */ 3907 for (; i > 0; i--) 3908 put_page(pages[i-1]); 3909 3910 /* 3911 * Acl update can result in inode attribute update. 3912 * so mark the attribute cache invalid. 3913 */ 3914 spin_lock(&inode->i_lock); 3915 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 3916 spin_unlock(&inode->i_lock); 3917 nfs_access_zap_cache(inode); 3918 nfs_zap_acl_cache(inode); 3919 return ret; 3920 } 3921 3922 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 3923 { 3924 struct nfs4_exception exception = { }; 3925 int err; 3926 do { 3927 err = nfs4_handle_exception(NFS_SERVER(inode), 3928 __nfs4_proc_set_acl(inode, buf, buflen), 3929 &exception); 3930 } while (exception.retry); 3931 return err; 3932 } 3933 3934 static int 3935 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) 3936 { 3937 struct nfs_client *clp = server->nfs_client; 3938 3939 if (task->tk_status >= 0) 3940 return 0; 3941 switch(task->tk_status) { 3942 case -NFS4ERR_DELEG_REVOKED: 3943 case -NFS4ERR_ADMIN_REVOKED: 3944 case -NFS4ERR_BAD_STATEID: 3945 if (state == NULL) 3946 break; 3947 nfs_remove_bad_delegation(state->inode); 3948 case -NFS4ERR_OPENMODE: 3949 if (state == NULL) 3950 break; 3951 nfs4_schedule_stateid_recovery(server, state); 3952 goto wait_on_recovery; 3953 case -NFS4ERR_EXPIRED: 3954 if (state != NULL) 3955 nfs4_schedule_stateid_recovery(server, state); 3956 case -NFS4ERR_STALE_STATEID: 3957 case -NFS4ERR_STALE_CLIENTID: 3958 nfs4_schedule_lease_recovery(clp); 3959 goto wait_on_recovery; 3960 #if defined(CONFIG_NFS_V4_1) 3961 case -NFS4ERR_BADSESSION: 3962 case -NFS4ERR_BADSLOT: 3963 case -NFS4ERR_BAD_HIGH_SLOT: 3964 case -NFS4ERR_DEADSESSION: 3965 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 3966 case -NFS4ERR_SEQ_FALSE_RETRY: 3967 case -NFS4ERR_SEQ_MISORDERED: 3968 dprintk("%s ERROR %d, Reset session\n", __func__, 3969 task->tk_status); 3970 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 3971 task->tk_status = 0; 3972 return -EAGAIN; 3973 #endif /* CONFIG_NFS_V4_1 */ 3974 case -NFS4ERR_DELAY: 3975 nfs_inc_server_stats(server, NFSIOS_DELAY); 3976 case -NFS4ERR_GRACE: 3977 case -EKEYEXPIRED: 3978 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3979 task->tk_status = 0; 3980 return -EAGAIN; 3981 case -NFS4ERR_RETRY_UNCACHED_REP: 3982 case -NFS4ERR_OLD_STATEID: 3983 task->tk_status = 0; 3984 return -EAGAIN; 3985 } 3986 task->tk_status = nfs4_map_errors(task->tk_status); 3987 return 0; 3988 wait_on_recovery: 3989 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 3990 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 3991 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 3992 task->tk_status = 0; 3993 return -EAGAIN; 3994 } 3995 3996 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 3997 nfs4_verifier *bootverf) 3998 { 3999 __be32 verf[2]; 4000 
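/* The boot verifier is sent in SETCLIENTID/EXCHANGE_ID; when it changes,
 * the server treats the client as rebooted and may discard state held for
 * the previous incarnation, so a state purge deliberately uses a value
 * that no real boot time can ever match. */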
4001 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 4002 /* An impossible timestamp guarantees this value 4003 * will never match a generated boot time. */ 4004 verf[0] = 0; 4005 verf[1] = (__be32)(NSEC_PER_SEC + 1); 4006 } else { 4007 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 4008 verf[0] = (__be32)nn->boot_time.tv_sec; 4009 verf[1] = (__be32)nn->boot_time.tv_nsec; 4010 } 4011 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 4012 } 4013 4014 /** 4015 * nfs4_proc_setclientid - Negotiate client ID 4016 * @clp: state data structure 4017 * @program: RPC program for NFSv4 callback service 4018 * @port: IP port number for NFS4 callback service 4019 * @cred: RPC credential to use for this call 4020 * @res: where to place the result 4021 * 4022 * Returns zero, a negative errno, or a negative NFS4ERR status code. 4023 */ 4024 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 4025 unsigned short port, struct rpc_cred *cred, 4026 struct nfs4_setclientid_res *res) 4027 { 4028 nfs4_verifier sc_verifier; 4029 struct nfs4_setclientid setclientid = { 4030 .sc_verifier = &sc_verifier, 4031 .sc_prog = program, 4032 .sc_cb_ident = clp->cl_cb_ident, 4033 }; 4034 struct rpc_message msg = { 4035 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 4036 .rpc_argp = &setclientid, 4037 .rpc_resp = res, 4038 .rpc_cred = cred, 4039 }; 4040 int status; 4041 4042 /* nfs_client_id4 */ 4043 nfs4_init_boot_verifier(clp, &sc_verifier); 4044 rcu_read_lock(); 4045 setclientid.sc_name_len = scnprintf(setclientid.sc_name, 4046 sizeof(setclientid.sc_name), "%s/%s %s", 4047 clp->cl_ipaddr, 4048 rpc_peeraddr2str(clp->cl_rpcclient, 4049 RPC_DISPLAY_ADDR), 4050 rpc_peeraddr2str(clp->cl_rpcclient, 4051 RPC_DISPLAY_PROTO)); 4052 /* cb_client4 */ 4053 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid, 4054 sizeof(setclientid.sc_netid), 4055 rpc_peeraddr2str(clp->cl_rpcclient, 4056 RPC_DISPLAY_NETID)); 4057 rcu_read_unlock(); 4058 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 4059 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 4060 clp->cl_ipaddr, port >> 8, port & 255); 4061 4062 dprintk("NFS call setclientid auth=%s, '%.*s'\n", 4063 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4064 setclientid.sc_name_len, setclientid.sc_name); 4065 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4066 dprintk("NFS reply setclientid: %d\n", status); 4067 return status; 4068 } 4069 4070 /** 4071 * nfs4_proc_setclientid_confirm - Confirm client ID 4072 * @clp: state data structure 4073 * @res: result of a previous SETCLIENTID 4074 * @cred: RPC credential to use for this call 4075 * 4076 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
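 *
 * On success the lease time reported by the server is stored in
 * clp->cl_lease_time and the last renewal timestamp is reset, both under
 * clp->cl_lock.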
4077 */ 4078 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 4079 struct nfs4_setclientid_res *arg, 4080 struct rpc_cred *cred) 4081 { 4082 struct nfs_fsinfo fsinfo; 4083 struct rpc_message msg = { 4084 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 4085 .rpc_argp = arg, 4086 .rpc_resp = &fsinfo, 4087 .rpc_cred = cred, 4088 }; 4089 unsigned long now; 4090 int status; 4091 4092 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 4093 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4094 clp->cl_clientid); 4095 now = jiffies; 4096 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4097 if (status == 0) { 4098 spin_lock(&clp->cl_lock); 4099 clp->cl_lease_time = fsinfo.lease_time * HZ; 4100 clp->cl_last_renewal = now; 4101 spin_unlock(&clp->cl_lock); 4102 } 4103 dprintk("NFS reply setclientid_confirm: %d\n", status); 4104 return status; 4105 } 4106 4107 struct nfs4_delegreturndata { 4108 struct nfs4_delegreturnargs args; 4109 struct nfs4_delegreturnres res; 4110 struct nfs_fh fh; 4111 nfs4_stateid stateid; 4112 unsigned long timestamp; 4113 struct nfs_fattr fattr; 4114 int rpc_status; 4115 }; 4116 4117 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 4118 { 4119 struct nfs4_delegreturndata *data = calldata; 4120 4121 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4122 return; 4123 4124 switch (task->tk_status) { 4125 case -NFS4ERR_STALE_STATEID: 4126 case -NFS4ERR_EXPIRED: 4127 case 0: 4128 renew_lease(data->res.server, data->timestamp); 4129 break; 4130 default: 4131 if (nfs4_async_handle_error(task, data->res.server, NULL) == 4132 -EAGAIN) { 4133 rpc_restart_call_prepare(task); 4134 return; 4135 } 4136 } 4137 data->rpc_status = task->tk_status; 4138 } 4139 4140 static void nfs4_delegreturn_release(void *calldata) 4141 { 4142 kfree(calldata); 4143 } 4144 4145 #if defined(CONFIG_NFS_V4_1) 4146 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 4147 { 4148 struct nfs4_delegreturndata *d_data; 4149 4150 d_data = (struct nfs4_delegreturndata *)data; 4151 4152 if (nfs4_setup_sequence(d_data->res.server, 4153 &d_data->args.seq_args, 4154 &d_data->res.seq_res, task)) 4155 return; 4156 rpc_call_start(task); 4157 } 4158 #endif /* CONFIG_NFS_V4_1 */ 4159 4160 static const struct rpc_call_ops nfs4_delegreturn_ops = { 4161 #if defined(CONFIG_NFS_V4_1) 4162 .rpc_call_prepare = nfs4_delegreturn_prepare, 4163 #endif /* CONFIG_NFS_V4_1 */ 4164 .rpc_call_done = nfs4_delegreturn_done, 4165 .rpc_release = nfs4_delegreturn_release, 4166 }; 4167 4168 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4169 { 4170 struct nfs4_delegreturndata *data; 4171 struct nfs_server *server = NFS_SERVER(inode); 4172 struct rpc_task *task; 4173 struct rpc_message msg = { 4174 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 4175 .rpc_cred = cred, 4176 }; 4177 struct rpc_task_setup task_setup_data = { 4178 .rpc_client = server->client, 4179 .rpc_message = &msg, 4180 .callback_ops = &nfs4_delegreturn_ops, 4181 .flags = RPC_TASK_ASYNC, 4182 }; 4183 int status = 0; 4184 4185 data = kzalloc(sizeof(*data), GFP_NOFS); 4186 if (data == NULL) 4187 return -ENOMEM; 4188 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4189 data->args.fhandle = &data->fh; 4190 data->args.stateid = &data->stateid; 4191 data->args.bitmask = server->cache_consistency_bitmask; 4192 nfs_copy_fh(&data->fh, NFS_FH(inode)); 4193 nfs4_stateid_copy(&data->stateid, 
stateid); 4194 data->res.fattr = &data->fattr; 4195 data->res.server = server; 4196 nfs_fattr_init(data->res.fattr); 4197 data->timestamp = jiffies; 4198 data->rpc_status = 0; 4199 4200 task_setup_data.callback_data = data; 4201 msg.rpc_argp = &data->args; 4202 msg.rpc_resp = &data->res; 4203 task = rpc_run_task(&task_setup_data); 4204 if (IS_ERR(task)) 4205 return PTR_ERR(task); 4206 if (!issync) 4207 goto out; 4208 status = nfs4_wait_for_completion_rpc_task(task); 4209 if (status != 0) 4210 goto out; 4211 status = data->rpc_status; 4212 if (status == 0) 4213 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 4214 else 4215 nfs_refresh_inode(inode, &data->fattr); 4216 out: 4217 rpc_put_task(task); 4218 return status; 4219 } 4220 4221 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4222 { 4223 struct nfs_server *server = NFS_SERVER(inode); 4224 struct nfs4_exception exception = { }; 4225 int err; 4226 do { 4227 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 4228 switch (err) { 4229 case -NFS4ERR_STALE_STATEID: 4230 case -NFS4ERR_EXPIRED: 4231 case 0: 4232 return 0; 4233 } 4234 err = nfs4_handle_exception(server, err, &exception); 4235 } while (exception.retry); 4236 return err; 4237 } 4238 4239 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 4240 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 4241 4242 /* 4243 * sleep, with exponential backoff, and retry the LOCK operation. 4244 */ 4245 static unsigned long 4246 nfs4_set_lock_task_retry(unsigned long timeout) 4247 { 4248 freezable_schedule_timeout_killable(timeout); 4249 timeout <<= 1; 4250 if (timeout > NFS4_LOCK_MAXTIMEOUT) 4251 return NFS4_LOCK_MAXTIMEOUT; 4252 return timeout; 4253 } 4254 4255 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4256 { 4257 struct inode *inode = state->inode; 4258 struct nfs_server *server = NFS_SERVER(inode); 4259 struct nfs_client *clp = server->nfs_client; 4260 struct nfs_lockt_args arg = { 4261 .fh = NFS_FH(inode), 4262 .fl = request, 4263 }; 4264 struct nfs_lockt_res res = { 4265 .denied = request, 4266 }; 4267 struct rpc_message msg = { 4268 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 4269 .rpc_argp = &arg, 4270 .rpc_resp = &res, 4271 .rpc_cred = state->owner->so_cred, 4272 }; 4273 struct nfs4_lock_state *lsp; 4274 int status; 4275 4276 arg.lock_owner.clientid = clp->cl_clientid; 4277 status = nfs4_set_lock_state(state, request); 4278 if (status != 0) 4279 goto out; 4280 lsp = request->fl_u.nfs4_fl.owner; 4281 arg.lock_owner.id = lsp->ls_seqid.owner_id; 4282 arg.lock_owner.s_dev = server->s_dev; 4283 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4284 switch (status) { 4285 case 0: 4286 request->fl_type = F_UNLCK; 4287 break; 4288 case -NFS4ERR_DENIED: 4289 status = 0; 4290 } 4291 request->fl_ops->fl_release_private(request); 4292 out: 4293 return status; 4294 } 4295 4296 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4297 { 4298 struct nfs4_exception exception = { }; 4299 int err; 4300 4301 do { 4302 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4303 _nfs4_proc_getlk(state, cmd, request), 4304 &exception); 4305 } while (exception.retry); 4306 return err; 4307 } 4308 4309 static int do_vfs_lock(struct file *file, struct file_lock *fl) 4310 { 4311 int res = 0; 4312 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 4313 case FL_POSIX: 4314 res = posix_lock_file_wait(file, fl); 4315 break; 4316 case FL_FLOCK: 
4317 res = flock_lock_file_wait(file, fl); 4318 break; 4319 default: 4320 BUG(); 4321 } 4322 return res; 4323 } 4324 4325 struct nfs4_unlockdata { 4326 struct nfs_locku_args arg; 4327 struct nfs_locku_res res; 4328 struct nfs4_lock_state *lsp; 4329 struct nfs_open_context *ctx; 4330 struct file_lock fl; 4331 const struct nfs_server *server; 4332 unsigned long timestamp; 4333 }; 4334 4335 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 4336 struct nfs_open_context *ctx, 4337 struct nfs4_lock_state *lsp, 4338 struct nfs_seqid *seqid) 4339 { 4340 struct nfs4_unlockdata *p; 4341 struct inode *inode = lsp->ls_state->inode; 4342 4343 p = kzalloc(sizeof(*p), GFP_NOFS); 4344 if (p == NULL) 4345 return NULL; 4346 p->arg.fh = NFS_FH(inode); 4347 p->arg.fl = &p->fl; 4348 p->arg.seqid = seqid; 4349 p->res.seqid = seqid; 4350 p->arg.stateid = &lsp->ls_stateid; 4351 p->lsp = lsp; 4352 atomic_inc(&lsp->ls_count); 4353 /* Ensure we don't close file until we're done freeing locks! */ 4354 p->ctx = get_nfs_open_context(ctx); 4355 memcpy(&p->fl, fl, sizeof(p->fl)); 4356 p->server = NFS_SERVER(inode); 4357 return p; 4358 } 4359 4360 static void nfs4_locku_release_calldata(void *data) 4361 { 4362 struct nfs4_unlockdata *calldata = data; 4363 nfs_free_seqid(calldata->arg.seqid); 4364 nfs4_put_lock_state(calldata->lsp); 4365 put_nfs_open_context(calldata->ctx); 4366 kfree(calldata); 4367 } 4368 4369 static void nfs4_locku_done(struct rpc_task *task, void *data) 4370 { 4371 struct nfs4_unlockdata *calldata = data; 4372 4373 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 4374 return; 4375 switch (task->tk_status) { 4376 case 0: 4377 nfs4_stateid_copy(&calldata->lsp->ls_stateid, 4378 &calldata->res.stateid); 4379 renew_lease(calldata->server, calldata->timestamp); 4380 break; 4381 case -NFS4ERR_BAD_STATEID: 4382 case -NFS4ERR_OLD_STATEID: 4383 case -NFS4ERR_STALE_STATEID: 4384 case -NFS4ERR_EXPIRED: 4385 break; 4386 default: 4387 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 4388 rpc_restart_call_prepare(task); 4389 } 4390 } 4391 4392 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 4393 { 4394 struct nfs4_unlockdata *calldata = data; 4395 4396 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 4397 return; 4398 if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) { 4399 /* Note: exit _without_ running nfs4_locku_done */ 4400 task->tk_action = NULL; 4401 return; 4402 } 4403 calldata->timestamp = jiffies; 4404 if (nfs4_setup_sequence(calldata->server, 4405 &calldata->arg.seq_args, 4406 &calldata->res.seq_res, task)) 4407 return; 4408 rpc_call_start(task); 4409 } 4410 4411 static const struct rpc_call_ops nfs4_locku_ops = { 4412 .rpc_call_prepare = nfs4_locku_prepare, 4413 .rpc_call_done = nfs4_locku_done, 4414 .rpc_release = nfs4_locku_release_calldata, 4415 }; 4416 4417 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 4418 struct nfs_open_context *ctx, 4419 struct nfs4_lock_state *lsp, 4420 struct nfs_seqid *seqid) 4421 { 4422 struct nfs4_unlockdata *data; 4423 struct rpc_message msg = { 4424 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 4425 .rpc_cred = ctx->cred, 4426 }; 4427 struct rpc_task_setup task_setup_data = { 4428 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 4429 .rpc_message = &msg, 4430 .callback_ops = &nfs4_locku_ops, 4431 .workqueue = nfsiod_workqueue, 4432 .flags = RPC_TASK_ASYNC, 4433 }; 4434 4435 /* Ensure this is an unlock - when canceling a lock, the 4436 * canceled lock is passed in, and 
it won't be an unlock. 4437 */ 4438 fl->fl_type = F_UNLCK; 4439 4440 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 4441 if (data == NULL) { 4442 nfs_free_seqid(seqid); 4443 return ERR_PTR(-ENOMEM); 4444 } 4445 4446 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4447 msg.rpc_argp = &data->arg; 4448 msg.rpc_resp = &data->res; 4449 task_setup_data.callback_data = data; 4450 return rpc_run_task(&task_setup_data); 4451 } 4452 4453 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 4454 { 4455 struct nfs_inode *nfsi = NFS_I(state->inode); 4456 struct nfs_seqid *seqid; 4457 struct nfs4_lock_state *lsp; 4458 struct rpc_task *task; 4459 int status = 0; 4460 unsigned char fl_flags = request->fl_flags; 4461 4462 status = nfs4_set_lock_state(state, request); 4463 /* Unlock _before_ we do the RPC call */ 4464 request->fl_flags |= FL_EXISTS; 4465 down_read(&nfsi->rwsem); 4466 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 4467 up_read(&nfsi->rwsem); 4468 goto out; 4469 } 4470 up_read(&nfsi->rwsem); 4471 if (status != 0) 4472 goto out; 4473 /* Is this a delegated lock? */ 4474 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) 4475 goto out; 4476 lsp = request->fl_u.nfs4_fl.owner; 4477 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 4478 status = -ENOMEM; 4479 if (seqid == NULL) 4480 goto out; 4481 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 4482 status = PTR_ERR(task); 4483 if (IS_ERR(task)) 4484 goto out; 4485 status = nfs4_wait_for_completion_rpc_task(task); 4486 rpc_put_task(task); 4487 out: 4488 request->fl_flags = fl_flags; 4489 return status; 4490 } 4491 4492 struct nfs4_lockdata { 4493 struct nfs_lock_args arg; 4494 struct nfs_lock_res res; 4495 struct nfs4_lock_state *lsp; 4496 struct nfs_open_context *ctx; 4497 struct file_lock fl; 4498 unsigned long timestamp; 4499 int rpc_status; 4500 int cancelled; 4501 struct nfs_server *server; 4502 }; 4503 4504 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 4505 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 4506 gfp_t gfp_mask) 4507 { 4508 struct nfs4_lockdata *p; 4509 struct inode *inode = lsp->ls_state->inode; 4510 struct nfs_server *server = NFS_SERVER(inode); 4511 4512 p = kzalloc(sizeof(*p), gfp_mask); 4513 if (p == NULL) 4514 return NULL; 4515 4516 p->arg.fh = NFS_FH(inode); 4517 p->arg.fl = &p->fl; 4518 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 4519 if (p->arg.open_seqid == NULL) 4520 goto out_free; 4521 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); 4522 if (p->arg.lock_seqid == NULL) 4523 goto out_free_seqid; 4524 p->arg.lock_stateid = &lsp->ls_stateid; 4525 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 4526 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 4527 p->arg.lock_owner.s_dev = server->s_dev; 4528 p->res.lock_seqid = p->arg.lock_seqid; 4529 p->lsp = lsp; 4530 p->server = server; 4531 atomic_inc(&lsp->ls_count); 4532 p->ctx = get_nfs_open_context(ctx); 4533 memcpy(&p->fl, fl, sizeof(p->fl)); 4534 return p; 4535 out_free_seqid: 4536 nfs_free_seqid(p->arg.open_seqid); 4537 out_free: 4538 kfree(p); 4539 return NULL; 4540 } 4541 4542 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 4543 { 4544 struct nfs4_lockdata *data = calldata; 4545 struct nfs4_state *state = data->lsp->ls_state; 4546 4547 dprintk("%s: begin!\n", __func__); 4548 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 4549 return; 
4550 /* Do we need to do an open_to_lock_owner? */ 4551 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { 4552 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) 4553 return; 4554 data->arg.open_stateid = &state->stateid; 4555 data->arg.new_lock_owner = 1; 4556 data->res.open_seqid = data->arg.open_seqid; 4557 } else 4558 data->arg.new_lock_owner = 0; 4559 data->timestamp = jiffies; 4560 if (nfs4_setup_sequence(data->server, 4561 &data->arg.seq_args, 4562 &data->res.seq_res, task)) 4563 return; 4564 rpc_call_start(task); 4565 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 4566 } 4567 4568 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) 4569 { 4570 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 4571 nfs4_lock_prepare(task, calldata); 4572 } 4573 4574 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 4575 { 4576 struct nfs4_lockdata *data = calldata; 4577 4578 dprintk("%s: begin!\n", __func__); 4579 4580 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4581 return; 4582 4583 data->rpc_status = task->tk_status; 4584 if (data->arg.new_lock_owner != 0) { 4585 if (data->rpc_status == 0) 4586 nfs_confirm_seqid(&data->lsp->ls_seqid, 0); 4587 else 4588 goto out; 4589 } 4590 if (data->rpc_status == 0) { 4591 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); 4592 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; 4593 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 4594 } 4595 out: 4596 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 4597 } 4598 4599 static void nfs4_lock_release(void *calldata) 4600 { 4601 struct nfs4_lockdata *data = calldata; 4602 4603 dprintk("%s: begin!\n", __func__); 4604 nfs_free_seqid(data->arg.open_seqid); 4605 if (data->cancelled != 0) { 4606 struct rpc_task *task; 4607 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4608 data->arg.lock_seqid); 4609 if (!IS_ERR(task)) 4610 rpc_put_task_async(task); 4611 dprintk("%s: cancelling lock!\n", __func__); 4612 } else 4613 nfs_free_seqid(data->arg.lock_seqid); 4614 nfs4_put_lock_state(data->lsp); 4615 put_nfs_open_context(data->ctx); 4616 kfree(data); 4617 dprintk("%s: done!\n", __func__); 4618 } 4619 4620 static const struct rpc_call_ops nfs4_lock_ops = { 4621 .rpc_call_prepare = nfs4_lock_prepare, 4622 .rpc_call_done = nfs4_lock_done, 4623 .rpc_release = nfs4_lock_release, 4624 }; 4625 4626 static const struct rpc_call_ops nfs4_recover_lock_ops = { 4627 .rpc_call_prepare = nfs4_recover_lock_prepare, 4628 .rpc_call_done = nfs4_lock_done, 4629 .rpc_release = nfs4_lock_release, 4630 }; 4631 4632 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4633 { 4634 switch (error) { 4635 case -NFS4ERR_ADMIN_REVOKED: 4636 case -NFS4ERR_BAD_STATEID: 4637 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4638 if (new_lock_owner != 0 || 4639 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) 4640 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 4641 break; 4642 case -NFS4ERR_STALE_STATEID: 4643 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4644 case -NFS4ERR_EXPIRED: 4645 nfs4_schedule_lease_recovery(server->nfs_client); 4646 }; 4647 } 4648 4649 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 4650 { 4651 struct nfs4_lockdata *data; 4652 struct rpc_task *task; 4653 struct rpc_message msg = { 4654 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 4655 .rpc_cred = state->owner->so_cred, 
4656 }; 4657 struct rpc_task_setup task_setup_data = { 4658 .rpc_client = NFS_CLIENT(state->inode), 4659 .rpc_message = &msg, 4660 .callback_ops = &nfs4_lock_ops, 4661 .workqueue = nfsiod_workqueue, 4662 .flags = RPC_TASK_ASYNC, 4663 }; 4664 int ret; 4665 4666 dprintk("%s: begin!\n", __func__); 4667 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 4668 fl->fl_u.nfs4_fl.owner, 4669 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 4670 if (data == NULL) 4671 return -ENOMEM; 4672 if (IS_SETLKW(cmd)) 4673 data->arg.block = 1; 4674 if (recovery_type > NFS_LOCK_NEW) { 4675 if (recovery_type == NFS_LOCK_RECLAIM) 4676 data->arg.reclaim = NFS_LOCK_RECLAIM; 4677 task_setup_data.callback_ops = &nfs4_recover_lock_ops; 4678 } 4679 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4680 msg.rpc_argp = &data->arg; 4681 msg.rpc_resp = &data->res; 4682 task_setup_data.callback_data = data; 4683 task = rpc_run_task(&task_setup_data); 4684 if (IS_ERR(task)) 4685 return PTR_ERR(task); 4686 ret = nfs4_wait_for_completion_rpc_task(task); 4687 if (ret == 0) { 4688 ret = data->rpc_status; 4689 if (ret) 4690 nfs4_handle_setlk_error(data->server, data->lsp, 4691 data->arg.new_lock_owner, ret); 4692 } else 4693 data->cancelled = 1; 4694 rpc_put_task(task); 4695 dprintk("%s: done, ret = %d!\n", __func__, ret); 4696 return ret; 4697 } 4698 4699 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 4700 { 4701 struct nfs_server *server = NFS_SERVER(state->inode); 4702 struct nfs4_exception exception = { 4703 .inode = state->inode, 4704 }; 4705 int err; 4706 4707 do { 4708 /* Cache the lock if possible... */ 4709 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4710 return 0; 4711 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 4712 if (err != -NFS4ERR_DELAY) 4713 break; 4714 nfs4_handle_exception(server, err, &exception); 4715 } while (exception.retry); 4716 return err; 4717 } 4718 4719 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 4720 { 4721 struct nfs_server *server = NFS_SERVER(state->inode); 4722 struct nfs4_exception exception = { 4723 .inode = state->inode, 4724 }; 4725 int err; 4726 4727 err = nfs4_set_lock_state(state, request); 4728 if (err != 0) 4729 return err; 4730 do { 4731 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4732 return 0; 4733 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 4734 switch (err) { 4735 default: 4736 goto out; 4737 case -NFS4ERR_GRACE: 4738 case -NFS4ERR_DELAY: 4739 nfs4_handle_exception(server, err, &exception); 4740 err = 0; 4741 } 4742 } while (exception.retry); 4743 out: 4744 return err; 4745 } 4746 4747 #if defined(CONFIG_NFS_V4_1) 4748 /** 4749 * nfs41_check_expired_locks - possibly free a lock stateid 4750 * 4751 * @state: NFSv4 state for an inode 4752 * 4753 * Returns NFS_OK if recovery for this stateid is now finished. 4754 * Otherwise a negative NFS4ERR value is returned. 4755 */ 4756 static int nfs41_check_expired_locks(struct nfs4_state *state) 4757 { 4758 int status, ret = -NFS4ERR_BAD_STATEID; 4759 struct nfs4_lock_state *lsp; 4760 struct nfs_server *server = NFS_SERVER(state->inode); 4761 4762 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 4763 if (lsp->ls_flags & NFS_LOCK_INITIALIZED) { 4764 status = nfs41_test_stateid(server, &lsp->ls_stateid); 4765 if (status != NFS_OK) { 4766 /* Free the stateid unless the server 4767 * informs us the stateid is unrecognized. 
*/ 4768 if (status != -NFS4ERR_BAD_STATEID) 4769 nfs41_free_stateid(server, 4770 &lsp->ls_stateid); 4771 lsp->ls_flags &= ~NFS_LOCK_INITIALIZED; 4772 ret = status; 4773 } 4774 } 4775 }; 4776 4777 return ret; 4778 } 4779 4780 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 4781 { 4782 int status = NFS_OK; 4783 4784 if (test_bit(LK_STATE_IN_USE, &state->flags)) 4785 status = nfs41_check_expired_locks(state); 4786 if (status != NFS_OK) 4787 status = nfs4_lock_expired(state, request); 4788 return status; 4789 } 4790 #endif 4791 4792 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4793 { 4794 struct nfs_inode *nfsi = NFS_I(state->inode); 4795 unsigned char fl_flags = request->fl_flags; 4796 int status = -ENOLCK; 4797 4798 if ((fl_flags & FL_POSIX) && 4799 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 4800 goto out; 4801 /* Is this a delegated open? */ 4802 status = nfs4_set_lock_state(state, request); 4803 if (status != 0) 4804 goto out; 4805 request->fl_flags |= FL_ACCESS; 4806 status = do_vfs_lock(request->fl_file, request); 4807 if (status < 0) 4808 goto out; 4809 down_read(&nfsi->rwsem); 4810 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 4811 /* Yes: cache locks! */ 4812 /* ...but avoid races with delegation recall... */ 4813 request->fl_flags = fl_flags & ~FL_SLEEP; 4814 status = do_vfs_lock(request->fl_file, request); 4815 goto out_unlock; 4816 } 4817 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 4818 if (status != 0) 4819 goto out_unlock; 4820 /* Note: we always want to sleep here! */ 4821 request->fl_flags = fl_flags | FL_SLEEP; 4822 if (do_vfs_lock(request->fl_file, request) < 0) 4823 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " 4824 "manager!\n", __func__); 4825 out_unlock: 4826 up_read(&nfsi->rwsem); 4827 out: 4828 request->fl_flags = fl_flags; 4829 return status; 4830 } 4831 4832 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4833 { 4834 struct nfs4_exception exception = { 4835 .state = state, 4836 .inode = state->inode, 4837 }; 4838 int err; 4839 4840 do { 4841 err = _nfs4_proc_setlk(state, cmd, request); 4842 if (err == -NFS4ERR_DENIED) 4843 err = -EAGAIN; 4844 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4845 err, &exception); 4846 } while (exception.retry); 4847 return err; 4848 } 4849 4850 static int 4851 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 4852 { 4853 struct nfs_open_context *ctx; 4854 struct nfs4_state *state; 4855 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 4856 int status; 4857 4858 /* verify open state */ 4859 ctx = nfs_file_open_context(filp); 4860 state = ctx->state; 4861 4862 if (request->fl_start < 0 || request->fl_end < 0) 4863 return -EINVAL; 4864 4865 if (IS_GETLK(cmd)) { 4866 if (state != NULL) 4867 return nfs4_proc_getlk(state, F_GETLK, request); 4868 return 0; 4869 } 4870 4871 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 4872 return -EINVAL; 4873 4874 if (request->fl_type == F_UNLCK) { 4875 if (state != NULL) 4876 return nfs4_proc_unlck(state, cmd, request); 4877 return 0; 4878 } 4879 4880 if (state == NULL) 4881 return -ENOLCK; 4882 /* 4883 * Don't rely on the VFS having checked the file open mode, 4884 * since it won't do this for flock() locks. 
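 * A task can, for example, take an exclusive flock() lock on a descriptor
 * that was opened read-only, so the open mode has to be checked here
 * before the request is sent to the server.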
4885 */ 4886 switch (request->fl_type) { 4887 case F_RDLCK: 4888 if (!(filp->f_mode & FMODE_READ)) 4889 return -EBADF; 4890 break; 4891 case F_WRLCK: 4892 if (!(filp->f_mode & FMODE_WRITE)) 4893 return -EBADF; 4894 } 4895 4896 do { 4897 status = nfs4_proc_setlk(state, cmd, request); 4898 if ((status != -EAGAIN) || IS_SETLK(cmd)) 4899 break; 4900 timeout = nfs4_set_lock_task_retry(timeout); 4901 status = -ERESTARTSYS; 4902 if (signalled()) 4903 break; 4904 } while(status < 0); 4905 return status; 4906 } 4907 4908 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) 4909 { 4910 struct nfs_server *server = NFS_SERVER(state->inode); 4911 struct nfs4_exception exception = { }; 4912 int err; 4913 4914 err = nfs4_set_lock_state(state, fl); 4915 if (err != 0) 4916 goto out; 4917 do { 4918 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 4919 switch (err) { 4920 default: 4921 printk(KERN_ERR "NFS: %s: unhandled error " 4922 "%d.\n", __func__, err); 4923 case 0: 4924 case -ESTALE: 4925 goto out; 4926 case -NFS4ERR_EXPIRED: 4927 nfs4_schedule_stateid_recovery(server, state); 4928 case -NFS4ERR_STALE_CLIENTID: 4929 case -NFS4ERR_STALE_STATEID: 4930 nfs4_schedule_lease_recovery(server->nfs_client); 4931 goto out; 4932 case -NFS4ERR_BADSESSION: 4933 case -NFS4ERR_BADSLOT: 4934 case -NFS4ERR_BAD_HIGH_SLOT: 4935 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4936 case -NFS4ERR_DEADSESSION: 4937 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 4938 goto out; 4939 case -ERESTARTSYS: 4940 /* 4941 * The show must go on: exit, but mark the 4942 * stateid as needing recovery. 4943 */ 4944 case -NFS4ERR_DELEG_REVOKED: 4945 case -NFS4ERR_ADMIN_REVOKED: 4946 case -NFS4ERR_BAD_STATEID: 4947 case -NFS4ERR_OPENMODE: 4948 nfs4_schedule_stateid_recovery(server, state); 4949 err = 0; 4950 goto out; 4951 case -EKEYEXPIRED: 4952 /* 4953 * User RPCSEC_GSS context has expired. 4954 * We cannot recover this stateid now, so 4955 * skip it and allow recovery thread to 4956 * proceed. 
4957 */ 4958 err = 0; 4959 goto out; 4960 case -ENOMEM: 4961 case -NFS4ERR_DENIED: 4962 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 4963 err = 0; 4964 goto out; 4965 case -NFS4ERR_DELAY: 4966 break; 4967 } 4968 err = nfs4_handle_exception(server, err, &exception); 4969 } while (exception.retry); 4970 out: 4971 return err; 4972 } 4973 4974 struct nfs_release_lockowner_data { 4975 struct nfs4_lock_state *lsp; 4976 struct nfs_server *server; 4977 struct nfs_release_lockowner_args args; 4978 }; 4979 4980 static void nfs4_release_lockowner_release(void *calldata) 4981 { 4982 struct nfs_release_lockowner_data *data = calldata; 4983 nfs4_free_lock_state(data->server, data->lsp); 4984 kfree(calldata); 4985 } 4986 4987 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 4988 .rpc_release = nfs4_release_lockowner_release, 4989 }; 4990 4991 int nfs4_release_lockowner(struct nfs4_lock_state *lsp) 4992 { 4993 struct nfs_server *server = lsp->ls_state->owner->so_server; 4994 struct nfs_release_lockowner_data *data; 4995 struct rpc_message msg = { 4996 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 4997 }; 4998 4999 if (server->nfs_client->cl_mvops->minor_version != 0) 5000 return -EINVAL; 5001 data = kmalloc(sizeof(*data), GFP_NOFS); 5002 if (!data) 5003 return -ENOMEM; 5004 data->lsp = lsp; 5005 data->server = server; 5006 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 5007 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 5008 data->args.lock_owner.s_dev = server->s_dev; 5009 msg.rpc_argp = &data->args; 5010 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5011 return 0; 5012 } 5013 5014 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 5015 5016 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 5017 const void *buf, size_t buflen, 5018 int flags, int type) 5019 { 5020 if (strcmp(key, "") != 0) 5021 return -EINVAL; 5022 5023 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); 5024 } 5025 5026 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 5027 void *buf, size_t buflen, int type) 5028 { 5029 if (strcmp(key, "") != 0) 5030 return -EINVAL; 5031 5032 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); 5033 } 5034 5035 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 5036 size_t list_len, const char *name, 5037 size_t name_len, int type) 5038 { 5039 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 5040 5041 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 5042 return 0; 5043 5044 if (list && len <= list_len) 5045 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 5046 return len; 5047 } 5048 5049 /* 5050 * nfs_fhget will use either the mounted_on_fileid or the fileid 5051 */ 5052 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 5053 { 5054 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 5055 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 5056 (fattr->valid & NFS_ATTR_FATTR_FSID) && 5057 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 5058 return; 5059 5060 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 5061 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 5062 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 5063 fattr->nlink = 2; 5064 } 5065 5066 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5067 const struct qstr *name, 5068 struct nfs4_fs_locations *fs_locations, 5069 struct page *page) 5070 { 5071 struct nfs_server *server = NFS_SERVER(dir); 5072 u32 bitmask[2] = { 5073 [0] = 
FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 5074 }; 5075 struct nfs4_fs_locations_arg args = { 5076 .dir_fh = NFS_FH(dir), 5077 .name = name, 5078 .page = page, 5079 .bitmask = bitmask, 5080 }; 5081 struct nfs4_fs_locations_res res = { 5082 .fs_locations = fs_locations, 5083 }; 5084 struct rpc_message msg = { 5085 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 5086 .rpc_argp = &args, 5087 .rpc_resp = &res, 5088 }; 5089 int status; 5090 5091 dprintk("%s: start\n", __func__); 5092 5093 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 5094 * is not supported */ 5095 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 5096 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 5097 else 5098 bitmask[0] |= FATTR4_WORD0_FILEID; 5099 5100 nfs_fattr_init(&fs_locations->fattr); 5101 fs_locations->server = server; 5102 fs_locations->nlocations = 0; 5103 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 5104 dprintk("%s: returned status = %d\n", __func__, status); 5105 return status; 5106 } 5107 5108 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5109 const struct qstr *name, 5110 struct nfs4_fs_locations *fs_locations, 5111 struct page *page) 5112 { 5113 struct nfs4_exception exception = { }; 5114 int err; 5115 do { 5116 err = nfs4_handle_exception(NFS_SERVER(dir), 5117 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page), 5118 &exception); 5119 } while (exception.retry); 5120 return err; 5121 } 5122 5123 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) 5124 { 5125 int status; 5126 struct nfs4_secinfo_arg args = { 5127 .dir_fh = NFS_FH(dir), 5128 .name = name, 5129 }; 5130 struct nfs4_secinfo_res res = { 5131 .flavors = flavors, 5132 }; 5133 struct rpc_message msg = { 5134 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 5135 .rpc_argp = &args, 5136 .rpc_resp = &res, 5137 }; 5138 5139 dprintk("NFS call secinfo %s\n", name->name); 5140 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 5141 dprintk("NFS reply secinfo: %d\n", status); 5142 return status; 5143 } 5144 5145 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 5146 struct nfs4_secinfo_flavors *flavors) 5147 { 5148 struct nfs4_exception exception = { }; 5149 int err; 5150 do { 5151 err = nfs4_handle_exception(NFS_SERVER(dir), 5152 _nfs4_proc_secinfo(dir, name, flavors), 5153 &exception); 5154 } while (exception.retry); 5155 return err; 5156 } 5157 5158 #ifdef CONFIG_NFS_V4_1 5159 /* 5160 * Check the exchange flags returned by the server for invalid flags, having 5161 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 5162 * DS flags set. 
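 *
 * That is, the flags are invalid if any bit outside EXCHGID4_FLAG_MASK_R
 * is set, if both USE_PNFS_MDS and USE_NON_PNFS are set, or if none of
 * the USE_PNFS_MDS/USE_NON_PNFS/USE_PNFS_DS role bits is set at all.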
5163 */ 5164 static int nfs4_check_cl_exchange_flags(u32 flags) 5165 { 5166 if (flags & ~EXCHGID4_FLAG_MASK_R) 5167 goto out_inval; 5168 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 5169 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 5170 goto out_inval; 5171 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 5172 goto out_inval; 5173 return NFS_OK; 5174 out_inval: 5175 return -NFS4ERR_INVAL; 5176 } 5177 5178 static bool 5179 nfs41_same_server_scope(struct nfs41_server_scope *a, 5180 struct nfs41_server_scope *b) 5181 { 5182 if (a->server_scope_sz == b->server_scope_sz && 5183 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 5184 return true; 5185 5186 return false; 5187 } 5188 5189 /* 5190 * nfs4_proc_bind_conn_to_session() 5191 * 5192 * The 4.1 client currently uses the same TCP connection for the 5193 * fore and backchannel. 5194 */ 5195 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 5196 { 5197 int status; 5198 struct nfs41_bind_conn_to_session_res res; 5199 struct rpc_message msg = { 5200 .rpc_proc = 5201 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 5202 .rpc_argp = clp, 5203 .rpc_resp = &res, 5204 .rpc_cred = cred, 5205 }; 5206 5207 dprintk("--> %s\n", __func__); 5208 BUG_ON(clp == NULL); 5209 5210 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5211 if (unlikely(res.session == NULL)) { 5212 status = -ENOMEM; 5213 goto out; 5214 } 5215 5216 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5217 if (status == 0) { 5218 if (memcmp(res.session->sess_id.data, 5219 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 5220 dprintk("NFS: %s: Session ID mismatch\n", __func__); 5221 status = -EIO; 5222 goto out_session; 5223 } 5224 if (res.dir != NFS4_CDFS4_BOTH) { 5225 dprintk("NFS: %s: Unexpected direction from server\n", 5226 __func__); 5227 status = -EIO; 5228 goto out_session; 5229 } 5230 if (res.use_conn_in_rdma_mode) { 5231 dprintk("NFS: %s: Server returned RDMA mode = true\n", 5232 __func__); 5233 status = -EIO; 5234 goto out_session; 5235 } 5236 } 5237 out_session: 5238 kfree(res.session); 5239 out: 5240 dprintk("<-- %s status= %d\n", __func__, status); 5241 return status; 5242 } 5243 5244 /* 5245 * nfs4_proc_exchange_id() 5246 * 5247 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5248 * 5249 * Since the clientid has expired, all compounds using sessions 5250 * associated with the stale clientid will be returning 5251 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 5252 * be in some phase of session reset. 
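 *
 * On success the returned server_owner, server_scope and implementation id
 * buffers are handed over to the nfs_client; anything that is not kept is
 * freed again before returning.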
5253 */ 5254 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 5255 { 5256 nfs4_verifier verifier; 5257 struct nfs41_exchange_id_args args = { 5258 .verifier = &verifier, 5259 .client = clp, 5260 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, 5261 }; 5262 struct nfs41_exchange_id_res res = { 5263 0 5264 }; 5265 int status; 5266 struct rpc_message msg = { 5267 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 5268 .rpc_argp = &args, 5269 .rpc_resp = &res, 5270 .rpc_cred = cred, 5271 }; 5272 5273 nfs4_init_boot_verifier(clp, &verifier); 5274 args.id_len = scnprintf(args.id, sizeof(args.id), 5275 "%s/%s", 5276 clp->cl_ipaddr, 5277 clp->cl_rpcclient->cl_nodename); 5278 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 5279 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5280 args.id_len, args.id); 5281 5282 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 5283 GFP_NOFS); 5284 if (unlikely(res.server_owner == NULL)) { 5285 status = -ENOMEM; 5286 goto out; 5287 } 5288 5289 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 5290 GFP_NOFS); 5291 if (unlikely(res.server_scope == NULL)) { 5292 status = -ENOMEM; 5293 goto out_server_owner; 5294 } 5295 5296 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 5297 if (unlikely(res.impl_id == NULL)) { 5298 status = -ENOMEM; 5299 goto out_server_scope; 5300 } 5301 5302 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5303 if (status == 0) 5304 status = nfs4_check_cl_exchange_flags(res.flags); 5305 5306 if (status == 0) { 5307 clp->cl_clientid = res.clientid; 5308 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 5309 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 5310 clp->cl_seqid = res.seqid; 5311 5312 kfree(clp->cl_serverowner); 5313 clp->cl_serverowner = res.server_owner; 5314 res.server_owner = NULL; 5315 5316 /* use the most recent implementation id */ 5317 kfree(clp->cl_implid); 5318 clp->cl_implid = res.impl_id; 5319 5320 if (clp->cl_serverscope != NULL && 5321 !nfs41_same_server_scope(clp->cl_serverscope, 5322 res.server_scope)) { 5323 dprintk("%s: server_scope mismatch detected\n", 5324 __func__); 5325 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 5326 kfree(clp->cl_serverscope); 5327 clp->cl_serverscope = NULL; 5328 } 5329 5330 if (clp->cl_serverscope == NULL) { 5331 clp->cl_serverscope = res.server_scope; 5332 goto out; 5333 } 5334 } else 5335 kfree(res.impl_id); 5336 5337 out_server_owner: 5338 kfree(res.server_owner); 5339 out_server_scope: 5340 kfree(res.server_scope); 5341 out: 5342 if (clp->cl_implid != NULL) 5343 dprintk("NFS reply exchange_id: Server Implementation ID: " 5344 "domain: %s, name: %s, date: %llu,%u\n", 5345 clp->cl_implid->domain, clp->cl_implid->name, 5346 clp->cl_implid->date.seconds, 5347 clp->cl_implid->date.nseconds); 5348 dprintk("NFS reply exchange_id: %d\n", status); 5349 return status; 5350 } 5351 5352 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 5353 struct rpc_cred *cred) 5354 { 5355 struct rpc_message msg = { 5356 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 5357 .rpc_argp = clp, 5358 .rpc_cred = cred, 5359 }; 5360 int status; 5361 5362 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5363 if (status) 5364 dprintk("NFS: Got error %d from the server %s on " 5365 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5366 return status; 5367 } 5368 5369 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 5370 struct rpc_cred *cred) 5371 { 5372 unsigned int 
loop; 5373 int ret; 5374 5375 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 5376 ret = _nfs4_proc_destroy_clientid(clp, cred); 5377 switch (ret) { 5378 case -NFS4ERR_DELAY: 5379 case -NFS4ERR_CLIENTID_BUSY: 5380 ssleep(1); 5381 break; 5382 default: 5383 return ret; 5384 } 5385 } 5386 return 0; 5387 } 5388 5389 int nfs4_destroy_clientid(struct nfs_client *clp) 5390 { 5391 struct rpc_cred *cred; 5392 int ret = 0; 5393 5394 if (clp->cl_mvops->minor_version < 1) 5395 goto out; 5396 if (clp->cl_exchange_flags == 0) 5397 goto out; 5398 cred = nfs4_get_exchange_id_cred(clp); 5399 ret = nfs4_proc_destroy_clientid(clp, cred); 5400 if (cred) 5401 put_rpccred(cred); 5402 switch (ret) { 5403 case 0: 5404 case -NFS4ERR_STALE_CLIENTID: 5405 clp->cl_exchange_flags = 0; 5406 } 5407 out: 5408 return ret; 5409 } 5410 5411 struct nfs4_get_lease_time_data { 5412 struct nfs4_get_lease_time_args *args; 5413 struct nfs4_get_lease_time_res *res; 5414 struct nfs_client *clp; 5415 }; 5416 5417 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 5418 void *calldata) 5419 { 5420 int ret; 5421 struct nfs4_get_lease_time_data *data = 5422 (struct nfs4_get_lease_time_data *)calldata; 5423 5424 dprintk("--> %s\n", __func__); 5425 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 5426 /* just setup sequence, do not trigger session recovery 5427 since we're invoked within one */ 5428 ret = nfs41_setup_sequence(data->clp->cl_session, 5429 &data->args->la_seq_args, 5430 &data->res->lr_seq_res, task); 5431 5432 BUG_ON(ret == -EAGAIN); 5433 rpc_call_start(task); 5434 dprintk("<-- %s\n", __func__); 5435 } 5436 5437 /* 5438 * Called from nfs4_state_manager thread for session setup, so don't recover 5439 * from sequence operation or clientid errors. 5440 */ 5441 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 5442 { 5443 struct nfs4_get_lease_time_data *data = 5444 (struct nfs4_get_lease_time_data *)calldata; 5445 5446 dprintk("--> %s\n", __func__); 5447 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 5448 return; 5449 switch (task->tk_status) { 5450 case -NFS4ERR_DELAY: 5451 case -NFS4ERR_GRACE: 5452 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 5453 rpc_delay(task, NFS4_POLL_RETRY_MIN); 5454 task->tk_status = 0; 5455 /* fall through */ 5456 case -NFS4ERR_RETRY_UNCACHED_REP: 5457 rpc_restart_call_prepare(task); 5458 return; 5459 } 5460 dprintk("<-- %s\n", __func__); 5461 } 5462 5463 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 5464 .rpc_call_prepare = nfs4_get_lease_time_prepare, 5465 .rpc_call_done = nfs4_get_lease_time_done, 5466 }; 5467 5468 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 5469 { 5470 struct rpc_task *task; 5471 struct nfs4_get_lease_time_args args; 5472 struct nfs4_get_lease_time_res res = { 5473 .lr_fsinfo = fsinfo, 5474 }; 5475 struct nfs4_get_lease_time_data data = { 5476 .args = &args, 5477 .res = &res, 5478 .clp = clp, 5479 }; 5480 struct rpc_message msg = { 5481 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 5482 .rpc_argp = &args, 5483 .rpc_resp = &res, 5484 }; 5485 struct rpc_task_setup task_setup = { 5486 .rpc_client = clp->cl_rpcclient, 5487 .rpc_message = &msg, 5488 .callback_ops = &nfs4_get_lease_time_ops, 5489 .callback_data = &data, 5490 .flags = RPC_TASK_TIMEOUT, 5491 }; 5492 int status; 5493 5494 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 5495 dprintk("--> %s\n", __func__); 5496 task = rpc_run_task(&task_setup); 5497 5498 if 
(IS_ERR(task)) 5499 status = PTR_ERR(task); 5500 else { 5501 status = task->tk_status; 5502 rpc_put_task(task); 5503 } 5504 dprintk("<-- %s return %d\n", __func__, status); 5505 5506 return status; 5507 } 5508 5509 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) 5510 { 5511 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); 5512 } 5513 5514 static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, 5515 struct nfs4_slot *new, 5516 u32 max_slots, 5517 u32 ivalue) 5518 { 5519 struct nfs4_slot *old = NULL; 5520 u32 i; 5521 5522 spin_lock(&tbl->slot_tbl_lock); 5523 if (new) { 5524 old = tbl->slots; 5525 tbl->slots = new; 5526 tbl->max_slots = max_slots; 5527 } 5528 tbl->highest_used_slotid = -1; /* no slot is currently used */ 5529 for (i = 0; i < tbl->max_slots; i++) 5530 tbl->slots[i].seq_nr = ivalue; 5531 spin_unlock(&tbl->slot_tbl_lock); 5532 kfree(old); 5533 } 5534 5535 /* 5536 * (re)Initialise a slot table 5537 */ 5538 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, 5539 u32 ivalue) 5540 { 5541 struct nfs4_slot *new = NULL; 5542 int ret = -ENOMEM; 5543 5544 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, 5545 max_reqs, tbl->max_slots); 5546 5547 /* Does the newly negotiated max_reqs match the existing slot table? */ 5548 if (max_reqs != tbl->max_slots) { 5549 new = nfs4_alloc_slots(max_reqs, GFP_NOFS); 5550 if (!new) 5551 goto out; 5552 } 5553 ret = 0; 5554 5555 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); 5556 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, 5557 tbl, tbl->slots, tbl->max_slots); 5558 out: 5559 dprintk("<-- %s: return %d\n", __func__, ret); 5560 return ret; 5561 } 5562 5563 /* Destroy the slot table */ 5564 static void nfs4_destroy_slot_tables(struct nfs4_session *session) 5565 { 5566 if (session->fc_slot_table.slots != NULL) { 5567 kfree(session->fc_slot_table.slots); 5568 session->fc_slot_table.slots = NULL; 5569 } 5570 if (session->bc_slot_table.slots != NULL) { 5571 kfree(session->bc_slot_table.slots); 5572 session->bc_slot_table.slots = NULL; 5573 } 5574 return; 5575 } 5576 5577 /* 5578 * Initialize or reset the forechannel and backchannel tables 5579 */ 5580 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) 5581 { 5582 struct nfs4_slot_table *tbl; 5583 int status; 5584 5585 dprintk("--> %s\n", __func__); 5586 /* Fore channel */ 5587 tbl = &ses->fc_slot_table; 5588 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); 5589 if (status) /* -ENOMEM */ 5590 return status; 5591 /* Back channel */ 5592 tbl = &ses->bc_slot_table; 5593 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); 5594 if (status && tbl->slots == NULL) 5595 /* Fore and back channel share a connection so get 5596 * both slot tables or neither */ 5597 nfs4_destroy_slot_tables(ses); 5598 return status; 5599 } 5600 5601 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) 5602 { 5603 struct nfs4_session *session; 5604 struct nfs4_slot_table *tbl; 5605 5606 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5607 if (!session) 5608 return NULL; 5609 5610 tbl = &session->fc_slot_table; 5611 tbl->highest_used_slotid = NFS4_NO_SLOT; 5612 spin_lock_init(&tbl->slot_tbl_lock); 5613 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); 5614 init_completion(&tbl->complete); 5615 5616 tbl = &session->bc_slot_table; 5617 tbl->highest_used_slotid = NFS4_NO_SLOT; 5618 spin_lock_init(&tbl->slot_tbl_lock); 5619 
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); 5620 init_completion(&tbl->complete); 5621 5622 session->session_state = 1<<NFS4_SESSION_INITING; 5623 5624 session->clp = clp; 5625 return session; 5626 } 5627 5628 void nfs4_destroy_session(struct nfs4_session *session) 5629 { 5630 struct rpc_xprt *xprt; 5631 struct rpc_cred *cred; 5632 5633 cred = nfs4_get_exchange_id_cred(session->clp); 5634 nfs4_proc_destroy_session(session, cred); 5635 if (cred) 5636 put_rpccred(cred); 5637 5638 rcu_read_lock(); 5639 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt); 5640 rcu_read_unlock(); 5641 dprintk("%s Destroy backchannel for xprt %p\n", 5642 __func__, xprt); 5643 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); 5644 nfs4_destroy_slot_tables(session); 5645 kfree(session); 5646 } 5647 5648 /* 5649 * Initialize the values to be used by the client in CREATE_SESSION 5650 * If nfs4_init_session set the fore channel request and response sizes, 5651 * use them. 5652 * 5653 * Set the back channel max_resp_sz_cached to zero to force the client to 5654 * always set csa_cachethis to FALSE because the current implementation 5655 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 5656 */ 5657 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 5658 { 5659 struct nfs4_session *session = args->client->cl_session; 5660 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz, 5661 mxresp_sz = session->fc_attrs.max_resp_sz; 5662 5663 if (mxrqst_sz == 0) 5664 mxrqst_sz = NFS_MAX_FILE_IO_SIZE; 5665 if (mxresp_sz == 0) 5666 mxresp_sz = NFS_MAX_FILE_IO_SIZE; 5667 /* Fore channel attributes */ 5668 args->fc_attrs.max_rqst_sz = mxrqst_sz; 5669 args->fc_attrs.max_resp_sz = mxresp_sz; 5670 args->fc_attrs.max_ops = NFS4_MAX_OPS; 5671 args->fc_attrs.max_reqs = max_session_slots; 5672 5673 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 5674 "max_ops=%u max_reqs=%u\n", 5675 __func__, 5676 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 5677 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 5678 5679 /* Back channel attributes */ 5680 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 5681 args->bc_attrs.max_resp_sz = PAGE_SIZE; 5682 args->bc_attrs.max_resp_sz_cached = 0; 5683 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 5684 args->bc_attrs.max_reqs = 1; 5685 5686 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 5687 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 5688 __func__, 5689 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 5690 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 5691 args->bc_attrs.max_reqs); 5692 } 5693 5694 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5695 { 5696 struct nfs4_channel_attrs *sent = &args->fc_attrs; 5697 struct nfs4_channel_attrs *rcvd = &session->fc_attrs; 5698 5699 if (rcvd->max_resp_sz > sent->max_resp_sz) 5700 return -EINVAL; 5701 /* 5702 * Our requested max_ops is the minimum we need; we're not 5703 * prepared to break up compounds into smaller pieces than that. 
5704 * So, no point even trying to continue if the server won't 5705 * cooperate: 5706 */ 5707 if (rcvd->max_ops < sent->max_ops) 5708 return -EINVAL; 5709 if (rcvd->max_reqs == 0) 5710 return -EINVAL; 5711 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 5712 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 5713 return 0; 5714 } 5715 5716 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5717 { 5718 struct nfs4_channel_attrs *sent = &args->bc_attrs; 5719 struct nfs4_channel_attrs *rcvd = &session->bc_attrs; 5720 5721 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 5722 return -EINVAL; 5723 if (rcvd->max_resp_sz < sent->max_resp_sz) 5724 return -EINVAL; 5725 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 5726 return -EINVAL; 5727 /* These would render the backchannel useless: */ 5728 if (rcvd->max_ops != sent->max_ops) 5729 return -EINVAL; 5730 if (rcvd->max_reqs != sent->max_reqs) 5731 return -EINVAL; 5732 return 0; 5733 } 5734 5735 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 5736 struct nfs4_session *session) 5737 { 5738 int ret; 5739 5740 ret = nfs4_verify_fore_channel_attrs(args, session); 5741 if (ret) 5742 return ret; 5743 return nfs4_verify_back_channel_attrs(args, session); 5744 } 5745 5746 static int _nfs4_proc_create_session(struct nfs_client *clp, 5747 struct rpc_cred *cred) 5748 { 5749 struct nfs4_session *session = clp->cl_session; 5750 struct nfs41_create_session_args args = { 5751 .client = clp, 5752 .cb_program = NFS4_CALLBACK, 5753 }; 5754 struct nfs41_create_session_res res = { 5755 .client = clp, 5756 }; 5757 struct rpc_message msg = { 5758 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 5759 .rpc_argp = &args, 5760 .rpc_resp = &res, 5761 .rpc_cred = cred, 5762 }; 5763 int status; 5764 5765 nfs4_init_channel_attrs(&args); 5766 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 5767 5768 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5769 5770 if (!status) 5771 /* Verify the session's negotiated channel_attrs values */ 5772 status = nfs4_verify_channel_attrs(&args, session); 5773 if (!status) { 5774 /* Increment the clientid slot sequence id */ 5775 clp->cl_seqid++; 5776 } 5777 5778 return status; 5779 } 5780 5781 /* 5782 * Issues a CREATE_SESSION operation to the server. 5783 * It is the responsibility of the caller to verify the session is 5784 * expired before calling this routine. 5785 */ 5786 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 5787 { 5788 int status; 5789 unsigned *ptr; 5790 struct nfs4_session *session = clp->cl_session; 5791 5792 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5793 5794 status = _nfs4_proc_create_session(clp, cred); 5795 if (status) 5796 goto out; 5797 5798 /* Init or reset the session slot tables */ 5799 status = nfs4_setup_session_slot_tables(session); 5800 dprintk("slot table setup returned %d\n", status); 5801 if (status) 5802 goto out; 5803 5804 ptr = (unsigned *)&session->sess_id.data[0]; 5805 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 5806 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 5807 out: 5808 dprintk("<-- %s\n", __func__); 5809 return status; 5810 } 5811 5812 /* 5813 * Issue the over-the-wire RPC DESTROY_SESSION. 5814 * The caller must serialize access to this routine. 
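 * A failure reply from the server is only logged; the caller tears the
 * session down on the client regardless of the result.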
 */
int nfs4_proc_destroy_session(struct nfs4_session *session,
		struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
		.rpc_argp = session,
		.rpc_cred = cred,
	};
	int status = 0;

	dprintk("--> nfs4_proc_destroy_session\n");

	/* session is still being setup */
	if (session->clp->cl_cons_state != NFS_CS_READY)
		return status;

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);

	if (status)
		dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
			"Session has been destroyed regardless...\n", status);

	dprintk("<-- nfs4_proc_destroy_session\n");
	return status;
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_session *session;
	unsigned int rsize, wsize;

	if (!nfs4_has_session(clp))
		return 0;

	session = clp->cl_session;
	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {

		rsize = server->rsize;
		if (rsize == 0)
			rsize = NFS_MAX_FILE_IO_SIZE;
		wsize = server->wsize;
		if (wsize == 0)
			wsize = NFS_MAX_FILE_IO_SIZE;

		session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
		session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
	}
	spin_unlock(&clp->cl_lock);

	return nfs41_check_session_ready(clp);
}

int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead, set the
		 * DS lease to be equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);


/*
 * Renew the cl_session lease.
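 * Under NFSv4.1 an otherwise idle client keeps its lease alive by sending
 * a SEQUENCE operation; the helpers below implement that renewal call.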
5921 */ 5922 struct nfs4_sequence_data { 5923 struct nfs_client *clp; 5924 struct nfs4_sequence_args args; 5925 struct nfs4_sequence_res res; 5926 }; 5927 5928 static void nfs41_sequence_release(void *data) 5929 { 5930 struct nfs4_sequence_data *calldata = data; 5931 struct nfs_client *clp = calldata->clp; 5932 5933 if (atomic_read(&clp->cl_count) > 1) 5934 nfs4_schedule_state_renewal(clp); 5935 nfs_put_client(clp); 5936 kfree(calldata); 5937 } 5938 5939 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 5940 { 5941 switch(task->tk_status) { 5942 case -NFS4ERR_DELAY: 5943 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5944 return -EAGAIN; 5945 default: 5946 nfs4_schedule_lease_recovery(clp); 5947 } 5948 return 0; 5949 } 5950 5951 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 5952 { 5953 struct nfs4_sequence_data *calldata = data; 5954 struct nfs_client *clp = calldata->clp; 5955 5956 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 5957 return; 5958 5959 if (task->tk_status < 0) { 5960 dprintk("%s ERROR %d\n", __func__, task->tk_status); 5961 if (atomic_read(&clp->cl_count) == 1) 5962 goto out; 5963 5964 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 5965 rpc_restart_call_prepare(task); 5966 return; 5967 } 5968 } 5969 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 5970 out: 5971 dprintk("<-- %s\n", __func__); 5972 } 5973 5974 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 5975 { 5976 struct nfs4_sequence_data *calldata = data; 5977 struct nfs_client *clp = calldata->clp; 5978 struct nfs4_sequence_args *args; 5979 struct nfs4_sequence_res *res; 5980 5981 args = task->tk_msg.rpc_argp; 5982 res = task->tk_msg.rpc_resp; 5983 5984 if (nfs41_setup_sequence(clp->cl_session, args, res, task)) 5985 return; 5986 rpc_call_start(task); 5987 } 5988 5989 static const struct rpc_call_ops nfs41_sequence_ops = { 5990 .rpc_call_done = nfs41_sequence_call_done, 5991 .rpc_call_prepare = nfs41_sequence_prepare, 5992 .rpc_release = nfs41_sequence_release, 5993 }; 5994 5995 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 5996 { 5997 struct nfs4_sequence_data *calldata; 5998 struct rpc_message msg = { 5999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 6000 .rpc_cred = cred, 6001 }; 6002 struct rpc_task_setup task_setup_data = { 6003 .rpc_client = clp->cl_rpcclient, 6004 .rpc_message = &msg, 6005 .callback_ops = &nfs41_sequence_ops, 6006 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, 6007 }; 6008 6009 if (!atomic_inc_not_zero(&clp->cl_count)) 6010 return ERR_PTR(-EIO); 6011 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 6012 if (calldata == NULL) { 6013 nfs_put_client(clp); 6014 return ERR_PTR(-ENOMEM); 6015 } 6016 nfs41_init_sequence(&calldata->args, &calldata->res, 0); 6017 msg.rpc_argp = &calldata->args; 6018 msg.rpc_resp = &calldata->res; 6019 calldata->clp = clp; 6020 task_setup_data.callback_data = calldata; 6021 6022 return rpc_run_task(&task_setup_data); 6023 } 6024 6025 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 6026 { 6027 struct rpc_task *task; 6028 int ret = 0; 6029 6030 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 6031 return 0; 6032 task = _nfs41_proc_sequence(clp, cred); 6033 if (IS_ERR(task)) 6034 ret = PTR_ERR(task); 6035 else 6036 rpc_put_task_async(task); 6037 dprintk("<-- %s status=%d\n", __func__, ret); 6038 return ret; 6039 } 6040 6041 static int nfs4_proc_sequence(struct 
nfs_client *clp, struct rpc_cred *cred) 6042 { 6043 struct rpc_task *task; 6044 int ret; 6045 6046 task = _nfs41_proc_sequence(clp, cred); 6047 if (IS_ERR(task)) { 6048 ret = PTR_ERR(task); 6049 goto out; 6050 } 6051 ret = rpc_wait_for_completion_task(task); 6052 if (!ret) { 6053 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; 6054 6055 if (task->tk_status == 0) 6056 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 6057 ret = task->tk_status; 6058 } 6059 rpc_put_task(task); 6060 out: 6061 dprintk("<-- %s status=%d\n", __func__, ret); 6062 return ret; 6063 } 6064 6065 struct nfs4_reclaim_complete_data { 6066 struct nfs_client *clp; 6067 struct nfs41_reclaim_complete_args arg; 6068 struct nfs41_reclaim_complete_res res; 6069 }; 6070 6071 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 6072 { 6073 struct nfs4_reclaim_complete_data *calldata = data; 6074 6075 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 6076 if (nfs41_setup_sequence(calldata->clp->cl_session, 6077 &calldata->arg.seq_args, 6078 &calldata->res.seq_res, task)) 6079 return; 6080 6081 rpc_call_start(task); 6082 } 6083 6084 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 6085 { 6086 switch(task->tk_status) { 6087 case 0: 6088 case -NFS4ERR_COMPLETE_ALREADY: 6089 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 6090 break; 6091 case -NFS4ERR_DELAY: 6092 rpc_delay(task, NFS4_POLL_RETRY_MAX); 6093 /* fall through */ 6094 case -NFS4ERR_RETRY_UNCACHED_REP: 6095 return -EAGAIN; 6096 default: 6097 nfs4_schedule_lease_recovery(clp); 6098 } 6099 return 0; 6100 } 6101 6102 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 6103 { 6104 struct nfs4_reclaim_complete_data *calldata = data; 6105 struct nfs_client *clp = calldata->clp; 6106 struct nfs4_sequence_res *res = &calldata->res.seq_res; 6107 6108 dprintk("--> %s\n", __func__); 6109 if (!nfs41_sequence_done(task, res)) 6110 return; 6111 6112 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 6113 rpc_restart_call_prepare(task); 6114 return; 6115 } 6116 dprintk("<-- %s\n", __func__); 6117 } 6118 6119 static void nfs4_free_reclaim_complete_data(void *data) 6120 { 6121 struct nfs4_reclaim_complete_data *calldata = data; 6122 6123 kfree(calldata); 6124 } 6125 6126 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 6127 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 6128 .rpc_call_done = nfs4_reclaim_complete_done, 6129 .rpc_release = nfs4_free_reclaim_complete_data, 6130 }; 6131 6132 /* 6133 * Issue a global reclaim complete. 
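 * rca_one_fs is set to zero, i.e. reclaim is complete for all filesystems
 * on this client, not just the current one.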
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	dprintk("--> %s\n", __func__);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	calldata->arg.one_fs = 0;

	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out;
	}
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);
	return 0;
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
	 */
	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
				&lgp->res.seq_res, task))
		return;
	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
					  NFS_I(lgp->args.inode)->layout,
					  lgp->args.ctx->state)) {
		rpc_exit(task, NFS4_OK);
		return;
	}
	rpc_call_start(task);
}

static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		task->tk_status = -NFS4ERR_DELAY;
		/* Fall through */
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_layoutget_release(void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;

	dprintk("--> %s\n", __func__);
	put_nfs_open_context(lgp->args.ctx);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}

static const struct rpc_call_ops nfs4_layoutget_call_ops = {
	.rpc_call_prepare = nfs4_layoutget_prepare,
	.rpc_call_done = nfs4_layoutget_done,
	.rpc_release = nfs4_layoutget_release,
};

int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
{
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
		.rpc_argp = &lgp->args,
		.rpc_resp = &lgp->res,
	};
	struct
rpc_task_setup task_setup_data = { 6252 .rpc_client = server->client, 6253 .rpc_message = &msg, 6254 .callback_ops = &nfs4_layoutget_call_ops, 6255 .callback_data = lgp, 6256 .flags = RPC_TASK_ASYNC, 6257 }; 6258 int status = 0; 6259 6260 dprintk("--> %s\n", __func__); 6261 6262 lgp->res.layoutp = &lgp->args.layout; 6263 lgp->res.seq_res.sr_slot = NULL; 6264 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 6265 task = rpc_run_task(&task_setup_data); 6266 if (IS_ERR(task)) 6267 return PTR_ERR(task); 6268 status = nfs4_wait_for_completion_rpc_task(task); 6269 if (status == 0) 6270 status = task->tk_status; 6271 if (status == 0) 6272 status = pnfs_layout_process(lgp); 6273 rpc_put_task(task); 6274 dprintk("<-- %s status=%d\n", __func__, status); 6275 return status; 6276 } 6277 6278 static void 6279 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 6280 { 6281 struct nfs4_layoutreturn *lrp = calldata; 6282 6283 dprintk("--> %s\n", __func__); 6284 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args, 6285 &lrp->res.seq_res, task)) 6286 return; 6287 rpc_call_start(task); 6288 } 6289 6290 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 6291 { 6292 struct nfs4_layoutreturn *lrp = calldata; 6293 struct nfs_server *server; 6294 struct pnfs_layout_hdr *lo = lrp->args.layout; 6295 6296 dprintk("--> %s\n", __func__); 6297 6298 if (!nfs4_sequence_done(task, &lrp->res.seq_res)) 6299 return; 6300 6301 server = NFS_SERVER(lrp->args.inode); 6302 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 6303 rpc_restart_call_prepare(task); 6304 return; 6305 } 6306 spin_lock(&lo->plh_inode->i_lock); 6307 if (task->tk_status == 0) { 6308 if (lrp->res.lrs_present) { 6309 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 6310 } else 6311 BUG_ON(!list_empty(&lo->plh_segs)); 6312 } 6313 lo->plh_block_lgets--; 6314 spin_unlock(&lo->plh_inode->i_lock); 6315 dprintk("<-- %s\n", __func__); 6316 } 6317 6318 static void nfs4_layoutreturn_release(void *calldata) 6319 { 6320 struct nfs4_layoutreturn *lrp = calldata; 6321 6322 dprintk("--> %s\n", __func__); 6323 put_layout_hdr(lrp->args.layout); 6324 kfree(calldata); 6325 dprintk("<-- %s\n", __func__); 6326 } 6327 6328 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 6329 .rpc_call_prepare = nfs4_layoutreturn_prepare, 6330 .rpc_call_done = nfs4_layoutreturn_done, 6331 .rpc_release = nfs4_layoutreturn_release, 6332 }; 6333 6334 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp) 6335 { 6336 struct rpc_task *task; 6337 struct rpc_message msg = { 6338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 6339 .rpc_argp = &lrp->args, 6340 .rpc_resp = &lrp->res, 6341 }; 6342 struct rpc_task_setup task_setup_data = { 6343 .rpc_client = lrp->clp->cl_rpcclient, 6344 .rpc_message = &msg, 6345 .callback_ops = &nfs4_layoutreturn_call_ops, 6346 .callback_data = lrp, 6347 }; 6348 int status; 6349 6350 dprintk("--> %s\n", __func__); 6351 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 6352 task = rpc_run_task(&task_setup_data); 6353 if (IS_ERR(task)) 6354 return PTR_ERR(task); 6355 status = task->tk_status; 6356 dprintk("<-- %s status=%d\n", __func__, status); 6357 rpc_put_task(task); 6358 return status; 6359 } 6360 6361 /* 6362 * Retrieve the list of Data Server devices from the MDS. 
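 * The request carries the current layout driver's class id, so only
 * devices for that layout type are returned.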
 */
static int _nfs4_getdevicelist(struct nfs_server *server,
		const struct nfs_fh *fh,
		struct pnfs_devicelist *devlist)
{
	struct nfs4_getdevicelist_args args = {
		.fh = fh,
		.layoutclass = server->pnfs_curr_ld->id,
	};
	struct nfs4_getdevicelist_res res = {
		.devlist = devlist,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

int nfs4_proc_getdevicelist(struct nfs_server *server,
		const struct nfs_fh *fh,
		struct pnfs_devicelist *devlist)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_getdevicelist(server, fh, devlist),
				&exception);
	} while (exception.retry);

	dprintk("%s: err=%d, num_devs=%u\n", __func__,
		err, devlist->num_devs);

	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);

static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}

int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
					_nfs4_proc_getdeviceinfo(server, pdev),
					&exception);
	} while (exception.retry);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);

static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED:	/* layout was recalled */
	case -NFS4ERR_BADIOMODE:	/* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT:	/* no layout */
	case -NFS4ERR_GRACE:		/* loca_reclaim is always false */
		task->tk_status = 0;
		break;
	case 0:
		nfs_post_op_update_inode_force_wcc(data->args.inode,
						   data->res.fattr);
		break;
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
}

static void nfs4_layoutcommit_release(void
*calldata) 6486 { 6487 struct nfs4_layoutcommit_data *data = calldata; 6488 struct pnfs_layout_segment *lseg, *tmp; 6489 unsigned long *bitlock = &NFS_I(data->args.inode)->flags; 6490 6491 pnfs_cleanup_layoutcommit(data); 6492 /* Matched by references in pnfs_set_layoutcommit */ 6493 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) { 6494 list_del_init(&lseg->pls_lc_list); 6495 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, 6496 &lseg->pls_flags)) 6497 put_lseg(lseg); 6498 } 6499 6500 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); 6501 smp_mb__after_clear_bit(); 6502 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); 6503 6504 put_rpccred(data->cred); 6505 kfree(data); 6506 } 6507 6508 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 6509 .rpc_call_prepare = nfs4_layoutcommit_prepare, 6510 .rpc_call_done = nfs4_layoutcommit_done, 6511 .rpc_release = nfs4_layoutcommit_release, 6512 }; 6513 6514 int 6515 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 6516 { 6517 struct rpc_message msg = { 6518 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 6519 .rpc_argp = &data->args, 6520 .rpc_resp = &data->res, 6521 .rpc_cred = data->cred, 6522 }; 6523 struct rpc_task_setup task_setup_data = { 6524 .task = &data->task, 6525 .rpc_client = NFS_CLIENT(data->args.inode), 6526 .rpc_message = &msg, 6527 .callback_ops = &nfs4_layoutcommit_ops, 6528 .callback_data = data, 6529 .flags = RPC_TASK_ASYNC, 6530 }; 6531 struct rpc_task *task; 6532 int status = 0; 6533 6534 dprintk("NFS: %4d initiating layoutcommit call. sync %d " 6535 "lbw: %llu inode %lu\n", 6536 data->task.tk_pid, sync, 6537 data->args.lastbytewritten, 6538 data->args.inode->i_ino); 6539 6540 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 6541 task = rpc_run_task(&task_setup_data); 6542 if (IS_ERR(task)) 6543 return PTR_ERR(task); 6544 if (sync == false) 6545 goto out; 6546 status = nfs4_wait_for_completion_rpc_task(task); 6547 if (status != 0) 6548 goto out; 6549 status = task->tk_status; 6550 out: 6551 dprintk("%s: status %d\n", __func__, status); 6552 rpc_put_task(task); 6553 return status; 6554 } 6555 6556 static int 6557 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6558 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6559 { 6560 struct nfs41_secinfo_no_name_args args = { 6561 .style = SECINFO_STYLE_CURRENT_FH, 6562 }; 6563 struct nfs4_secinfo_res res = { 6564 .flavors = flavors, 6565 }; 6566 struct rpc_message msg = { 6567 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 6568 .rpc_argp = &args, 6569 .rpc_resp = &res, 6570 }; 6571 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 6572 } 6573 6574 static int 6575 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6576 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6577 { 6578 struct nfs4_exception exception = { }; 6579 int err; 6580 do { 6581 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6582 switch (err) { 6583 case 0: 6584 case -NFS4ERR_WRONGSEC: 6585 case -NFS4ERR_NOTSUPP: 6586 goto out; 6587 default: 6588 err = nfs4_handle_exception(server, err, &exception); 6589 } 6590 } while (exception.retry); 6591 out: 6592 return err; 6593 } 6594 6595 static int 6596 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 6597 struct nfs_fsinfo *info) 6598 { 6599 int err; 6600 struct page *page; 6601 rpc_authflavor_t flavor; 6602 struct 
nfs4_secinfo_flavors *flavors; 6603 6604 page = alloc_page(GFP_KERNEL); 6605 if (!page) { 6606 err = -ENOMEM; 6607 goto out; 6608 } 6609 6610 flavors = page_address(page); 6611 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6612 6613 /* 6614 * Fall back on "guess and check" method if 6615 * the server doesn't support SECINFO_NO_NAME 6616 */ 6617 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { 6618 err = nfs4_find_root_sec(server, fhandle, info); 6619 goto out_freepage; 6620 } 6621 if (err) 6622 goto out_freepage; 6623 6624 flavor = nfs_find_best_sec(flavors); 6625 if (err == 0) 6626 err = nfs4_lookup_root_sec(server, fhandle, info, flavor); 6627 6628 out_freepage: 6629 put_page(page); 6630 if (err == -EACCES) 6631 return -EPERM; 6632 out: 6633 return err; 6634 } 6635 6636 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6637 { 6638 int status; 6639 struct nfs41_test_stateid_args args = { 6640 .stateid = stateid, 6641 }; 6642 struct nfs41_test_stateid_res res; 6643 struct rpc_message msg = { 6644 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 6645 .rpc_argp = &args, 6646 .rpc_resp = &res, 6647 }; 6648 6649 dprintk("NFS call test_stateid %p\n", stateid); 6650 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); 6651 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 6652 if (status != NFS_OK) { 6653 dprintk("NFS reply test_stateid: failed, %d\n", status); 6654 return status; 6655 } 6656 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 6657 return -res.status; 6658 } 6659 6660 /** 6661 * nfs41_test_stateid - perform a TEST_STATEID operation 6662 * 6663 * @server: server / transport on which to perform the operation 6664 * @stateid: state ID to test 6665 * 6666 * Returns NFS_OK if the server recognizes that "stateid" is valid. 6667 * Otherwise a negative NFS4ERR value is returned if the operation 6668 * failed or the state ID is not currently valid. 6669 */ 6670 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6671 { 6672 struct nfs4_exception exception = { }; 6673 int err; 6674 do { 6675 err = _nfs41_test_stateid(server, stateid); 6676 if (err != -NFS4ERR_DELAY) 6677 break; 6678 nfs4_handle_exception(server, err, &exception); 6679 } while (exception.retry); 6680 return err; 6681 } 6682 6683 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6684 { 6685 struct nfs41_free_stateid_args args = { 6686 .stateid = stateid, 6687 }; 6688 struct nfs41_free_stateid_res res; 6689 struct rpc_message msg = { 6690 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 6691 .rpc_argp = &args, 6692 .rpc_resp = &res, 6693 }; 6694 int status; 6695 6696 dprintk("NFS call free_stateid %p\n", stateid); 6697 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); 6698 status = nfs4_call_sync_sequence(server->client, server, &msg, 6699 &args.seq_args, &res.seq_res, 1); 6700 dprintk("NFS reply free_stateid: %d\n", status); 6701 return status; 6702 } 6703 6704 /** 6705 * nfs41_free_stateid - perform a FREE_STATEID operation 6706 * 6707 * @server: server / transport on which to perform the operation 6708 * @stateid: state ID to release 6709 * 6710 * Returns NFS_OK if the server freed "stateid". Otherwise a 6711 * negative NFS4ERR value is returned. 
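 * NFS4ERR_DELAY results are retried before the final status is returned.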
6712 */ 6713 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6714 { 6715 struct nfs4_exception exception = { }; 6716 int err; 6717 do { 6718 err = _nfs4_free_stateid(server, stateid); 6719 if (err != -NFS4ERR_DELAY) 6720 break; 6721 nfs4_handle_exception(server, err, &exception); 6722 } while (exception.retry); 6723 return err; 6724 } 6725 6726 static bool nfs41_match_stateid(const nfs4_stateid *s1, 6727 const nfs4_stateid *s2) 6728 { 6729 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 6730 return false; 6731 6732 if (s1->seqid == s2->seqid) 6733 return true; 6734 if (s1->seqid == 0 || s2->seqid == 0) 6735 return true; 6736 6737 return false; 6738 } 6739 6740 #endif /* CONFIG_NFS_V4_1 */ 6741 6742 static bool nfs4_match_stateid(const nfs4_stateid *s1, 6743 const nfs4_stateid *s2) 6744 { 6745 return nfs4_stateid_match(s1, s2); 6746 } 6747 6748 6749 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 6750 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6751 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6752 .recover_open = nfs4_open_reclaim, 6753 .recover_lock = nfs4_lock_reclaim, 6754 .establish_clid = nfs4_init_clientid, 6755 .get_clid_cred = nfs4_get_setclientid_cred, 6756 }; 6757 6758 #if defined(CONFIG_NFS_V4_1) 6759 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 6760 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6761 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6762 .recover_open = nfs4_open_reclaim, 6763 .recover_lock = nfs4_lock_reclaim, 6764 .establish_clid = nfs41_init_clientid, 6765 .get_clid_cred = nfs4_get_exchange_id_cred, 6766 .reclaim_complete = nfs41_proc_reclaim_complete, 6767 }; 6768 #endif /* CONFIG_NFS_V4_1 */ 6769 6770 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 6771 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6772 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6773 .recover_open = nfs4_open_expired, 6774 .recover_lock = nfs4_lock_expired, 6775 .establish_clid = nfs4_init_clientid, 6776 .get_clid_cred = nfs4_get_setclientid_cred, 6777 }; 6778 6779 #if defined(CONFIG_NFS_V4_1) 6780 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 6781 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6782 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6783 .recover_open = nfs41_open_expired, 6784 .recover_lock = nfs41_lock_expired, 6785 .establish_clid = nfs41_init_clientid, 6786 .get_clid_cred = nfs4_get_exchange_id_cred, 6787 }; 6788 #endif /* CONFIG_NFS_V4_1 */ 6789 6790 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 6791 .sched_state_renewal = nfs4_proc_async_renew, 6792 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 6793 .renew_lease = nfs4_proc_renew, 6794 }; 6795 6796 #if defined(CONFIG_NFS_V4_1) 6797 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 6798 .sched_state_renewal = nfs41_proc_async_sequence, 6799 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 6800 .renew_lease = nfs4_proc_sequence, 6801 }; 6802 #endif 6803 6804 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 6805 .minor_version = 0, 6806 .call_sync = _nfs4_call_sync, 6807 .match_stateid = nfs4_match_stateid, 6808 .find_root_sec = nfs4_find_root_sec, 6809 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 6810 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 6811 .state_renewal_ops = &nfs40_state_renewal_ops, 6812 }; 6813 6814 #if defined(CONFIG_NFS_V4_1) 6815 static const struct 
nfs4_minor_version_ops nfs_v4_1_minor_ops = { 6816 .minor_version = 1, 6817 .call_sync = _nfs4_call_sync_session, 6818 .match_stateid = nfs41_match_stateid, 6819 .find_root_sec = nfs41_find_root_sec, 6820 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 6821 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 6822 .state_renewal_ops = &nfs41_state_renewal_ops, 6823 }; 6824 #endif 6825 6826 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 6827 [0] = &nfs_v4_0_minor_ops, 6828 #if defined(CONFIG_NFS_V4_1) 6829 [1] = &nfs_v4_1_minor_ops, 6830 #endif 6831 }; 6832 6833 const struct inode_operations nfs4_dir_inode_operations = { 6834 .create = nfs_create, 6835 .lookup = nfs_lookup, 6836 .atomic_open = nfs_atomic_open, 6837 .link = nfs_link, 6838 .unlink = nfs_unlink, 6839 .symlink = nfs_symlink, 6840 .mkdir = nfs_mkdir, 6841 .rmdir = nfs_rmdir, 6842 .mknod = nfs_mknod, 6843 .rename = nfs_rename, 6844 .permission = nfs_permission, 6845 .getattr = nfs_getattr, 6846 .setattr = nfs_setattr, 6847 .getxattr = generic_getxattr, 6848 .setxattr = generic_setxattr, 6849 .listxattr = generic_listxattr, 6850 .removexattr = generic_removexattr, 6851 }; 6852 6853 static const struct inode_operations nfs4_file_inode_operations = { 6854 .permission = nfs_permission, 6855 .getattr = nfs_getattr, 6856 .setattr = nfs_setattr, 6857 .getxattr = generic_getxattr, 6858 .setxattr = generic_setxattr, 6859 .listxattr = generic_listxattr, 6860 .removexattr = generic_removexattr, 6861 }; 6862 6863 const struct nfs_rpc_ops nfs_v4_clientops = { 6864 .version = 4, /* protocol version */ 6865 .dentry_ops = &nfs4_dentry_operations, 6866 .dir_inode_ops = &nfs4_dir_inode_operations, 6867 .file_inode_ops = &nfs4_file_inode_operations, 6868 .file_ops = &nfs4_file_operations, 6869 .getroot = nfs4_proc_get_root, 6870 .submount = nfs4_submount, 6871 .try_mount = nfs4_try_mount, 6872 .getattr = nfs4_proc_getattr, 6873 .setattr = nfs4_proc_setattr, 6874 .lookup = nfs4_proc_lookup, 6875 .access = nfs4_proc_access, 6876 .readlink = nfs4_proc_readlink, 6877 .create = nfs4_proc_create, 6878 .remove = nfs4_proc_remove, 6879 .unlink_setup = nfs4_proc_unlink_setup, 6880 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 6881 .unlink_done = nfs4_proc_unlink_done, 6882 .rename = nfs4_proc_rename, 6883 .rename_setup = nfs4_proc_rename_setup, 6884 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 6885 .rename_done = nfs4_proc_rename_done, 6886 .link = nfs4_proc_link, 6887 .symlink = nfs4_proc_symlink, 6888 .mkdir = nfs4_proc_mkdir, 6889 .rmdir = nfs4_proc_remove, 6890 .readdir = nfs4_proc_readdir, 6891 .mknod = nfs4_proc_mknod, 6892 .statfs = nfs4_proc_statfs, 6893 .fsinfo = nfs4_proc_fsinfo, 6894 .pathconf = nfs4_proc_pathconf, 6895 .set_capabilities = nfs4_server_capabilities, 6896 .decode_dirent = nfs4_decode_dirent, 6897 .read_setup = nfs4_proc_read_setup, 6898 .read_pageio_init = pnfs_pageio_init_read, 6899 .read_rpc_prepare = nfs4_proc_read_rpc_prepare, 6900 .read_done = nfs4_read_done, 6901 .write_setup = nfs4_proc_write_setup, 6902 .write_pageio_init = pnfs_pageio_init_write, 6903 .write_rpc_prepare = nfs4_proc_write_rpc_prepare, 6904 .write_done = nfs4_write_done, 6905 .commit_setup = nfs4_proc_commit_setup, 6906 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 6907 .commit_done = nfs4_commit_done, 6908 .lock = nfs4_proc_lock, 6909 .clear_acl_cache = nfs4_zap_acl_attr, 6910 .close_context = nfs4_close_context, 6911 .open_context = nfs4_atomic_open, 6912 .have_delegation = nfs4_have_delegation, 6913 .return_delegation = 
nfs4_inode_return_delegation, 6914 .alloc_client = nfs4_alloc_client, 6915 .init_client = nfs4_init_client, 6916 .free_client = nfs4_free_client, 6917 .create_server = nfs4_create_server, 6918 .clone_server = nfs_clone_server, 6919 }; 6920 6921 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 6922 .prefix = XATTR_NAME_NFSV4_ACL, 6923 .list = nfs4_xattr_list_nfs4_acl, 6924 .get = nfs4_xattr_get_nfs4_acl, 6925 .set = nfs4_xattr_set_nfs4_acl, 6926 }; 6927 6928 const struct xattr_handler *nfs4_xattr_handlers[] = { 6929 &nfs4_xattr_nfs4_acl_handler, 6930 NULL 6931 }; 6932 6933 /* 6934 * Local variables: 6935 * c-basic-offset: 8 6936 * End: 6937 */ 6938