/*
 *  fs/nfs/nfs4proc.c
 *
 *  Client-side procedure declarations for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

#define NFS4_MAX_LOOP_ON_RECOVER (10)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
};

const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
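	/*
	 * Layout of each faked entry written below: a "value follows"
	 * word, a 64-bit cookie (two words), the entry name length and
	 * name, then a one-word attribute bitmap requesting only FILEID,
	 * the attribute buffer length (8) and the fileid value itself.
	 */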
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
	int res;

	might_sleep();

	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (res)
		return res;

	if (clp->cl_cons_state < 0)
		return clp->cl_cons_state;
	return 0;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	freezable_schedule_timeout_killable(*timeout);
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	*timeout <<= 1;
	return res;
}
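/*
 * nfs4_handle_exception() is normally driven from a retry loop of roughly
 * the following shape (compare nfs4_do_setattr() later in this file).
 * Illustrative sketch only, not compiled; "_nfs4_do_operation" is a
 * placeholder for the operation-specific worker:
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_do_operation(...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *	return err;
 */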
/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch(errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_OPENMODE:
			if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
				nfs4_inode_return_delegation(inode);
				exception->retry = 1;
				return 0;
			}
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (state == NULL)
				break;
			nfs_remove_bad_delegation(state->inode);
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR: %d Reset session\n", __func__,
				errorcode);
			nfs4_schedule_session_recovery(clp->cl_session, errorcode);
			exception->retry = 1;
			break;
#endif /* defined(CONFIG_NFS_V4_1) */
		case -NFS4ERR_FILE_OPEN:
			if (exception->timeout > HZ) {
				/* We have retried a decent amount, time to
				 * fail
				 */
				ret = -EBUSY;
				break;
			}
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
		case -EKEYEXPIRED:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			exception->retry = 1;
			break;
		case -NFS4ERR_BADOWNER:
			/* The following works around a Linux server bug! */
		case -NFS4ERR_BADNAME:
			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
				exception->retry = 1;
				printk(KERN_WARNING "NFS: v4 server %s "
						"does not accept raw "
						"uid/gids. "
						"Reenabling the idmapper.\n",
						server->nfs_client->cl_hostname);
			}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}


static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}

#if defined(CONFIG_NFS_V4_1)

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
		if (slotid < tbl->max_slots)
			tbl->highest_used_slotid = slotid;
		else
			tbl->highest_used_slotid = NFS4_NO_SLOT;
	}
	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
		slotid, tbl->highest_used_slotid);
}

bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	return true;
}

/*
 * Signal state manager thread if session fore channel is drained
 */
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
				nfs4_set_task_privileged, NULL);
		return;
	}

	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;

	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
	complete(&ses->fc_slot_table.complete);
}

/*
 * Signal state manager thread if session back channel is drained
 */
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;
	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
	complete(&ses->bc_slot_table.complete);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot_table *tbl;

	tbl = &res->sr_session->fc_slot_table;
	if (!res->sr_slot) {
		/* just wake up the next guy waiting since
		 * we may have not consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
		return;
	}

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
	nfs4_check_drain_fc_complete(res->sr_session);
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
}

static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation.
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_session == NULL)
		return 1;
	return nfs41_sequence_done(task, res);
}

/*
 * nfs4_find_slot - efficiently look for a free slot
 *
 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and respectively set up the sequence operation args.
 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
 *
 * Note: must be called under the slot_tbl_lock.
 */
static u32
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
	u32 slotid;
	u32 ret_id = NFS4_NO_SLOT;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slots);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
			tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret_id = slotid;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
	return ret_id;
}

static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_session = NULL;
	args->sa_cache_this = 0;
	if (cache_reply)
		args->sa_cache_this = 1;
	res->sr_session = NULL;
	res->sr_slot = NULL;
}

int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u32 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		return 0;

	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* The state manager will wait until the slot table is empty */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s session is draining\n", __func__);
		return -EAGAIN;
	}

	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
		return -EAGAIN;
	}

	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_NO_SLOT) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
		return -EAGAIN;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_slot = slot;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

int nfs4_setup_sequence(const struct nfs_server *server,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (session == NULL)
		goto out;

	dprintk("--> %s clp %p session %p sr_slot %td\n",
		__func__, session->clp, session, res->sr_slot ?
		res->sr_slot - session->fc_slot_table.slots : -1);

	ret = nfs41_setup_sequence(session, args, res, task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
				data->seq_res, task))
		return;
	rpc_call_start(task);
}

static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res,
				   int privileged)
{
	int ret;
	struct rpc_task *task;
	struct nfs41_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &nfs41_call_sync_ops,
		.callback_data = &data
	};

	if (privileged)
		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
			    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
}

#else
static inline
void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	return 1;
}
#endif /* CONFIG_NFS_V4_1 */

int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
		    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return rpc_call_sync(clnt, msg, 0);
}

static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						args, res, cache_reply);
}

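/*
 * Typical calling pattern for nfs4_call_sync(): the operation-specific
 * argument and result structures embed nfs4_sequence_args/nfs4_sequence_res,
 * and the final parameter indicates whether the server should cache the
 * reply for this compound (typically set for non-idempotent operations).
 * Compare _nfs4_do_setattr() later in this file; the fragment below is an
 * illustrative sketch only and is not compiled:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
 *		.rpc_argp = &arg,
 *		.rpc_resp = &res,
 *		.rpc_cred = cred,
 *	};
 *	status = nfs4_call_sync(server->client, server, &msg,
 *				&arg.seq_args, &res.seq_res, 1);
 */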
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	int rpc_status;
	int cancelled;
};


static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (attrs != NULL && attrs->ia_valid != 0) {
		__be32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
		return 0;
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (nfsi->delegation != deleg_cur ||
1034 (deleg_cur->type & fmode) != fmode) 1035 goto no_delegation_unlock; 1036 1037 if (delegation == NULL) 1038 delegation = &deleg_cur->stateid; 1039 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) 1040 goto no_delegation_unlock; 1041 1042 nfs_mark_delegation_referenced(deleg_cur); 1043 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); 1044 ret = 1; 1045 no_delegation_unlock: 1046 spin_unlock(&deleg_cur->lock); 1047 no_delegation: 1048 rcu_read_unlock(); 1049 1050 if (!ret && open_stateid != NULL) { 1051 __update_open_stateid(state, open_stateid, NULL, fmode); 1052 ret = 1; 1053 } 1054 1055 return ret; 1056 } 1057 1058 1059 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1060 { 1061 struct nfs_delegation *delegation; 1062 1063 rcu_read_lock(); 1064 delegation = rcu_dereference(NFS_I(inode)->delegation); 1065 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1066 rcu_read_unlock(); 1067 return; 1068 } 1069 rcu_read_unlock(); 1070 nfs4_inode_return_delegation(inode); 1071 } 1072 1073 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1074 { 1075 struct nfs4_state *state = opendata->state; 1076 struct nfs_inode *nfsi = NFS_I(state->inode); 1077 struct nfs_delegation *delegation; 1078 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); 1079 fmode_t fmode = opendata->o_arg.fmode; 1080 nfs4_stateid stateid; 1081 int ret = -EAGAIN; 1082 1083 for (;;) { 1084 if (can_open_cached(state, fmode, open_mode)) { 1085 spin_lock(&state->owner->so_lock); 1086 if (can_open_cached(state, fmode, open_mode)) { 1087 update_open_stateflags(state, fmode); 1088 spin_unlock(&state->owner->so_lock); 1089 goto out_return_state; 1090 } 1091 spin_unlock(&state->owner->so_lock); 1092 } 1093 rcu_read_lock(); 1094 delegation = rcu_dereference(nfsi->delegation); 1095 if (!can_open_delegated(delegation, fmode)) { 1096 rcu_read_unlock(); 1097 break; 1098 } 1099 /* Save the delegation */ 1100 nfs4_stateid_copy(&stateid, &delegation->stateid); 1101 rcu_read_unlock(); 1102 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1103 if (ret != 0) 1104 goto out; 1105 ret = -EAGAIN; 1106 1107 /* Try to update the stateid using the delegation */ 1108 if (update_open_stateid(state, NULL, &stateid, fmode)) 1109 goto out_return_state; 1110 } 1111 out: 1112 return ERR_PTR(ret); 1113 out_return_state: 1114 atomic_inc(&state->count); 1115 return state; 1116 } 1117 1118 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1119 { 1120 struct inode *inode; 1121 struct nfs4_state *state = NULL; 1122 struct nfs_delegation *delegation; 1123 int ret; 1124 1125 if (!data->rpc_done) { 1126 state = nfs4_try_open_cached(data); 1127 goto out; 1128 } 1129 1130 ret = -EAGAIN; 1131 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1132 goto err; 1133 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); 1134 ret = PTR_ERR(inode); 1135 if (IS_ERR(inode)) 1136 goto err; 1137 ret = -ENOMEM; 1138 state = nfs4_get_open_state(inode, data->owner); 1139 if (state == NULL) 1140 goto err_put_inode; 1141 if (data->o_res.delegation_type != 0) { 1142 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 1143 int delegation_flags = 0; 1144 1145 rcu_read_lock(); 1146 delegation = rcu_dereference(NFS_I(inode)->delegation); 1147 if (delegation) 1148 delegation_flags = delegation->flags; 1149 rcu_read_unlock(); 1150 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { 1151 
pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1152 "returning a delegation for " 1153 "OPEN(CLAIM_DELEGATE_CUR)\n", 1154 clp->cl_hostname); 1155 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1156 nfs_inode_set_delegation(state->inode, 1157 data->owner->so_cred, 1158 &data->o_res); 1159 else 1160 nfs_inode_reclaim_delegation(state->inode, 1161 data->owner->so_cred, 1162 &data->o_res); 1163 } 1164 1165 update_open_stateid(state, &data->o_res.stateid, NULL, 1166 data->o_arg.fmode); 1167 iput(inode); 1168 out: 1169 return state; 1170 err_put_inode: 1171 iput(inode); 1172 err: 1173 return ERR_PTR(ret); 1174 } 1175 1176 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) 1177 { 1178 struct nfs_inode *nfsi = NFS_I(state->inode); 1179 struct nfs_open_context *ctx; 1180 1181 spin_lock(&state->inode->i_lock); 1182 list_for_each_entry(ctx, &nfsi->open_files, list) { 1183 if (ctx->state != state) 1184 continue; 1185 get_nfs_open_context(ctx); 1186 spin_unlock(&state->inode->i_lock); 1187 return ctx; 1188 } 1189 spin_unlock(&state->inode->i_lock); 1190 return ERR_PTR(-ENOENT); 1191 } 1192 1193 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state) 1194 { 1195 struct nfs4_opendata *opendata; 1196 1197 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); 1198 if (opendata == NULL) 1199 return ERR_PTR(-ENOMEM); 1200 opendata->state = state; 1201 atomic_inc(&state->count); 1202 return opendata; 1203 } 1204 1205 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) 1206 { 1207 struct nfs4_state *newstate; 1208 int ret; 1209 1210 opendata->o_arg.open_flags = 0; 1211 opendata->o_arg.fmode = fmode; 1212 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 1213 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 1214 nfs4_init_opendata_res(opendata); 1215 ret = _nfs4_recover_proc_open(opendata); 1216 if (ret != 0) 1217 return ret; 1218 newstate = nfs4_opendata_to_nfs4_state(opendata); 1219 if (IS_ERR(newstate)) 1220 return PTR_ERR(newstate); 1221 nfs4_close_state(newstate, fmode); 1222 *res = newstate; 1223 return 0; 1224 } 1225 1226 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 1227 { 1228 struct nfs4_state *newstate; 1229 int ret; 1230 1231 /* memory barrier prior to reading state->n_* */ 1232 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1233 smp_rmb(); 1234 if (state->n_rdwr != 0) { 1235 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1236 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); 1237 if (ret != 0) 1238 return ret; 1239 if (newstate != state) 1240 return -ESTALE; 1241 } 1242 if (state->n_wronly != 0) { 1243 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1244 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); 1245 if (ret != 0) 1246 return ret; 1247 if (newstate != state) 1248 return -ESTALE; 1249 } 1250 if (state->n_rdonly != 0) { 1251 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1252 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); 1253 if (ret != 0) 1254 return ret; 1255 if (newstate != state) 1256 return -ESTALE; 1257 } 1258 /* 1259 * We may have performed cached opens for all three recoveries. 1260 * Check if we need to update the current stateid. 
1261 */ 1262 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 1263 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 1264 write_seqlock(&state->seqlock); 1265 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1266 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1267 write_sequnlock(&state->seqlock); 1268 } 1269 return 0; 1270 } 1271 1272 /* 1273 * OPEN_RECLAIM: 1274 * reclaim state on the server after a reboot. 1275 */ 1276 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1277 { 1278 struct nfs_delegation *delegation; 1279 struct nfs4_opendata *opendata; 1280 fmode_t delegation_type = 0; 1281 int status; 1282 1283 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1284 if (IS_ERR(opendata)) 1285 return PTR_ERR(opendata); 1286 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; 1287 opendata->o_arg.fh = NFS_FH(state->inode); 1288 rcu_read_lock(); 1289 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1290 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) 1291 delegation_type = delegation->type; 1292 rcu_read_unlock(); 1293 opendata->o_arg.u.delegation_type = delegation_type; 1294 status = nfs4_open_recover(opendata, state); 1295 nfs4_opendata_put(opendata); 1296 return status; 1297 } 1298 1299 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1300 { 1301 struct nfs_server *server = NFS_SERVER(state->inode); 1302 struct nfs4_exception exception = { }; 1303 int err; 1304 do { 1305 err = _nfs4_do_open_reclaim(ctx, state); 1306 if (err != -NFS4ERR_DELAY) 1307 break; 1308 nfs4_handle_exception(server, err, &exception); 1309 } while (exception.retry); 1310 return err; 1311 } 1312 1313 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 1314 { 1315 struct nfs_open_context *ctx; 1316 int ret; 1317 1318 ctx = nfs4_state_find_open_context(state); 1319 if (IS_ERR(ctx)) 1320 return PTR_ERR(ctx); 1321 ret = nfs4_do_open_reclaim(ctx, state); 1322 put_nfs_open_context(ctx); 1323 return ret; 1324 } 1325 1326 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1327 { 1328 struct nfs4_opendata *opendata; 1329 int ret; 1330 1331 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1332 if (IS_ERR(opendata)) 1333 return PTR_ERR(opendata); 1334 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; 1335 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1336 ret = nfs4_open_recover(opendata, state); 1337 nfs4_opendata_put(opendata); 1338 return ret; 1339 } 1340 1341 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1342 { 1343 struct nfs4_exception exception = { }; 1344 struct nfs_server *server = NFS_SERVER(state->inode); 1345 int err; 1346 do { 1347 err = _nfs4_open_delegation_recall(ctx, state, stateid); 1348 switch (err) { 1349 case 0: 1350 case -ENOENT: 1351 case -ESTALE: 1352 goto out; 1353 case -NFS4ERR_BADSESSION: 1354 case -NFS4ERR_BADSLOT: 1355 case -NFS4ERR_BAD_HIGH_SLOT: 1356 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1357 case -NFS4ERR_DEADSESSION: 1358 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 1359 goto out; 1360 case -NFS4ERR_STALE_CLIENTID: 1361 case -NFS4ERR_STALE_STATEID: 1362 case -NFS4ERR_EXPIRED: 1363 /* Don't recall a delegation if it was lost */ 1364 nfs4_schedule_lease_recovery(server->nfs_client); 1365 goto out; 1366 case 
-ERESTARTSYS: 1367 /* 1368 * The show must go on: exit, but mark the 1369 * stateid as needing recovery. 1370 */ 1371 case -NFS4ERR_DELEG_REVOKED: 1372 case -NFS4ERR_ADMIN_REVOKED: 1373 case -NFS4ERR_BAD_STATEID: 1374 nfs_inode_find_state_and_recover(state->inode, 1375 stateid); 1376 nfs4_schedule_stateid_recovery(server, state); 1377 case -EKEYEXPIRED: 1378 /* 1379 * User RPCSEC_GSS context has expired. 1380 * We cannot recover this stateid now, so 1381 * skip it and allow recovery thread to 1382 * proceed. 1383 */ 1384 case -ENOMEM: 1385 err = 0; 1386 goto out; 1387 } 1388 err = nfs4_handle_exception(server, err, &exception); 1389 } while (exception.retry); 1390 out: 1391 return err; 1392 } 1393 1394 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1395 { 1396 struct nfs4_opendata *data = calldata; 1397 1398 data->rpc_status = task->tk_status; 1399 if (data->rpc_status == 0) { 1400 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 1401 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1402 renew_lease(data->o_res.server, data->timestamp); 1403 data->rpc_done = 1; 1404 } 1405 } 1406 1407 static void nfs4_open_confirm_release(void *calldata) 1408 { 1409 struct nfs4_opendata *data = calldata; 1410 struct nfs4_state *state = NULL; 1411 1412 /* If this request hasn't been cancelled, do nothing */ 1413 if (data->cancelled == 0) 1414 goto out_free; 1415 /* In case of error, no cleanup! */ 1416 if (!data->rpc_done) 1417 goto out_free; 1418 state = nfs4_opendata_to_nfs4_state(data); 1419 if (!IS_ERR(state)) 1420 nfs4_close_state(state, data->o_arg.fmode); 1421 out_free: 1422 nfs4_opendata_put(data); 1423 } 1424 1425 static const struct rpc_call_ops nfs4_open_confirm_ops = { 1426 .rpc_call_done = nfs4_open_confirm_done, 1427 .rpc_release = nfs4_open_confirm_release, 1428 }; 1429 1430 /* 1431 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1432 */ 1433 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1434 { 1435 struct nfs_server *server = NFS_SERVER(data->dir->d_inode); 1436 struct rpc_task *task; 1437 struct rpc_message msg = { 1438 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1439 .rpc_argp = &data->c_arg, 1440 .rpc_resp = &data->c_res, 1441 .rpc_cred = data->owner->so_cred, 1442 }; 1443 struct rpc_task_setup task_setup_data = { 1444 .rpc_client = server->client, 1445 .rpc_message = &msg, 1446 .callback_ops = &nfs4_open_confirm_ops, 1447 .callback_data = data, 1448 .workqueue = nfsiod_workqueue, 1449 .flags = RPC_TASK_ASYNC, 1450 }; 1451 int status; 1452 1453 kref_get(&data->kref); 1454 data->rpc_done = 0; 1455 data->rpc_status = 0; 1456 data->timestamp = jiffies; 1457 task = rpc_run_task(&task_setup_data); 1458 if (IS_ERR(task)) 1459 return PTR_ERR(task); 1460 status = nfs4_wait_for_completion_rpc_task(task); 1461 if (status != 0) { 1462 data->cancelled = 1; 1463 smp_wmb(); 1464 } else 1465 status = data->rpc_status; 1466 rpc_put_task(task); 1467 return status; 1468 } 1469 1470 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1471 { 1472 struct nfs4_opendata *data = calldata; 1473 struct nfs4_state_owner *sp = data->owner; 1474 1475 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1476 return; 1477 /* 1478 * Check if we still need to send an OPEN call, or if we can use 1479 * a delegation instead. 
1480 */ 1481 if (data->state != NULL) { 1482 struct nfs_delegation *delegation; 1483 1484 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1485 goto out_no_action; 1486 rcu_read_lock(); 1487 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1488 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR && 1489 can_open_delegated(delegation, data->o_arg.fmode)) 1490 goto unlock_no_action; 1491 rcu_read_unlock(); 1492 } 1493 /* Update client id. */ 1494 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; 1495 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { 1496 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 1497 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 1498 } 1499 data->timestamp = jiffies; 1500 if (nfs4_setup_sequence(data->o_arg.server, 1501 &data->o_arg.seq_args, 1502 &data->o_res.seq_res, task)) 1503 return; 1504 rpc_call_start(task); 1505 return; 1506 unlock_no_action: 1507 rcu_read_unlock(); 1508 out_no_action: 1509 task->tk_action = NULL; 1510 1511 } 1512 1513 static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata) 1514 { 1515 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 1516 nfs4_open_prepare(task, calldata); 1517 } 1518 1519 static void nfs4_open_done(struct rpc_task *task, void *calldata) 1520 { 1521 struct nfs4_opendata *data = calldata; 1522 1523 data->rpc_status = task->tk_status; 1524 1525 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 1526 return; 1527 1528 if (task->tk_status == 0) { 1529 switch (data->o_res.f_attr->mode & S_IFMT) { 1530 case S_IFREG: 1531 break; 1532 case S_IFLNK: 1533 data->rpc_status = -ELOOP; 1534 break; 1535 case S_IFDIR: 1536 data->rpc_status = -EISDIR; 1537 break; 1538 default: 1539 data->rpc_status = -ENOTDIR; 1540 } 1541 renew_lease(data->o_res.server, data->timestamp); 1542 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 1543 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1544 } 1545 data->rpc_done = 1; 1546 } 1547 1548 static void nfs4_open_release(void *calldata) 1549 { 1550 struct nfs4_opendata *data = calldata; 1551 struct nfs4_state *state = NULL; 1552 1553 /* If this request hasn't been cancelled, do nothing */ 1554 if (data->cancelled == 0) 1555 goto out_free; 1556 /* In case of error, no cleanup! */ 1557 if (data->rpc_status != 0 || !data->rpc_done) 1558 goto out_free; 1559 /* In case we need an open_confirm, no cleanup! 
*/ 1560 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 1561 goto out_free; 1562 state = nfs4_opendata_to_nfs4_state(data); 1563 if (!IS_ERR(state)) 1564 nfs4_close_state(state, data->o_arg.fmode); 1565 out_free: 1566 nfs4_opendata_put(data); 1567 } 1568 1569 static const struct rpc_call_ops nfs4_open_ops = { 1570 .rpc_call_prepare = nfs4_open_prepare, 1571 .rpc_call_done = nfs4_open_done, 1572 .rpc_release = nfs4_open_release, 1573 }; 1574 1575 static const struct rpc_call_ops nfs4_recover_open_ops = { 1576 .rpc_call_prepare = nfs4_recover_open_prepare, 1577 .rpc_call_done = nfs4_open_done, 1578 .rpc_release = nfs4_open_release, 1579 }; 1580 1581 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 1582 { 1583 struct inode *dir = data->dir->d_inode; 1584 struct nfs_server *server = NFS_SERVER(dir); 1585 struct nfs_openargs *o_arg = &data->o_arg; 1586 struct nfs_openres *o_res = &data->o_res; 1587 struct rpc_task *task; 1588 struct rpc_message msg = { 1589 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 1590 .rpc_argp = o_arg, 1591 .rpc_resp = o_res, 1592 .rpc_cred = data->owner->so_cred, 1593 }; 1594 struct rpc_task_setup task_setup_data = { 1595 .rpc_client = server->client, 1596 .rpc_message = &msg, 1597 .callback_ops = &nfs4_open_ops, 1598 .callback_data = data, 1599 .workqueue = nfsiod_workqueue, 1600 .flags = RPC_TASK_ASYNC, 1601 }; 1602 int status; 1603 1604 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 1605 kref_get(&data->kref); 1606 data->rpc_done = 0; 1607 data->rpc_status = 0; 1608 data->cancelled = 0; 1609 if (isrecover) 1610 task_setup_data.callback_ops = &nfs4_recover_open_ops; 1611 task = rpc_run_task(&task_setup_data); 1612 if (IS_ERR(task)) 1613 return PTR_ERR(task); 1614 status = nfs4_wait_for_completion_rpc_task(task); 1615 if (status != 0) { 1616 data->cancelled = 1; 1617 smp_wmb(); 1618 } else 1619 status = data->rpc_status; 1620 rpc_put_task(task); 1621 1622 return status; 1623 } 1624 1625 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 1626 { 1627 struct inode *dir = data->dir->d_inode; 1628 struct nfs_openres *o_res = &data->o_res; 1629 int status; 1630 1631 status = nfs4_run_open_task(data, 1); 1632 if (status != 0 || !data->rpc_done) 1633 return status; 1634 1635 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 1636 1637 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1638 status = _nfs4_proc_open_confirm(data); 1639 if (status != 0) 1640 return status; 1641 } 1642 1643 return status; 1644 } 1645 1646 /* 1647 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 1648 */ 1649 static int _nfs4_proc_open(struct nfs4_opendata *data) 1650 { 1651 struct inode *dir = data->dir->d_inode; 1652 struct nfs_server *server = NFS_SERVER(dir); 1653 struct nfs_openargs *o_arg = &data->o_arg; 1654 struct nfs_openres *o_res = &data->o_res; 1655 int status; 1656 1657 status = nfs4_run_open_task(data, 0); 1658 if (!data->rpc_done) 1659 return status; 1660 if (status != 0) { 1661 if (status == -NFS4ERR_BADNAME && 1662 !(o_arg->open_flags & O_CREAT)) 1663 return -ENOENT; 1664 return status; 1665 } 1666 1667 nfs_fattr_map_and_free_names(server, &data->f_attr); 1668 1669 if (o_arg->open_flags & O_CREAT) 1670 update_changeattr(dir, &o_res->cinfo); 1671 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 1672 server->caps &= ~NFS_CAP_POSIX_LOCK; 1673 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1674 status = _nfs4_proc_open_confirm(data); 1675 if (status != 0) 1676 return status; 1677 } 1678 if 
(!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 1679 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); 1680 return 0; 1681 } 1682 1683 static int nfs4_client_recover_expired_lease(struct nfs_client *clp) 1684 { 1685 unsigned int loop; 1686 int ret; 1687 1688 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 1689 ret = nfs4_wait_clnt_recover(clp); 1690 if (ret != 0) 1691 break; 1692 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && 1693 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) 1694 break; 1695 nfs4_schedule_state_manager(clp); 1696 ret = -EIO; 1697 } 1698 return ret; 1699 } 1700 1701 static int nfs4_recover_expired_lease(struct nfs_server *server) 1702 { 1703 return nfs4_client_recover_expired_lease(server->nfs_client); 1704 } 1705 1706 /* 1707 * OPEN_EXPIRED: 1708 * reclaim state on the server after a network partition. 1709 * Assumes caller holds the appropriate lock 1710 */ 1711 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 1712 { 1713 struct nfs4_opendata *opendata; 1714 int ret; 1715 1716 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1717 if (IS_ERR(opendata)) 1718 return PTR_ERR(opendata); 1719 ret = nfs4_open_recover(opendata, state); 1720 if (ret == -ESTALE) 1721 d_drop(ctx->dentry); 1722 nfs4_opendata_put(opendata); 1723 return ret; 1724 } 1725 1726 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 1727 { 1728 struct nfs_server *server = NFS_SERVER(state->inode); 1729 struct nfs4_exception exception = { }; 1730 int err; 1731 1732 do { 1733 err = _nfs4_open_expired(ctx, state); 1734 switch (err) { 1735 default: 1736 goto out; 1737 case -NFS4ERR_GRACE: 1738 case -NFS4ERR_DELAY: 1739 nfs4_handle_exception(server, err, &exception); 1740 err = 0; 1741 } 1742 } while (exception.retry); 1743 out: 1744 return err; 1745 } 1746 1747 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 1748 { 1749 struct nfs_open_context *ctx; 1750 int ret; 1751 1752 ctx = nfs4_state_find_open_context(state); 1753 if (IS_ERR(ctx)) 1754 return PTR_ERR(ctx); 1755 ret = nfs4_do_open_expired(ctx, state); 1756 put_nfs_open_context(ctx); 1757 return ret; 1758 } 1759 1760 #if defined(CONFIG_NFS_V4_1) 1761 static void nfs41_clear_delegation_stateid(struct nfs4_state *state) 1762 { 1763 struct nfs_server *server = NFS_SERVER(state->inode); 1764 nfs4_stateid *stateid = &state->stateid; 1765 int status; 1766 1767 /* If a state reset has been done, test_stateid is unneeded */ 1768 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1769 return; 1770 1771 status = nfs41_test_stateid(server, stateid); 1772 if (status != NFS_OK) { 1773 /* Free the stateid unless the server explicitly 1774 * informs us the stateid is unrecognized. */ 1775 if (status != -NFS4ERR_BAD_STATEID) 1776 nfs41_free_stateid(server, stateid); 1777 1778 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1779 } 1780 } 1781 1782 /** 1783 * nfs41_check_open_stateid - possibly free an open stateid 1784 * 1785 * @state: NFSv4 state for an inode 1786 * 1787 * Returns NFS_OK if recovery for this stateid is now finished. 1788 * Otherwise a negative NFS4ERR value is returned. 
1789 */ 1790 static int nfs41_check_open_stateid(struct nfs4_state *state) 1791 { 1792 struct nfs_server *server = NFS_SERVER(state->inode); 1793 nfs4_stateid *stateid = &state->stateid; 1794 int status; 1795 1796 /* If a state reset has been done, test_stateid is unneeded */ 1797 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 1798 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 1799 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 1800 return -NFS4ERR_BAD_STATEID; 1801 1802 status = nfs41_test_stateid(server, stateid); 1803 if (status != NFS_OK) { 1804 /* Free the stateid unless the server explicitly 1805 * informs us the stateid is unrecognized. */ 1806 if (status != -NFS4ERR_BAD_STATEID) 1807 nfs41_free_stateid(server, stateid); 1808 1809 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1810 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1811 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1812 } 1813 return status; 1814 } 1815 1816 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 1817 { 1818 int status; 1819 1820 nfs41_clear_delegation_stateid(state); 1821 status = nfs41_check_open_stateid(state); 1822 if (status != NFS_OK) 1823 status = nfs4_open_expired(sp, state); 1824 return status; 1825 } 1826 #endif 1827 1828 /* 1829 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 1830 * fields corresponding to attributes that were used to store the verifier. 1831 * Make sure we clobber those fields in the later setattr call 1832 */ 1833 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 1834 { 1835 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 1836 !(sattr->ia_valid & ATTR_ATIME_SET)) 1837 sattr->ia_valid |= ATTR_ATIME; 1838 1839 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 1840 !(sattr->ia_valid & ATTR_MTIME_SET)) 1841 sattr->ia_valid |= ATTR_MTIME; 1842 } 1843 1844 /* 1845 * Returns a referenced nfs4_state 1846 */ 1847 static int _nfs4_do_open(struct inode *dir, 1848 struct dentry *dentry, 1849 fmode_t fmode, 1850 int flags, 1851 struct iattr *sattr, 1852 struct rpc_cred *cred, 1853 struct nfs4_state **res, 1854 struct nfs4_threshold **ctx_th) 1855 { 1856 struct nfs4_state_owner *sp; 1857 struct nfs4_state *state = NULL; 1858 struct nfs_server *server = NFS_SERVER(dir); 1859 struct nfs4_opendata *opendata; 1860 int status; 1861 1862 /* Protect against reboot recovery conflicts */ 1863 status = -ENOMEM; 1864 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 1865 if (sp == NULL) { 1866 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 1867 goto out_err; 1868 } 1869 status = nfs4_recover_expired_lease(server); 1870 if (status != 0) 1871 goto err_put_state_owner; 1872 if (dentry->d_inode != NULL) 1873 nfs4_return_incompatible_delegation(dentry->d_inode, fmode); 1874 status = -ENOMEM; 1875 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL); 1876 if (opendata == NULL) 1877 goto err_put_state_owner; 1878 1879 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 1880 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 1881 if (!opendata->f_attr.mdsthreshold) 1882 goto err_opendata_put; 1883 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 1884 } 1885 if (dentry->d_inode != NULL) 1886 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 1887 1888 status = _nfs4_proc_open(opendata); 1889 if (status != 0) 1890 goto err_opendata_put; 1891 1892 state = 
nfs4_opendata_to_nfs4_state(opendata); 1893 status = PTR_ERR(state); 1894 if (IS_ERR(state)) 1895 goto err_opendata_put; 1896 if (server->caps & NFS_CAP_POSIX_LOCK) 1897 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 1898 1899 if (opendata->o_arg.open_flags & O_EXCL) { 1900 nfs4_exclusive_attrset(opendata, sattr); 1901 1902 nfs_fattr_init(opendata->o_res.f_attr); 1903 status = nfs4_do_setattr(state->inode, cred, 1904 opendata->o_res.f_attr, sattr, 1905 state); 1906 if (status == 0) 1907 nfs_setattr_update_inode(state->inode, sattr); 1908 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); 1909 } 1910 1911 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 1912 *ctx_th = opendata->f_attr.mdsthreshold; 1913 else 1914 kfree(opendata->f_attr.mdsthreshold); 1915 opendata->f_attr.mdsthreshold = NULL; 1916 1917 nfs4_opendata_put(opendata); 1918 nfs4_put_state_owner(sp); 1919 *res = state; 1920 return 0; 1921 err_opendata_put: 1922 kfree(opendata->f_attr.mdsthreshold); 1923 nfs4_opendata_put(opendata); 1924 err_put_state_owner: 1925 nfs4_put_state_owner(sp); 1926 out_err: 1927 *res = NULL; 1928 return status; 1929 } 1930 1931 1932 static struct nfs4_state *nfs4_do_open(struct inode *dir, 1933 struct dentry *dentry, 1934 fmode_t fmode, 1935 int flags, 1936 struct iattr *sattr, 1937 struct rpc_cred *cred, 1938 struct nfs4_threshold **ctx_th) 1939 { 1940 struct nfs4_exception exception = { }; 1941 struct nfs4_state *res; 1942 int status; 1943 1944 fmode &= FMODE_READ|FMODE_WRITE; 1945 do { 1946 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, 1947 &res, ctx_th); 1948 if (status == 0) 1949 break; 1950 /* NOTE: BAD_SEQID means the server and client disagree about the 1951 * book-keeping w.r.t. state-changing operations 1952 * (OPEN/CLOSE/LOCK/LOCKU...) 1953 * It is actually a sign of a bug on the client or on the server. 1954 * 1955 * If we receive a BAD_SEQID error in the particular case of 1956 * doing an OPEN, we assume that nfs_increment_open_seqid() will 1957 * have unhashed the old state_owner for us, and that we can 1958 * therefore safely retry using a new one. We should still warn 1959 * the user though... 1960 */ 1961 if (status == -NFS4ERR_BAD_SEQID) { 1962 pr_warn_ratelimited("NFS: v4 server %s " 1963 " returned a bad sequence-id error!\n", 1964 NFS_SERVER(dir)->nfs_client->cl_hostname); 1965 exception.retry = 1; 1966 continue; 1967 } 1968 /* 1969 * BAD_STATEID on OPEN means that the server cancelled our 1970 * state before it received the OPEN_CONFIRM. 1971 * Recover by retrying the request as per the discussion 1972 * on Page 181 of RFC3530. 
1973 */ 1974 if (status == -NFS4ERR_BAD_STATEID) { 1975 exception.retry = 1; 1976 continue; 1977 } 1978 if (status == -EAGAIN) { 1979 /* We must have found a delegation */ 1980 exception.retry = 1; 1981 continue; 1982 } 1983 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 1984 status, &exception)); 1985 } while (exception.retry); 1986 return res; 1987 } 1988 1989 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 1990 struct nfs_fattr *fattr, struct iattr *sattr, 1991 struct nfs4_state *state) 1992 { 1993 struct nfs_server *server = NFS_SERVER(inode); 1994 struct nfs_setattrargs arg = { 1995 .fh = NFS_FH(inode), 1996 .iap = sattr, 1997 .server = server, 1998 .bitmask = server->attr_bitmask, 1999 }; 2000 struct nfs_setattrres res = { 2001 .fattr = fattr, 2002 .server = server, 2003 }; 2004 struct rpc_message msg = { 2005 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2006 .rpc_argp = &arg, 2007 .rpc_resp = &res, 2008 .rpc_cred = cred, 2009 }; 2010 unsigned long timestamp = jiffies; 2011 int status; 2012 2013 nfs_fattr_init(fattr); 2014 2015 if (state != NULL) { 2016 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2017 current->files, current->tgid); 2018 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, 2019 FMODE_WRITE)) { 2020 /* Use that stateid */ 2021 } else 2022 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2023 2024 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2025 if (status == 0 && state != NULL) 2026 renew_lease(server, timestamp); 2027 return status; 2028 } 2029 2030 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2031 struct nfs_fattr *fattr, struct iattr *sattr, 2032 struct nfs4_state *state) 2033 { 2034 struct nfs_server *server = NFS_SERVER(inode); 2035 struct nfs4_exception exception = { 2036 .state = state, 2037 .inode = inode, 2038 }; 2039 int err; 2040 do { 2041 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); 2042 switch (err) { 2043 case -NFS4ERR_OPENMODE: 2044 if (state && !(state->state & FMODE_WRITE)) { 2045 err = -EBADF; 2046 if (sattr->ia_valid & ATTR_OPEN) 2047 err = -EACCES; 2048 goto out; 2049 } 2050 } 2051 err = nfs4_handle_exception(server, err, &exception); 2052 } while (exception.retry); 2053 out: 2054 return err; 2055 } 2056 2057 struct nfs4_closedata { 2058 struct inode *inode; 2059 struct nfs4_state *state; 2060 struct nfs_closeargs arg; 2061 struct nfs_closeres res; 2062 struct nfs_fattr fattr; 2063 unsigned long timestamp; 2064 bool roc; 2065 u32 roc_barrier; 2066 }; 2067 2068 static void nfs4_free_closedata(void *data) 2069 { 2070 struct nfs4_closedata *calldata = data; 2071 struct nfs4_state_owner *sp = calldata->state->owner; 2072 struct super_block *sb = calldata->state->inode->i_sb; 2073 2074 if (calldata->roc) 2075 pnfs_roc_release(calldata->state->inode); 2076 nfs4_put_open_state(calldata->state); 2077 nfs_free_seqid(calldata->arg.seqid); 2078 nfs4_put_state_owner(sp); 2079 nfs_sb_deactive(sb); 2080 kfree(calldata); 2081 } 2082 2083 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, 2084 fmode_t fmode) 2085 { 2086 spin_lock(&state->owner->so_lock); 2087 if (!(fmode & FMODE_READ)) 2088 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2089 if (!(fmode & FMODE_WRITE)) 2090 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2091 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2092 spin_unlock(&state->owner->so_lock); 2093 } 2094 2095 static void nfs4_close_done(struct rpc_task *task, void *data) 2096 { 2097 struct 
nfs4_closedata *calldata = data; 2098 struct nfs4_state *state = calldata->state; 2099 struct nfs_server *server = NFS_SERVER(calldata->inode); 2100 2101 dprintk("%s: begin!\n", __func__); 2102 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2103 return; 2104 /* hmm. we are done with the inode, and in the process of freeing 2105 * the state_owner. we keep this around to process errors 2106 */ 2107 switch (task->tk_status) { 2108 case 0: 2109 if (calldata->roc) 2110 pnfs_roc_set_barrier(state->inode, 2111 calldata->roc_barrier); 2112 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2113 renew_lease(server, calldata->timestamp); 2114 nfs4_close_clear_stateid_flags(state, 2115 calldata->arg.fmode); 2116 break; 2117 case -NFS4ERR_STALE_STATEID: 2118 case -NFS4ERR_OLD_STATEID: 2119 case -NFS4ERR_BAD_STATEID: 2120 case -NFS4ERR_EXPIRED: 2121 if (calldata->arg.fmode == 0) 2122 break; 2123 default: 2124 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2125 rpc_restart_call_prepare(task); 2126 } 2127 nfs_release_seqid(calldata->arg.seqid); 2128 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2129 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2130 } 2131 2132 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2133 { 2134 struct nfs4_closedata *calldata = data; 2135 struct nfs4_state *state = calldata->state; 2136 int call_close = 0; 2137 2138 dprintk("%s: begin!\n", __func__); 2139 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2140 return; 2141 2142 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2143 calldata->arg.fmode = FMODE_READ|FMODE_WRITE; 2144 spin_lock(&state->owner->so_lock); 2145 /* Calculate the change in open mode */ 2146 if (state->n_rdwr == 0) { 2147 if (state->n_rdonly == 0) { 2148 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 2149 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2150 calldata->arg.fmode &= ~FMODE_READ; 2151 } 2152 if (state->n_wronly == 0) { 2153 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 2154 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2155 calldata->arg.fmode &= ~FMODE_WRITE; 2156 } 2157 } 2158 spin_unlock(&state->owner->so_lock); 2159 2160 if (!call_close) { 2161 /* Note: exit _without_ calling nfs4_close_done */ 2162 task->tk_action = NULL; 2163 goto out; 2164 } 2165 2166 if (calldata->arg.fmode == 0) { 2167 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2168 if (calldata->roc && 2169 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) { 2170 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq, 2171 task, NULL); 2172 goto out; 2173 } 2174 } 2175 2176 nfs_fattr_init(calldata->res.fattr); 2177 calldata->timestamp = jiffies; 2178 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode), 2179 &calldata->arg.seq_args, 2180 &calldata->res.seq_res, 2181 task)) 2182 goto out; 2183 rpc_call_start(task); 2184 out: 2185 dprintk("%s: done!\n", __func__); 2186 } 2187 2188 static const struct rpc_call_ops nfs4_close_ops = { 2189 .rpc_call_prepare = nfs4_close_prepare, 2190 .rpc_call_done = nfs4_close_done, 2191 .rpc_release = nfs4_free_closedata, 2192 }; 2193 2194 /* 2195 * It is possible for data to be read/written from a mem-mapped file 2196 * after the sys_close call (which hits the vfs layer as a flush). 2197 * This means that we can't safely call nfsv4 close on a file until 2198 * the inode is cleared. 
This in turn means that we are not good 2199 * NFSv4 citizens - we do not indicate to the server to update the file's 2200 * share state even when we are done with one of the three share 2201 * stateid's in the inode. 2202 * 2203 * NOTE: Caller must be holding the sp->so_owner semaphore! 2204 */ 2205 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) 2206 { 2207 struct nfs_server *server = NFS_SERVER(state->inode); 2208 struct nfs4_closedata *calldata; 2209 struct nfs4_state_owner *sp = state->owner; 2210 struct rpc_task *task; 2211 struct rpc_message msg = { 2212 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2213 .rpc_cred = state->owner->so_cred, 2214 }; 2215 struct rpc_task_setup task_setup_data = { 2216 .rpc_client = server->client, 2217 .rpc_message = &msg, 2218 .callback_ops = &nfs4_close_ops, 2219 .workqueue = nfsiod_workqueue, 2220 .flags = RPC_TASK_ASYNC, 2221 }; 2222 int status = -ENOMEM; 2223 2224 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2225 if (calldata == NULL) 2226 goto out; 2227 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2228 calldata->inode = state->inode; 2229 calldata->state = state; 2230 calldata->arg.fh = NFS_FH(state->inode); 2231 calldata->arg.stateid = &state->open_stateid; 2232 /* Serialization for the sequence id */ 2233 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); 2234 if (calldata->arg.seqid == NULL) 2235 goto out_free_calldata; 2236 calldata->arg.fmode = 0; 2237 calldata->arg.bitmask = server->cache_consistency_bitmask; 2238 calldata->res.fattr = &calldata->fattr; 2239 calldata->res.seqid = calldata->arg.seqid; 2240 calldata->res.server = server; 2241 calldata->roc = roc; 2242 nfs_sb_active(calldata->inode->i_sb); 2243 2244 msg.rpc_argp = &calldata->arg; 2245 msg.rpc_resp = &calldata->res; 2246 task_setup_data.callback_data = calldata; 2247 task = rpc_run_task(&task_setup_data); 2248 if (IS_ERR(task)) 2249 return PTR_ERR(task); 2250 status = 0; 2251 if (wait) 2252 status = rpc_wait_for_completion_task(task); 2253 rpc_put_task(task); 2254 return status; 2255 out_free_calldata: 2256 kfree(calldata); 2257 out: 2258 if (roc) 2259 pnfs_roc_release(state->inode); 2260 nfs4_put_open_state(state); 2261 nfs4_put_state_owner(sp); 2262 return status; 2263 } 2264 2265 static struct inode * 2266 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2267 { 2268 struct nfs4_state *state; 2269 2270 /* Protect against concurrent sillydeletes */ 2271 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, 2272 ctx->cred, &ctx->mdsthreshold); 2273 if (IS_ERR(state)) 2274 return ERR_CAST(state); 2275 ctx->state = state; 2276 return igrab(state->inode); 2277 } 2278 2279 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2280 { 2281 if (ctx->state == NULL) 2282 return; 2283 if (is_sync) 2284 nfs4_close_sync(ctx->state, ctx->mode); 2285 else 2286 nfs4_close_state(ctx->state, ctx->mode); 2287 } 2288 2289 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2290 { 2291 struct nfs4_server_caps_arg args = { 2292 .fhandle = fhandle, 2293 }; 2294 struct nfs4_server_caps_res res = {}; 2295 struct rpc_message msg = { 2296 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2297 .rpc_argp = &args, 2298 .rpc_resp = &res, 2299 }; 2300 int status; 2301 2302 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2303 if (status == 0) { 2304 
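		/* Cache the raw attribute bitmask the server returned, then
		 * translate the individual FATTR4_WORD* bits into the
		 * NFS_CAP_* capability flags used throughout the client. */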
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2305 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2306 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2307 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2308 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2309 NFS_CAP_CTIME|NFS_CAP_MTIME); 2310 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) 2311 server->caps |= NFS_CAP_ACLS; 2312 if (res.has_links != 0) 2313 server->caps |= NFS_CAP_HARDLINKS; 2314 if (res.has_symlinks != 0) 2315 server->caps |= NFS_CAP_SYMLINKS; 2316 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2317 server->caps |= NFS_CAP_FILEID; 2318 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2319 server->caps |= NFS_CAP_MODE; 2320 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2321 server->caps |= NFS_CAP_NLINK; 2322 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2323 server->caps |= NFS_CAP_OWNER; 2324 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2325 server->caps |= NFS_CAP_OWNER_GROUP; 2326 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2327 server->caps |= NFS_CAP_ATIME; 2328 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2329 server->caps |= NFS_CAP_CTIME; 2330 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2331 server->caps |= NFS_CAP_MTIME; 2332 2333 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2334 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2335 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2336 server->acl_bitmask = res.acl_bitmask; 2337 server->fh_expire_type = res.fh_expire_type; 2338 } 2339 2340 return status; 2341 } 2342 2343 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2344 { 2345 struct nfs4_exception exception = { }; 2346 int err; 2347 do { 2348 err = nfs4_handle_exception(server, 2349 _nfs4_server_capabilities(server, fhandle), 2350 &exception); 2351 } while (exception.retry); 2352 return err; 2353 } 2354 2355 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2356 struct nfs_fsinfo *info) 2357 { 2358 struct nfs4_lookup_root_arg args = { 2359 .bitmask = nfs4_fattr_bitmap, 2360 }; 2361 struct nfs4_lookup_res res = { 2362 .server = server, 2363 .fattr = info->fattr, 2364 .fh = fhandle, 2365 }; 2366 struct rpc_message msg = { 2367 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2368 .rpc_argp = &args, 2369 .rpc_resp = &res, 2370 }; 2371 2372 nfs_fattr_init(info->fattr); 2373 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2374 } 2375 2376 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2377 struct nfs_fsinfo *info) 2378 { 2379 struct nfs4_exception exception = { }; 2380 int err; 2381 do { 2382 err = _nfs4_lookup_root(server, fhandle, info); 2383 switch (err) { 2384 case 0: 2385 case -NFS4ERR_WRONGSEC: 2386 goto out; 2387 default: 2388 err = nfs4_handle_exception(server, err, &exception); 2389 } 2390 } while (exception.retry); 2391 out: 2392 return err; 2393 } 2394 2395 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2396 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 2397 { 2398 struct rpc_auth *auth; 2399 int ret; 2400 2401 auth = rpcauth_create(flavor, server->client); 2402 if (!auth) { 2403 ret = -EIO; 2404 goto out; 2405 } 2406 ret = nfs4_lookup_root(server, fhandle, info); 2407 out: 2408 return ret; 2409 } 2410 2411 static int nfs4_find_root_sec(struct nfs_server *server, 
					struct nfs_fh *fhandle,
					struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];

	len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
	BUG_ON(len < 0);

	for (i = 0; i < len; i++) {
		/* AUTH_UNIX is the default flavor if none was specified,
		 * thus has already been tried. */
		if (flav_array[i] == RPC_AUTH_UNIX)
			continue;

		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
			continue;
		break;
	}
	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount. It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}

/*
 * get the file handle for the "/" directory on the server
 */
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
			 struct nfs_fsinfo *info)
{
	int minor_version = server->nfs_client->cl_minorversion;
	int status = nfs4_lookup_root(server, fhandle, info);
	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
		/*
		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
		 * by nfs4_map_errors() as this function exits.
		 */
		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);
	return nfs4_map_errors(status);
}

static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
			      struct nfs_fsinfo *info)
{
	int error;
	struct nfs_fattr *fattr = info->fattr;

	error = nfs4_server_capabilities(server, mntfh);
	if (error < 0) {
		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
		return error;
	}

	error = nfs4_proc_getattr(server, mntfh, fattr);
	if (error < 0) {
		dprintk("nfs4_get_root: getattr error = %d\n", -error);
		return error;
	}

	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));

	return error;
}

/*
 * Get locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later when
 * we detect fsid mismatch in inode revalidation
 */
static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
			     const struct qstr *name, struct nfs_fattr *fattr,
			     struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
	if (status != 0)
		goto out;
	/* Make sure server returned a different fsid for the referral */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -EIO;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_getattr(server, fhandle, fattr),
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to in the size_change case to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
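 *
 * A size change typically arrives here from the VFS as, for instance,
 * ftruncate(fd, length): do_truncate() builds an iattr carrying
 * ATTR_SIZE (plus ATTR_FILE and ->ia_file when a struct file is at
 * hand), and nfs4_proc_setattr() below then reuses the credential and
 * stateid of that open file instead of opening the file again by name.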
2580 */ 2581 static int 2582 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 2583 struct iattr *sattr) 2584 { 2585 struct inode *inode = dentry->d_inode; 2586 struct rpc_cred *cred = NULL; 2587 struct nfs4_state *state = NULL; 2588 int status; 2589 2590 if (pnfs_ld_layoutret_on_setattr(inode)) 2591 pnfs_return_layout(inode); 2592 2593 nfs_fattr_init(fattr); 2594 2595 /* Deal with open(O_TRUNC) */ 2596 if (sattr->ia_valid & ATTR_OPEN) 2597 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2598 2599 /* Optimization: if the end result is no change, don't RPC */ 2600 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) 2601 return 0; 2602 2603 /* Search for an existing open(O_WRITE) file */ 2604 if (sattr->ia_valid & ATTR_FILE) { 2605 struct nfs_open_context *ctx; 2606 2607 ctx = nfs_file_open_context(sattr->ia_file); 2608 if (ctx) { 2609 cred = ctx->cred; 2610 state = ctx->state; 2611 } 2612 } 2613 2614 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2615 if (status == 0) 2616 nfs_setattr_update_inode(inode, sattr); 2617 return status; 2618 } 2619 2620 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 2621 const struct qstr *name, struct nfs_fh *fhandle, 2622 struct nfs_fattr *fattr) 2623 { 2624 struct nfs_server *server = NFS_SERVER(dir); 2625 int status; 2626 struct nfs4_lookup_arg args = { 2627 .bitmask = server->attr_bitmask, 2628 .dir_fh = NFS_FH(dir), 2629 .name = name, 2630 }; 2631 struct nfs4_lookup_res res = { 2632 .server = server, 2633 .fattr = fattr, 2634 .fh = fhandle, 2635 }; 2636 struct rpc_message msg = { 2637 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 2638 .rpc_argp = &args, 2639 .rpc_resp = &res, 2640 }; 2641 2642 nfs_fattr_init(fattr); 2643 2644 dprintk("NFS call lookup %s\n", name->name); 2645 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 2646 dprintk("NFS reply lookup: %d\n", status); 2647 return status; 2648 } 2649 2650 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 2651 { 2652 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 2653 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 2654 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 2655 fattr->nlink = 2; 2656 } 2657 2658 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 2659 struct qstr *name, struct nfs_fh *fhandle, 2660 struct nfs_fattr *fattr) 2661 { 2662 struct nfs4_exception exception = { }; 2663 struct rpc_clnt *client = *clnt; 2664 int err; 2665 do { 2666 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr); 2667 switch (err) { 2668 case -NFS4ERR_BADNAME: 2669 err = -ENOENT; 2670 goto out; 2671 case -NFS4ERR_MOVED: 2672 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 2673 goto out; 2674 case -NFS4ERR_WRONGSEC: 2675 err = -EPERM; 2676 if (client != *clnt) 2677 goto out; 2678 2679 client = nfs4_create_sec_client(client, dir, name); 2680 if (IS_ERR(client)) 2681 return PTR_ERR(client); 2682 2683 exception.retry = 1; 2684 break; 2685 default: 2686 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 2687 } 2688 } while (exception.retry); 2689 2690 out: 2691 if (err == 0) 2692 *clnt = client; 2693 else if (client != *clnt) 2694 rpc_shutdown_client(client); 2695 2696 return err; 2697 } 2698 2699 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 2700 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2701 { 2702 int status; 2703 struct rpc_clnt *client = NFS_CLIENT(dir); 2704 2705 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, 
fattr); 2706 if (client != NFS_CLIENT(dir)) { 2707 rpc_shutdown_client(client); 2708 nfs_fixup_secinfo_attributes(fattr); 2709 } 2710 return status; 2711 } 2712 2713 struct rpc_clnt * 2714 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 2715 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2716 { 2717 int status; 2718 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); 2719 2720 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); 2721 if (status < 0) { 2722 rpc_shutdown_client(client); 2723 return ERR_PTR(status); 2724 } 2725 return client; 2726 } 2727 2728 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2729 { 2730 struct nfs_server *server = NFS_SERVER(inode); 2731 struct nfs4_accessargs args = { 2732 .fh = NFS_FH(inode), 2733 .bitmask = server->cache_consistency_bitmask, 2734 }; 2735 struct nfs4_accessres res = { 2736 .server = server, 2737 }; 2738 struct rpc_message msg = { 2739 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 2740 .rpc_argp = &args, 2741 .rpc_resp = &res, 2742 .rpc_cred = entry->cred, 2743 }; 2744 int mode = entry->mask; 2745 int status; 2746 2747 /* 2748 * Determine which access bits we want to ask for... 2749 */ 2750 if (mode & MAY_READ) 2751 args.access |= NFS4_ACCESS_READ; 2752 if (S_ISDIR(inode->i_mode)) { 2753 if (mode & MAY_WRITE) 2754 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 2755 if (mode & MAY_EXEC) 2756 args.access |= NFS4_ACCESS_LOOKUP; 2757 } else { 2758 if (mode & MAY_WRITE) 2759 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 2760 if (mode & MAY_EXEC) 2761 args.access |= NFS4_ACCESS_EXECUTE; 2762 } 2763 2764 res.fattr = nfs_alloc_fattr(); 2765 if (res.fattr == NULL) 2766 return -ENOMEM; 2767 2768 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2769 if (!status) { 2770 entry->mask = 0; 2771 if (res.access & NFS4_ACCESS_READ) 2772 entry->mask |= MAY_READ; 2773 if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE)) 2774 entry->mask |= MAY_WRITE; 2775 if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE)) 2776 entry->mask |= MAY_EXEC; 2777 nfs_refresh_inode(inode, res.fattr); 2778 } 2779 nfs_free_fattr(res.fattr); 2780 return status; 2781 } 2782 2783 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2784 { 2785 struct nfs4_exception exception = { }; 2786 int err; 2787 do { 2788 err = nfs4_handle_exception(NFS_SERVER(inode), 2789 _nfs4_proc_access(inode, entry), 2790 &exception); 2791 } while (exception.retry); 2792 return err; 2793 } 2794 2795 /* 2796 * TODO: For the time being, we don't try to get any attributes 2797 * along with any of the zero-copy operations READ, READDIR, 2798 * READLINK, WRITE. 2799 * 2800 * In the case of the first three, we want to put the GETATTR 2801 * after the read-type operation -- this is because it is hard 2802 * to predict the length of a GETATTR response in v4, and thus 2803 * align the READ data correctly. This means that the GETATTR 2804 * may end up partially falling into the page cache, and we should 2805 * shift it into the 'tail' of the xdr_buf before processing. 2806 * To do this efficiently, we need to know the total length 2807 * of data received, which doesn't seem to be available outside 2808 * of the RPC layer. 
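 *
 * Concretely, a read is encoded today as roughly the compound
 *	(SEQUENCE;) PUTFH; READ
 * and bundling attributes would make it
 *	(SEQUENCE;) PUTFH; READ; GETATTR
 * where the variable-length GETATTR reply is what makes it hard to
 * keep the READ payload page-aligned in the receive buffer.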
2809 * 2810 * In the case of WRITE, we also want to put the GETATTR after 2811 * the operation -- in this case because we want to make sure 2812 * we get the post-operation mtime and size. 2813 * 2814 * Both of these changes to the XDR layer would in fact be quite 2815 * minor, but I decided to leave them for a subsequent patch. 2816 */ 2817 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 2818 unsigned int pgbase, unsigned int pglen) 2819 { 2820 struct nfs4_readlink args = { 2821 .fh = NFS_FH(inode), 2822 .pgbase = pgbase, 2823 .pglen = pglen, 2824 .pages = &page, 2825 }; 2826 struct nfs4_readlink_res res; 2827 struct rpc_message msg = { 2828 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 2829 .rpc_argp = &args, 2830 .rpc_resp = &res, 2831 }; 2832 2833 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 2834 } 2835 2836 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 2837 unsigned int pgbase, unsigned int pglen) 2838 { 2839 struct nfs4_exception exception = { }; 2840 int err; 2841 do { 2842 err = nfs4_handle_exception(NFS_SERVER(inode), 2843 _nfs4_proc_readlink(inode, page, pgbase, pglen), 2844 &exception); 2845 } while (exception.retry); 2846 return err; 2847 } 2848 2849 /* 2850 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 2851 */ 2852 static int 2853 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 2854 int flags) 2855 { 2856 struct nfs_open_context *ctx; 2857 struct nfs4_state *state; 2858 int status = 0; 2859 2860 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 2861 if (IS_ERR(ctx)) 2862 return PTR_ERR(ctx); 2863 2864 sattr->ia_mode &= ~current_umask(); 2865 state = nfs4_do_open(dir, dentry, ctx->mode, 2866 flags, sattr, ctx->cred, 2867 &ctx->mdsthreshold); 2868 d_drop(dentry); 2869 if (IS_ERR(state)) { 2870 status = PTR_ERR(state); 2871 goto out; 2872 } 2873 d_add(dentry, igrab(state->inode)); 2874 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 2875 ctx->state = state; 2876 out: 2877 put_nfs_open_context(ctx); 2878 return status; 2879 } 2880 2881 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 2882 { 2883 struct nfs_server *server = NFS_SERVER(dir); 2884 struct nfs_removeargs args = { 2885 .fh = NFS_FH(dir), 2886 .name = *name, 2887 }; 2888 struct nfs_removeres res = { 2889 .server = server, 2890 }; 2891 struct rpc_message msg = { 2892 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 2893 .rpc_argp = &args, 2894 .rpc_resp = &res, 2895 }; 2896 int status; 2897 2898 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 2899 if (status == 0) 2900 update_changeattr(dir, &res.cinfo); 2901 return status; 2902 } 2903 2904 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 2905 { 2906 struct nfs4_exception exception = { }; 2907 int err; 2908 do { 2909 err = nfs4_handle_exception(NFS_SERVER(dir), 2910 _nfs4_proc_remove(dir, name), 2911 &exception); 2912 } while (exception.retry); 2913 return err; 2914 } 2915 2916 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 2917 { 2918 struct nfs_server *server = NFS_SERVER(dir); 2919 struct nfs_removeargs *args = msg->rpc_argp; 2920 struct nfs_removeres *res = msg->rpc_resp; 2921 2922 res->server = server; 2923 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 2924 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); 2925 } 2926 2927 static void 
nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 2928 { 2929 if (nfs4_setup_sequence(NFS_SERVER(data->dir), 2930 &data->args.seq_args, 2931 &data->res.seq_res, 2932 task)) 2933 return; 2934 rpc_call_start(task); 2935 } 2936 2937 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 2938 { 2939 struct nfs_removeres *res = task->tk_msg.rpc_resp; 2940 2941 if (!nfs4_sequence_done(task, &res->seq_res)) 2942 return 0; 2943 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2944 return 0; 2945 update_changeattr(dir, &res->cinfo); 2946 return 1; 2947 } 2948 2949 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 2950 { 2951 struct nfs_server *server = NFS_SERVER(dir); 2952 struct nfs_renameargs *arg = msg->rpc_argp; 2953 struct nfs_renameres *res = msg->rpc_resp; 2954 2955 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 2956 res->server = server; 2957 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); 2958 } 2959 2960 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 2961 { 2962 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir), 2963 &data->args.seq_args, 2964 &data->res.seq_res, 2965 task)) 2966 return; 2967 rpc_call_start(task); 2968 } 2969 2970 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 2971 struct inode *new_dir) 2972 { 2973 struct nfs_renameres *res = task->tk_msg.rpc_resp; 2974 2975 if (!nfs4_sequence_done(task, &res->seq_res)) 2976 return 0; 2977 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2978 return 0; 2979 2980 update_changeattr(old_dir, &res->old_cinfo); 2981 update_changeattr(new_dir, &res->new_cinfo); 2982 return 1; 2983 } 2984 2985 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 2986 struct inode *new_dir, struct qstr *new_name) 2987 { 2988 struct nfs_server *server = NFS_SERVER(old_dir); 2989 struct nfs_renameargs arg = { 2990 .old_dir = NFS_FH(old_dir), 2991 .new_dir = NFS_FH(new_dir), 2992 .old_name = old_name, 2993 .new_name = new_name, 2994 }; 2995 struct nfs_renameres res = { 2996 .server = server, 2997 }; 2998 struct rpc_message msg = { 2999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], 3000 .rpc_argp = &arg, 3001 .rpc_resp = &res, 3002 }; 3003 int status = -ENOMEM; 3004 3005 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3006 if (!status) { 3007 update_changeattr(old_dir, &res.old_cinfo); 3008 update_changeattr(new_dir, &res.new_cinfo); 3009 } 3010 return status; 3011 } 3012 3013 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3014 struct inode *new_dir, struct qstr *new_name) 3015 { 3016 struct nfs4_exception exception = { }; 3017 int err; 3018 do { 3019 err = nfs4_handle_exception(NFS_SERVER(old_dir), 3020 _nfs4_proc_rename(old_dir, old_name, 3021 new_dir, new_name), 3022 &exception); 3023 } while (exception.retry); 3024 return err; 3025 } 3026 3027 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3028 { 3029 struct nfs_server *server = NFS_SERVER(inode); 3030 struct nfs4_link_arg arg = { 3031 .fh = NFS_FH(inode), 3032 .dir_fh = NFS_FH(dir), 3033 .name = name, 3034 .bitmask = server->attr_bitmask, 3035 }; 3036 struct nfs4_link_res res = { 3037 .server = server, 3038 }; 3039 struct rpc_message msg = { 3040 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3041 .rpc_argp = &arg, 3042 .rpc_resp = &res, 3043 }; 3044 int status = -ENOMEM; 
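	/* A successful LINK bumps the target inode's link count and ctime
	 * as well as changing the directory, so allocate a fattr here and
	 * use it below to refresh the inode alongside the directory's
	 * change attribute. */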
3045 3046 res.fattr = nfs_alloc_fattr(); 3047 if (res.fattr == NULL) 3048 goto out; 3049 3050 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3051 if (!status) { 3052 update_changeattr(dir, &res.cinfo); 3053 nfs_post_op_update_inode(inode, res.fattr); 3054 } 3055 out: 3056 nfs_free_fattr(res.fattr); 3057 return status; 3058 } 3059 3060 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3061 { 3062 struct nfs4_exception exception = { }; 3063 int err; 3064 do { 3065 err = nfs4_handle_exception(NFS_SERVER(inode), 3066 _nfs4_proc_link(inode, dir, name), 3067 &exception); 3068 } while (exception.retry); 3069 return err; 3070 } 3071 3072 struct nfs4_createdata { 3073 struct rpc_message msg; 3074 struct nfs4_create_arg arg; 3075 struct nfs4_create_res res; 3076 struct nfs_fh fh; 3077 struct nfs_fattr fattr; 3078 }; 3079 3080 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3081 struct qstr *name, struct iattr *sattr, u32 ftype) 3082 { 3083 struct nfs4_createdata *data; 3084 3085 data = kzalloc(sizeof(*data), GFP_KERNEL); 3086 if (data != NULL) { 3087 struct nfs_server *server = NFS_SERVER(dir); 3088 3089 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3090 data->msg.rpc_argp = &data->arg; 3091 data->msg.rpc_resp = &data->res; 3092 data->arg.dir_fh = NFS_FH(dir); 3093 data->arg.server = server; 3094 data->arg.name = name; 3095 data->arg.attrs = sattr; 3096 data->arg.ftype = ftype; 3097 data->arg.bitmask = server->attr_bitmask; 3098 data->res.server = server; 3099 data->res.fh = &data->fh; 3100 data->res.fattr = &data->fattr; 3101 nfs_fattr_init(data->res.fattr); 3102 } 3103 return data; 3104 } 3105 3106 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3107 { 3108 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3109 &data->arg.seq_args, &data->res.seq_res, 1); 3110 if (status == 0) { 3111 update_changeattr(dir, &data->res.dir_cinfo); 3112 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 3113 } 3114 return status; 3115 } 3116 3117 static void nfs4_free_createdata(struct nfs4_createdata *data) 3118 { 3119 kfree(data); 3120 } 3121 3122 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3123 struct page *page, unsigned int len, struct iattr *sattr) 3124 { 3125 struct nfs4_createdata *data; 3126 int status = -ENAMETOOLONG; 3127 3128 if (len > NFS4_MAXPATHLEN) 3129 goto out; 3130 3131 status = -ENOMEM; 3132 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3133 if (data == NULL) 3134 goto out; 3135 3136 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3137 data->arg.u.symlink.pages = &page; 3138 data->arg.u.symlink.len = len; 3139 3140 status = nfs4_do_create(dir, dentry, data); 3141 3142 nfs4_free_createdata(data); 3143 out: 3144 return status; 3145 } 3146 3147 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3148 struct page *page, unsigned int len, struct iattr *sattr) 3149 { 3150 struct nfs4_exception exception = { }; 3151 int err; 3152 do { 3153 err = nfs4_handle_exception(NFS_SERVER(dir), 3154 _nfs4_proc_symlink(dir, dentry, page, 3155 len, sattr), 3156 &exception); 3157 } while (exception.retry); 3158 return err; 3159 } 3160 3161 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3162 struct iattr *sattr) 3163 { 3164 struct nfs4_createdata *data; 3165 int status = -ENOMEM; 3166 3167 data = 
nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3168 if (data == NULL) 3169 goto out; 3170 3171 status = nfs4_do_create(dir, dentry, data); 3172 3173 nfs4_free_createdata(data); 3174 out: 3175 return status; 3176 } 3177 3178 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3179 struct iattr *sattr) 3180 { 3181 struct nfs4_exception exception = { }; 3182 int err; 3183 3184 sattr->ia_mode &= ~current_umask(); 3185 do { 3186 err = nfs4_handle_exception(NFS_SERVER(dir), 3187 _nfs4_proc_mkdir(dir, dentry, sattr), 3188 &exception); 3189 } while (exception.retry); 3190 return err; 3191 } 3192 3193 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3194 u64 cookie, struct page **pages, unsigned int count, int plus) 3195 { 3196 struct inode *dir = dentry->d_inode; 3197 struct nfs4_readdir_arg args = { 3198 .fh = NFS_FH(dir), 3199 .pages = pages, 3200 .pgbase = 0, 3201 .count = count, 3202 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, 3203 .plus = plus, 3204 }; 3205 struct nfs4_readdir_res res; 3206 struct rpc_message msg = { 3207 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3208 .rpc_argp = &args, 3209 .rpc_resp = &res, 3210 .rpc_cred = cred, 3211 }; 3212 int status; 3213 3214 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__, 3215 dentry->d_parent->d_name.name, 3216 dentry->d_name.name, 3217 (unsigned long long)cookie); 3218 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3219 res.pgbase = args.pgbase; 3220 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3221 if (status >= 0) { 3222 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3223 status += args.pgbase; 3224 } 3225 3226 nfs_invalidate_atime(dir); 3227 3228 dprintk("%s: returns %d\n", __func__, status); 3229 return status; 3230 } 3231 3232 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3233 u64 cookie, struct page **pages, unsigned int count, int plus) 3234 { 3235 struct nfs4_exception exception = { }; 3236 int err; 3237 do { 3238 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), 3239 _nfs4_proc_readdir(dentry, cred, cookie, 3240 pages, count, plus), 3241 &exception); 3242 } while (exception.retry); 3243 return err; 3244 } 3245 3246 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3247 struct iattr *sattr, dev_t rdev) 3248 { 3249 struct nfs4_createdata *data; 3250 int mode = sattr->ia_mode; 3251 int status = -ENOMEM; 3252 3253 BUG_ON(!(sattr->ia_valid & ATTR_MODE)); 3254 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); 3255 3256 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3257 if (data == NULL) 3258 goto out; 3259 3260 if (S_ISFIFO(mode)) 3261 data->arg.ftype = NF4FIFO; 3262 else if (S_ISBLK(mode)) { 3263 data->arg.ftype = NF4BLK; 3264 data->arg.u.device.specdata1 = MAJOR(rdev); 3265 data->arg.u.device.specdata2 = MINOR(rdev); 3266 } 3267 else if (S_ISCHR(mode)) { 3268 data->arg.ftype = NF4CHR; 3269 data->arg.u.device.specdata1 = MAJOR(rdev); 3270 data->arg.u.device.specdata2 = MINOR(rdev); 3271 } 3272 3273 status = nfs4_do_create(dir, dentry, data); 3274 3275 nfs4_free_createdata(data); 3276 out: 3277 return status; 3278 } 3279 3280 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3281 struct iattr *sattr, dev_t rdev) 3282 { 3283 struct nfs4_exception exception = { }; 3284 int err; 3285 3286 sattr->ia_mode &= ~current_umask(); 3287 do { 
3288 err = nfs4_handle_exception(NFS_SERVER(dir), 3289 _nfs4_proc_mknod(dir, dentry, sattr, rdev), 3290 &exception); 3291 } while (exception.retry); 3292 return err; 3293 } 3294 3295 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3296 struct nfs_fsstat *fsstat) 3297 { 3298 struct nfs4_statfs_arg args = { 3299 .fh = fhandle, 3300 .bitmask = server->attr_bitmask, 3301 }; 3302 struct nfs4_statfs_res res = { 3303 .fsstat = fsstat, 3304 }; 3305 struct rpc_message msg = { 3306 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 3307 .rpc_argp = &args, 3308 .rpc_resp = &res, 3309 }; 3310 3311 nfs_fattr_init(fsstat->fattr); 3312 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3313 } 3314 3315 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 3316 { 3317 struct nfs4_exception exception = { }; 3318 int err; 3319 do { 3320 err = nfs4_handle_exception(server, 3321 _nfs4_proc_statfs(server, fhandle, fsstat), 3322 &exception); 3323 } while (exception.retry); 3324 return err; 3325 } 3326 3327 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 3328 struct nfs_fsinfo *fsinfo) 3329 { 3330 struct nfs4_fsinfo_arg args = { 3331 .fh = fhandle, 3332 .bitmask = server->attr_bitmask, 3333 }; 3334 struct nfs4_fsinfo_res res = { 3335 .fsinfo = fsinfo, 3336 }; 3337 struct rpc_message msg = { 3338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 3339 .rpc_argp = &args, 3340 .rpc_resp = &res, 3341 }; 3342 3343 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3344 } 3345 3346 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3347 { 3348 struct nfs4_exception exception = { }; 3349 int err; 3350 3351 do { 3352 err = nfs4_handle_exception(server, 3353 _nfs4_do_fsinfo(server, fhandle, fsinfo), 3354 &exception); 3355 } while (exception.retry); 3356 return err; 3357 } 3358 3359 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3360 { 3361 int error; 3362 3363 nfs_fattr_init(fsinfo->fattr); 3364 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 3365 if (error == 0) 3366 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 3367 3368 return error; 3369 } 3370 3371 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3372 struct nfs_pathconf *pathconf) 3373 { 3374 struct nfs4_pathconf_arg args = { 3375 .fh = fhandle, 3376 .bitmask = server->attr_bitmask, 3377 }; 3378 struct nfs4_pathconf_res res = { 3379 .pathconf = pathconf, 3380 }; 3381 struct rpc_message msg = { 3382 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 3383 .rpc_argp = &args, 3384 .rpc_resp = &res, 3385 }; 3386 3387 /* None of the pathconf attributes are mandatory to implement */ 3388 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 3389 memset(pathconf, 0, sizeof(*pathconf)); 3390 return 0; 3391 } 3392 3393 nfs_fattr_init(pathconf->fattr); 3394 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3395 } 3396 3397 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3398 struct nfs_pathconf *pathconf) 3399 { 3400 struct nfs4_exception exception = { }; 3401 int err; 3402 3403 do { 3404 err = nfs4_handle_exception(server, 3405 _nfs4_proc_pathconf(server, fhandle, pathconf), 3406 &exception); 3407 } while (exception.retry); 3408 return err; 3409 } 3410 3411 void 
__nfs4_read_done_cb(struct nfs_read_data *data) 3412 { 3413 nfs_invalidate_atime(data->header->inode); 3414 } 3415 3416 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) 3417 { 3418 struct nfs_server *server = NFS_SERVER(data->header->inode); 3419 3420 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3421 rpc_restart_call_prepare(task); 3422 return -EAGAIN; 3423 } 3424 3425 __nfs4_read_done_cb(data); 3426 if (task->tk_status > 0) 3427 renew_lease(server, data->timestamp); 3428 return 0; 3429 } 3430 3431 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) 3432 { 3433 3434 dprintk("--> %s\n", __func__); 3435 3436 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3437 return -EAGAIN; 3438 3439 return data->read_done_cb ? data->read_done_cb(task, data) : 3440 nfs4_read_done_cb(task, data); 3441 } 3442 3443 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg) 3444 { 3445 data->timestamp = jiffies; 3446 data->read_done_cb = nfs4_read_done_cb; 3447 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 3448 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 3449 } 3450 3451 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) 3452 { 3453 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3454 &data->args.seq_args, 3455 &data->res.seq_res, 3456 task)) 3457 return; 3458 rpc_call_start(task); 3459 } 3460 3461 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3462 { 3463 struct inode *inode = data->header->inode; 3464 3465 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3466 rpc_restart_call_prepare(task); 3467 return -EAGAIN; 3468 } 3469 if (task->tk_status >= 0) { 3470 renew_lease(NFS_SERVER(inode), data->timestamp); 3471 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 3472 } 3473 return 0; 3474 } 3475 3476 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) 3477 { 3478 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3479 return -EAGAIN; 3480 return data->write_done_cb ? 
data->write_done_cb(task, data) : 3481 nfs4_write_done_cb(task, data); 3482 } 3483 3484 static 3485 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data) 3486 { 3487 const struct nfs_pgio_header *hdr = data->header; 3488 3489 /* Don't request attributes for pNFS or O_DIRECT writes */ 3490 if (data->ds_clp != NULL || hdr->dreq != NULL) 3491 return false; 3492 /* Otherwise, request attributes if and only if we don't hold 3493 * a delegation 3494 */ 3495 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 3496 } 3497 3498 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg) 3499 { 3500 struct nfs_server *server = NFS_SERVER(data->header->inode); 3501 3502 if (!nfs4_write_need_cache_consistency_data(data)) { 3503 data->args.bitmask = NULL; 3504 data->res.fattr = NULL; 3505 } else 3506 data->args.bitmask = server->cache_consistency_bitmask; 3507 3508 if (!data->write_done_cb) 3509 data->write_done_cb = nfs4_write_done_cb; 3510 data->res.server = server; 3511 data->timestamp = jiffies; 3512 3513 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 3514 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3515 } 3516 3517 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) 3518 { 3519 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3520 &data->args.seq_args, 3521 &data->res.seq_res, 3522 task)) 3523 return; 3524 rpc_call_start(task); 3525 } 3526 3527 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 3528 { 3529 if (nfs4_setup_sequence(NFS_SERVER(data->inode), 3530 &data->args.seq_args, 3531 &data->res.seq_res, 3532 task)) 3533 return; 3534 rpc_call_start(task); 3535 } 3536 3537 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 3538 { 3539 struct inode *inode = data->inode; 3540 3541 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3542 rpc_restart_call_prepare(task); 3543 return -EAGAIN; 3544 } 3545 return 0; 3546 } 3547 3548 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 3549 { 3550 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3551 return -EAGAIN; 3552 return data->commit_done_cb(task, data); 3553 } 3554 3555 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 3556 { 3557 struct nfs_server *server = NFS_SERVER(data->inode); 3558 3559 if (data->commit_done_cb == NULL) 3560 data->commit_done_cb = nfs4_commit_done_cb; 3561 data->res.server = server; 3562 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 3563 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3564 } 3565 3566 struct nfs4_renewdata { 3567 struct nfs_client *client; 3568 unsigned long timestamp; 3569 }; 3570 3571 /* 3572 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3573 * standalone procedure for queueing an asynchronous RENEW. 
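 * With NFSv4.1 the lease is instead kept alive by the SEQUENCE op that
 * every compound carries, so this RENEW path is effectively specific to
 * NFSv4.0 mounts.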
3574 */ 3575 static void nfs4_renew_release(void *calldata) 3576 { 3577 struct nfs4_renewdata *data = calldata; 3578 struct nfs_client *clp = data->client; 3579 3580 if (atomic_read(&clp->cl_count) > 1) 3581 nfs4_schedule_state_renewal(clp); 3582 nfs_put_client(clp); 3583 kfree(data); 3584 } 3585 3586 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 3587 { 3588 struct nfs4_renewdata *data = calldata; 3589 struct nfs_client *clp = data->client; 3590 unsigned long timestamp = data->timestamp; 3591 3592 if (task->tk_status < 0) { 3593 /* Unless we're shutting down, schedule state recovery! */ 3594 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3595 return; 3596 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3597 nfs4_schedule_lease_recovery(clp); 3598 return; 3599 } 3600 nfs4_schedule_path_down_recovery(clp); 3601 } 3602 do_renew_lease(clp, timestamp); 3603 } 3604 3605 static const struct rpc_call_ops nfs4_renew_ops = { 3606 .rpc_call_done = nfs4_renew_done, 3607 .rpc_release = nfs4_renew_release, 3608 }; 3609 3610 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3611 { 3612 struct rpc_message msg = { 3613 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3614 .rpc_argp = clp, 3615 .rpc_cred = cred, 3616 }; 3617 struct nfs4_renewdata *data; 3618 3619 if (renew_flags == 0) 3620 return 0; 3621 if (!atomic_inc_not_zero(&clp->cl_count)) 3622 return -EIO; 3623 data = kmalloc(sizeof(*data), GFP_NOFS); 3624 if (data == NULL) 3625 return -ENOMEM; 3626 data->client = clp; 3627 data->timestamp = jiffies; 3628 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 3629 &nfs4_renew_ops, data); 3630 } 3631 3632 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3633 { 3634 struct rpc_message msg = { 3635 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3636 .rpc_argp = clp, 3637 .rpc_cred = cred, 3638 }; 3639 unsigned long now = jiffies; 3640 int status; 3641 3642 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3643 if (status < 0) 3644 return status; 3645 do_renew_lease(clp, now); 3646 return 0; 3647 } 3648 3649 static inline int nfs4_server_supports_acls(struct nfs_server *server) 3650 { 3651 return (server->caps & NFS_CAP_ACLS) 3652 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3653 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); 3654 } 3655 3656 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 3657 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 3658 * the stack. 
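 *
 * For example, with the usual 65536-byte XATTR_SIZE_MAX and 4096-byte
 * pages, NFS4ACL_MAXPAGES works out to 16, i.e. an on-stack array of
 * 16 struct page pointers (128 bytes on a 64-bit build) in the ACL
 * helpers below.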
3659 */ 3660 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 3661 3662 static int buf_to_pages_noslab(const void *buf, size_t buflen, 3663 struct page **pages, unsigned int *pgbase) 3664 { 3665 struct page *newpage, **spages; 3666 int rc = 0; 3667 size_t len; 3668 spages = pages; 3669 3670 do { 3671 len = min_t(size_t, PAGE_SIZE, buflen); 3672 newpage = alloc_page(GFP_KERNEL); 3673 3674 if (newpage == NULL) 3675 goto unwind; 3676 memcpy(page_address(newpage), buf, len); 3677 buf += len; 3678 buflen -= len; 3679 *pages++ = newpage; 3680 rc++; 3681 } while (buflen != 0); 3682 3683 return rc; 3684 3685 unwind: 3686 for(; rc > 0; rc--) 3687 __free_page(spages[rc-1]); 3688 return -ENOMEM; 3689 } 3690 3691 struct nfs4_cached_acl { 3692 int cached; 3693 size_t len; 3694 char data[0]; 3695 }; 3696 3697 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 3698 { 3699 struct nfs_inode *nfsi = NFS_I(inode); 3700 3701 spin_lock(&inode->i_lock); 3702 kfree(nfsi->nfs4_acl); 3703 nfsi->nfs4_acl = acl; 3704 spin_unlock(&inode->i_lock); 3705 } 3706 3707 static void nfs4_zap_acl_attr(struct inode *inode) 3708 { 3709 nfs4_set_cached_acl(inode, NULL); 3710 } 3711 3712 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 3713 { 3714 struct nfs_inode *nfsi = NFS_I(inode); 3715 struct nfs4_cached_acl *acl; 3716 int ret = -ENOENT; 3717 3718 spin_lock(&inode->i_lock); 3719 acl = nfsi->nfs4_acl; 3720 if (acl == NULL) 3721 goto out; 3722 if (buf == NULL) /* user is just asking for length */ 3723 goto out_len; 3724 if (acl->cached == 0) 3725 goto out; 3726 ret = -ERANGE; /* see getxattr(2) man page */ 3727 if (acl->len > buflen) 3728 goto out; 3729 memcpy(buf, acl->data, acl->len); 3730 out_len: 3731 ret = acl->len; 3732 out: 3733 spin_unlock(&inode->i_lock); 3734 return ret; 3735 } 3736 3737 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3738 { 3739 struct nfs4_cached_acl *acl; 3740 size_t buflen = sizeof(*acl) + acl_len; 3741 3742 if (buflen <= PAGE_SIZE) { 3743 acl = kmalloc(buflen, GFP_KERNEL); 3744 if (acl == NULL) 3745 goto out; 3746 acl->cached = 1; 3747 _copy_from_pages(acl->data, pages, pgbase, acl_len); 3748 } else { 3749 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 3750 if (acl == NULL) 3751 goto out; 3752 acl->cached = 0; 3753 } 3754 acl->len = acl_len; 3755 out: 3756 nfs4_set_cached_acl(inode, acl); 3757 } 3758 3759 /* 3760 * The getxattr API returns the required buffer length when called with a 3761 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 3762 * the required buf. On a NULL buf, we send a page of data to the server 3763 * guessing that the ACL request can be serviced by a page. If so, we cache 3764 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 3765 * the cache. If not so, we throw away the page, and cache the required 3766 * length. The next getxattr call will then produce another round trip to 3767 * the server, this time with the input buf of the required size. 
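 *
 * From user space the sequence looks roughly like this (illustrative
 * sketch only, using the "system.nfs4_acl" attribute name exposed by
 * this client):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0) {
 *		char *buf = malloc(len);
 *		if (buf != NULL)
 *			len = getxattr(path, "system.nfs4_acl", buf, len);
 *	}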
3768 */ 3769 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3770 { 3771 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 3772 struct nfs_getaclargs args = { 3773 .fh = NFS_FH(inode), 3774 .acl_pages = pages, 3775 .acl_len = buflen, 3776 }; 3777 struct nfs_getaclres res = { 3778 .acl_len = buflen, 3779 }; 3780 struct rpc_message msg = { 3781 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 3782 .rpc_argp = &args, 3783 .rpc_resp = &res, 3784 }; 3785 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 3786 int ret = -ENOMEM, i; 3787 3788 /* As long as we're doing a round trip to the server anyway, 3789 * let's be prepared for a page of acl data. */ 3790 if (npages == 0) 3791 npages = 1; 3792 if (npages > ARRAY_SIZE(pages)) 3793 return -ERANGE; 3794 3795 for (i = 0; i < npages; i++) { 3796 pages[i] = alloc_page(GFP_KERNEL); 3797 if (!pages[i]) 3798 goto out_free; 3799 } 3800 3801 /* for decoding across pages */ 3802 res.acl_scratch = alloc_page(GFP_KERNEL); 3803 if (!res.acl_scratch) 3804 goto out_free; 3805 3806 args.acl_len = npages * PAGE_SIZE; 3807 args.acl_pgbase = 0; 3808 3809 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 3810 __func__, buf, buflen, npages, args.acl_len); 3811 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 3812 &msg, &args.seq_args, &res.seq_res, 0); 3813 if (ret) 3814 goto out_free; 3815 3816 /* Handle the case where the passed-in buffer is too short */ 3817 if (res.acl_flags & NFS4_ACL_TRUNC) { 3818 /* Did the user only issue a request for the acl length? */ 3819 if (buf == NULL) 3820 goto out_ok; 3821 ret = -ERANGE; 3822 goto out_free; 3823 } 3824 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 3825 if (buf) 3826 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 3827 out_ok: 3828 ret = res.acl_len; 3829 out_free: 3830 for (i = 0; i < npages; i++) 3831 if (pages[i]) 3832 __free_page(pages[i]); 3833 if (res.acl_scratch) 3834 __free_page(res.acl_scratch); 3835 return ret; 3836 } 3837 3838 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3839 { 3840 struct nfs4_exception exception = { }; 3841 ssize_t ret; 3842 do { 3843 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 3844 if (ret >= 0) 3845 break; 3846 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 3847 } while (exception.retry); 3848 return ret; 3849 } 3850 3851 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 3852 { 3853 struct nfs_server *server = NFS_SERVER(inode); 3854 int ret; 3855 3856 if (!nfs4_server_supports_acls(server)) 3857 return -EOPNOTSUPP; 3858 ret = nfs_revalidate_inode(server, inode); 3859 if (ret < 0) 3860 return ret; 3861 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 3862 nfs_zap_acl_cache(inode); 3863 ret = nfs4_read_cached_acl(inode, buf, buflen); 3864 if (ret != -ENOENT) 3865 /* -ENOENT is returned if there is no ACL or if there is an ACL 3866 * but no cached acl data, just the acl length */ 3867 return ret; 3868 return nfs4_get_acl_uncached(inode, buf, buflen); 3869 } 3870 3871 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 3872 { 3873 struct nfs_server *server = NFS_SERVER(inode); 3874 struct page *pages[NFS4ACL_MAXPAGES]; 3875 struct nfs_setaclargs arg = { 3876 .fh = NFS_FH(inode), 3877 .acl_pages = pages, 3878 .acl_len = buflen, 3879 }; 3880 struct nfs_setaclres res; 3881 struct rpc_message msg = { 3882 .rpc_proc = 
&nfs4_procedures[NFSPROC4_CLNT_SETACL], 3883 .rpc_argp = &arg, 3884 .rpc_resp = &res, 3885 }; 3886 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 3887 int ret, i; 3888 3889 if (!nfs4_server_supports_acls(server)) 3890 return -EOPNOTSUPP; 3891 if (npages > ARRAY_SIZE(pages)) 3892 return -ERANGE; 3893 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3894 if (i < 0) 3895 return i; 3896 nfs4_inode_return_delegation(inode); 3897 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3898 3899 /* 3900 * Free each page after tx, so the only ref left is 3901 * held by the network stack 3902 */ 3903 for (; i > 0; i--) 3904 put_page(pages[i-1]); 3905 3906 /* 3907 * Acl update can result in inode attribute update. 3908 * so mark the attribute cache invalid. 3909 */ 3910 spin_lock(&inode->i_lock); 3911 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 3912 spin_unlock(&inode->i_lock); 3913 nfs_access_zap_cache(inode); 3914 nfs_zap_acl_cache(inode); 3915 return ret; 3916 } 3917 3918 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 3919 { 3920 struct nfs4_exception exception = { }; 3921 int err; 3922 do { 3923 err = nfs4_handle_exception(NFS_SERVER(inode), 3924 __nfs4_proc_set_acl(inode, buf, buflen), 3925 &exception); 3926 } while (exception.retry); 3927 return err; 3928 } 3929 3930 static int 3931 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) 3932 { 3933 struct nfs_client *clp = server->nfs_client; 3934 3935 if (task->tk_status >= 0) 3936 return 0; 3937 switch(task->tk_status) { 3938 case -NFS4ERR_DELEG_REVOKED: 3939 case -NFS4ERR_ADMIN_REVOKED: 3940 case -NFS4ERR_BAD_STATEID: 3941 if (state == NULL) 3942 break; 3943 nfs_remove_bad_delegation(state->inode); 3944 case -NFS4ERR_OPENMODE: 3945 if (state == NULL) 3946 break; 3947 nfs4_schedule_stateid_recovery(server, state); 3948 goto wait_on_recovery; 3949 case -NFS4ERR_EXPIRED: 3950 if (state != NULL) 3951 nfs4_schedule_stateid_recovery(server, state); 3952 case -NFS4ERR_STALE_STATEID: 3953 case -NFS4ERR_STALE_CLIENTID: 3954 nfs4_schedule_lease_recovery(clp); 3955 goto wait_on_recovery; 3956 #if defined(CONFIG_NFS_V4_1) 3957 case -NFS4ERR_BADSESSION: 3958 case -NFS4ERR_BADSLOT: 3959 case -NFS4ERR_BAD_HIGH_SLOT: 3960 case -NFS4ERR_DEADSESSION: 3961 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 3962 case -NFS4ERR_SEQ_FALSE_RETRY: 3963 case -NFS4ERR_SEQ_MISORDERED: 3964 dprintk("%s ERROR %d, Reset session\n", __func__, 3965 task->tk_status); 3966 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 3967 task->tk_status = 0; 3968 return -EAGAIN; 3969 #endif /* CONFIG_NFS_V4_1 */ 3970 case -NFS4ERR_DELAY: 3971 nfs_inc_server_stats(server, NFSIOS_DELAY); 3972 case -NFS4ERR_GRACE: 3973 case -EKEYEXPIRED: 3974 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3975 task->tk_status = 0; 3976 return -EAGAIN; 3977 case -NFS4ERR_RETRY_UNCACHED_REP: 3978 case -NFS4ERR_OLD_STATEID: 3979 task->tk_status = 0; 3980 return -EAGAIN; 3981 } 3982 task->tk_status = nfs4_map_errors(task->tk_status); 3983 return 0; 3984 wait_on_recovery: 3985 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 3986 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 3987 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 3988 task->tk_status = 0; 3989 return -EAGAIN; 3990 } 3991 3992 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 3993 nfs4_verifier *bootverf) 3994 { 3995 __be32 verf[2]; 3996 3997 if 
(test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 3998 /* An impossible timestamp guarantees this value 3999 * will never match a generated boot time. */ 4000 verf[0] = 0; 4001 verf[1] = (__be32)(NSEC_PER_SEC + 1); 4002 } else { 4003 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 4004 verf[0] = (__be32)nn->boot_time.tv_sec; 4005 verf[1] = (__be32)nn->boot_time.tv_nsec; 4006 } 4007 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 4008 } 4009 4010 /** 4011 * nfs4_proc_setclientid - Negotiate client ID 4012 * @clp: state data structure 4013 * @program: RPC program for NFSv4 callback service 4014 * @port: IP port number for NFS4 callback service 4015 * @cred: RPC credential to use for this call 4016 * @res: where to place the result 4017 * 4018 * Returns zero, a negative errno, or a negative NFS4ERR status code. 4019 */ 4020 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 4021 unsigned short port, struct rpc_cred *cred, 4022 struct nfs4_setclientid_res *res) 4023 { 4024 nfs4_verifier sc_verifier; 4025 struct nfs4_setclientid setclientid = { 4026 .sc_verifier = &sc_verifier, 4027 .sc_prog = program, 4028 .sc_cb_ident = clp->cl_cb_ident, 4029 }; 4030 struct rpc_message msg = { 4031 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 4032 .rpc_argp = &setclientid, 4033 .rpc_resp = res, 4034 .rpc_cred = cred, 4035 }; 4036 int status; 4037 4038 /* nfs_client_id4 */ 4039 nfs4_init_boot_verifier(clp, &sc_verifier); 4040 rcu_read_lock(); 4041 setclientid.sc_name_len = scnprintf(setclientid.sc_name, 4042 sizeof(setclientid.sc_name), "%s/%s %s", 4043 clp->cl_ipaddr, 4044 rpc_peeraddr2str(clp->cl_rpcclient, 4045 RPC_DISPLAY_ADDR), 4046 rpc_peeraddr2str(clp->cl_rpcclient, 4047 RPC_DISPLAY_PROTO)); 4048 /* cb_client4 */ 4049 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid, 4050 sizeof(setclientid.sc_netid), 4051 rpc_peeraddr2str(clp->cl_rpcclient, 4052 RPC_DISPLAY_NETID)); 4053 rcu_read_unlock(); 4054 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 4055 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 4056 clp->cl_ipaddr, port >> 8, port & 255); 4057 4058 dprintk("NFS call setclientid auth=%s, '%.*s'\n", 4059 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4060 setclientid.sc_name_len, setclientid.sc_name); 4061 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4062 dprintk("NFS reply setclientid: %d\n", status); 4063 return status; 4064 } 4065 4066 /** 4067 * nfs4_proc_setclientid_confirm - Confirm client ID 4068 * @clp: state data structure 4069 * @res: result of a previous SETCLIENTID 4070 * @cred: RPC credential to use for this call 4071 * 4072 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
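 *
 * On success the lease time returned with the reply is recorded (in
 * jiffies) in clp->cl_lease_time and cl_last_renewal is reset to the time
 * the call was started, which primes the lease renewal machinery for the
 * newly confirmed client ID.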
4073 */ 4074 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 4075 struct nfs4_setclientid_res *arg, 4076 struct rpc_cred *cred) 4077 { 4078 struct nfs_fsinfo fsinfo; 4079 struct rpc_message msg = { 4080 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 4081 .rpc_argp = arg, 4082 .rpc_resp = &fsinfo, 4083 .rpc_cred = cred, 4084 }; 4085 unsigned long now; 4086 int status; 4087 4088 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 4089 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4090 clp->cl_clientid); 4091 now = jiffies; 4092 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4093 if (status == 0) { 4094 spin_lock(&clp->cl_lock); 4095 clp->cl_lease_time = fsinfo.lease_time * HZ; 4096 clp->cl_last_renewal = now; 4097 spin_unlock(&clp->cl_lock); 4098 } 4099 dprintk("NFS reply setclientid_confirm: %d\n", status); 4100 return status; 4101 } 4102 4103 struct nfs4_delegreturndata { 4104 struct nfs4_delegreturnargs args; 4105 struct nfs4_delegreturnres res; 4106 struct nfs_fh fh; 4107 nfs4_stateid stateid; 4108 unsigned long timestamp; 4109 struct nfs_fattr fattr; 4110 int rpc_status; 4111 }; 4112 4113 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 4114 { 4115 struct nfs4_delegreturndata *data = calldata; 4116 4117 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4118 return; 4119 4120 switch (task->tk_status) { 4121 case -NFS4ERR_STALE_STATEID: 4122 case -NFS4ERR_EXPIRED: 4123 case 0: 4124 renew_lease(data->res.server, data->timestamp); 4125 break; 4126 default: 4127 if (nfs4_async_handle_error(task, data->res.server, NULL) == 4128 -EAGAIN) { 4129 rpc_restart_call_prepare(task); 4130 return; 4131 } 4132 } 4133 data->rpc_status = task->tk_status; 4134 } 4135 4136 static void nfs4_delegreturn_release(void *calldata) 4137 { 4138 kfree(calldata); 4139 } 4140 4141 #if defined(CONFIG_NFS_V4_1) 4142 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 4143 { 4144 struct nfs4_delegreturndata *d_data; 4145 4146 d_data = (struct nfs4_delegreturndata *)data; 4147 4148 if (nfs4_setup_sequence(d_data->res.server, 4149 &d_data->args.seq_args, 4150 &d_data->res.seq_res, task)) 4151 return; 4152 rpc_call_start(task); 4153 } 4154 #endif /* CONFIG_NFS_V4_1 */ 4155 4156 static const struct rpc_call_ops nfs4_delegreturn_ops = { 4157 #if defined(CONFIG_NFS_V4_1) 4158 .rpc_call_prepare = nfs4_delegreturn_prepare, 4159 #endif /* CONFIG_NFS_V4_1 */ 4160 .rpc_call_done = nfs4_delegreturn_done, 4161 .rpc_release = nfs4_delegreturn_release, 4162 }; 4163 4164 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4165 { 4166 struct nfs4_delegreturndata *data; 4167 struct nfs_server *server = NFS_SERVER(inode); 4168 struct rpc_task *task; 4169 struct rpc_message msg = { 4170 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 4171 .rpc_cred = cred, 4172 }; 4173 struct rpc_task_setup task_setup_data = { 4174 .rpc_client = server->client, 4175 .rpc_message = &msg, 4176 .callback_ops = &nfs4_delegreturn_ops, 4177 .flags = RPC_TASK_ASYNC, 4178 }; 4179 int status = 0; 4180 4181 data = kzalloc(sizeof(*data), GFP_NOFS); 4182 if (data == NULL) 4183 return -ENOMEM; 4184 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4185 data->args.fhandle = &data->fh; 4186 data->args.stateid = &data->stateid; 4187 data->args.bitmask = server->cache_consistency_bitmask; 4188 nfs_copy_fh(&data->fh, NFS_FH(inode)); 4189 nfs4_stateid_copy(&data->stateid, 
stateid); 4190 data->res.fattr = &data->fattr; 4191 data->res.server = server; 4192 nfs_fattr_init(data->res.fattr); 4193 data->timestamp = jiffies; 4194 data->rpc_status = 0; 4195 4196 task_setup_data.callback_data = data; 4197 msg.rpc_argp = &data->args; 4198 msg.rpc_resp = &data->res; 4199 task = rpc_run_task(&task_setup_data); 4200 if (IS_ERR(task)) 4201 return PTR_ERR(task); 4202 if (!issync) 4203 goto out; 4204 status = nfs4_wait_for_completion_rpc_task(task); 4205 if (status != 0) 4206 goto out; 4207 status = data->rpc_status; 4208 if (status == 0) 4209 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 4210 else 4211 nfs_refresh_inode(inode, &data->fattr); 4212 out: 4213 rpc_put_task(task); 4214 return status; 4215 } 4216 4217 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4218 { 4219 struct nfs_server *server = NFS_SERVER(inode); 4220 struct nfs4_exception exception = { }; 4221 int err; 4222 do { 4223 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 4224 switch (err) { 4225 case -NFS4ERR_STALE_STATEID: 4226 case -NFS4ERR_EXPIRED: 4227 case 0: 4228 return 0; 4229 } 4230 err = nfs4_handle_exception(server, err, &exception); 4231 } while (exception.retry); 4232 return err; 4233 } 4234 4235 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 4236 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 4237 4238 /* 4239 * sleep, with exponential backoff, and retry the LOCK operation. 4240 */ 4241 static unsigned long 4242 nfs4_set_lock_task_retry(unsigned long timeout) 4243 { 4244 freezable_schedule_timeout_killable(timeout); 4245 timeout <<= 1; 4246 if (timeout > NFS4_LOCK_MAXTIMEOUT) 4247 return NFS4_LOCK_MAXTIMEOUT; 4248 return timeout; 4249 } 4250 4251 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4252 { 4253 struct inode *inode = state->inode; 4254 struct nfs_server *server = NFS_SERVER(inode); 4255 struct nfs_client *clp = server->nfs_client; 4256 struct nfs_lockt_args arg = { 4257 .fh = NFS_FH(inode), 4258 .fl = request, 4259 }; 4260 struct nfs_lockt_res res = { 4261 .denied = request, 4262 }; 4263 struct rpc_message msg = { 4264 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 4265 .rpc_argp = &arg, 4266 .rpc_resp = &res, 4267 .rpc_cred = state->owner->so_cred, 4268 }; 4269 struct nfs4_lock_state *lsp; 4270 int status; 4271 4272 arg.lock_owner.clientid = clp->cl_clientid; 4273 status = nfs4_set_lock_state(state, request); 4274 if (status != 0) 4275 goto out; 4276 lsp = request->fl_u.nfs4_fl.owner; 4277 arg.lock_owner.id = lsp->ls_seqid.owner_id; 4278 arg.lock_owner.s_dev = server->s_dev; 4279 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4280 switch (status) { 4281 case 0: 4282 request->fl_type = F_UNLCK; 4283 break; 4284 case -NFS4ERR_DENIED: 4285 status = 0; 4286 } 4287 request->fl_ops->fl_release_private(request); 4288 out: 4289 return status; 4290 } 4291 4292 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4293 { 4294 struct nfs4_exception exception = { }; 4295 int err; 4296 4297 do { 4298 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4299 _nfs4_proc_getlk(state, cmd, request), 4300 &exception); 4301 } while (exception.retry); 4302 return err; 4303 } 4304 4305 static int do_vfs_lock(struct file *file, struct file_lock *fl) 4306 { 4307 int res = 0; 4308 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 4309 case FL_POSIX: 4310 res = posix_lock_file_wait(file, fl); 4311 break; 4312 case FL_FLOCK: 
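		/* flock(2)-style request: hand it to the VFS flock wait helper */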
4313 res = flock_lock_file_wait(file, fl); 4314 break; 4315 default: 4316 BUG(); 4317 } 4318 return res; 4319 } 4320 4321 struct nfs4_unlockdata { 4322 struct nfs_locku_args arg; 4323 struct nfs_locku_res res; 4324 struct nfs4_lock_state *lsp; 4325 struct nfs_open_context *ctx; 4326 struct file_lock fl; 4327 const struct nfs_server *server; 4328 unsigned long timestamp; 4329 }; 4330 4331 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 4332 struct nfs_open_context *ctx, 4333 struct nfs4_lock_state *lsp, 4334 struct nfs_seqid *seqid) 4335 { 4336 struct nfs4_unlockdata *p; 4337 struct inode *inode = lsp->ls_state->inode; 4338 4339 p = kzalloc(sizeof(*p), GFP_NOFS); 4340 if (p == NULL) 4341 return NULL; 4342 p->arg.fh = NFS_FH(inode); 4343 p->arg.fl = &p->fl; 4344 p->arg.seqid = seqid; 4345 p->res.seqid = seqid; 4346 p->arg.stateid = &lsp->ls_stateid; 4347 p->lsp = lsp; 4348 atomic_inc(&lsp->ls_count); 4349 /* Ensure we don't close file until we're done freeing locks! */ 4350 p->ctx = get_nfs_open_context(ctx); 4351 memcpy(&p->fl, fl, sizeof(p->fl)); 4352 p->server = NFS_SERVER(inode); 4353 return p; 4354 } 4355 4356 static void nfs4_locku_release_calldata(void *data) 4357 { 4358 struct nfs4_unlockdata *calldata = data; 4359 nfs_free_seqid(calldata->arg.seqid); 4360 nfs4_put_lock_state(calldata->lsp); 4361 put_nfs_open_context(calldata->ctx); 4362 kfree(calldata); 4363 } 4364 4365 static void nfs4_locku_done(struct rpc_task *task, void *data) 4366 { 4367 struct nfs4_unlockdata *calldata = data; 4368 4369 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 4370 return; 4371 switch (task->tk_status) { 4372 case 0: 4373 nfs4_stateid_copy(&calldata->lsp->ls_stateid, 4374 &calldata->res.stateid); 4375 renew_lease(calldata->server, calldata->timestamp); 4376 break; 4377 case -NFS4ERR_BAD_STATEID: 4378 case -NFS4ERR_OLD_STATEID: 4379 case -NFS4ERR_STALE_STATEID: 4380 case -NFS4ERR_EXPIRED: 4381 break; 4382 default: 4383 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 4384 rpc_restart_call_prepare(task); 4385 } 4386 } 4387 4388 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 4389 { 4390 struct nfs4_unlockdata *calldata = data; 4391 4392 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 4393 return; 4394 if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) { 4395 /* Note: exit _without_ running nfs4_locku_done */ 4396 task->tk_action = NULL; 4397 return; 4398 } 4399 calldata->timestamp = jiffies; 4400 if (nfs4_setup_sequence(calldata->server, 4401 &calldata->arg.seq_args, 4402 &calldata->res.seq_res, task)) 4403 return; 4404 rpc_call_start(task); 4405 } 4406 4407 static const struct rpc_call_ops nfs4_locku_ops = { 4408 .rpc_call_prepare = nfs4_locku_prepare, 4409 .rpc_call_done = nfs4_locku_done, 4410 .rpc_release = nfs4_locku_release_calldata, 4411 }; 4412 4413 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 4414 struct nfs_open_context *ctx, 4415 struct nfs4_lock_state *lsp, 4416 struct nfs_seqid *seqid) 4417 { 4418 struct nfs4_unlockdata *data; 4419 struct rpc_message msg = { 4420 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 4421 .rpc_cred = ctx->cred, 4422 }; 4423 struct rpc_task_setup task_setup_data = { 4424 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 4425 .rpc_message = &msg, 4426 .callback_ops = &nfs4_locku_ops, 4427 .workqueue = nfsiod_workqueue, 4428 .flags = RPC_TASK_ASYNC, 4429 }; 4430 4431 /* Ensure this is an unlock - when canceling a lock, the 4432 * canceled lock is passed in, and 
it won't be an unlock. 4433 */ 4434 fl->fl_type = F_UNLCK; 4435 4436 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 4437 if (data == NULL) { 4438 nfs_free_seqid(seqid); 4439 return ERR_PTR(-ENOMEM); 4440 } 4441 4442 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4443 msg.rpc_argp = &data->arg; 4444 msg.rpc_resp = &data->res; 4445 task_setup_data.callback_data = data; 4446 return rpc_run_task(&task_setup_data); 4447 } 4448 4449 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 4450 { 4451 struct nfs_inode *nfsi = NFS_I(state->inode); 4452 struct nfs_seqid *seqid; 4453 struct nfs4_lock_state *lsp; 4454 struct rpc_task *task; 4455 int status = 0; 4456 unsigned char fl_flags = request->fl_flags; 4457 4458 status = nfs4_set_lock_state(state, request); 4459 /* Unlock _before_ we do the RPC call */ 4460 request->fl_flags |= FL_EXISTS; 4461 down_read(&nfsi->rwsem); 4462 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 4463 up_read(&nfsi->rwsem); 4464 goto out; 4465 } 4466 up_read(&nfsi->rwsem); 4467 if (status != 0) 4468 goto out; 4469 /* Is this a delegated lock? */ 4470 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) 4471 goto out; 4472 lsp = request->fl_u.nfs4_fl.owner; 4473 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 4474 status = -ENOMEM; 4475 if (seqid == NULL) 4476 goto out; 4477 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 4478 status = PTR_ERR(task); 4479 if (IS_ERR(task)) 4480 goto out; 4481 status = nfs4_wait_for_completion_rpc_task(task); 4482 rpc_put_task(task); 4483 out: 4484 request->fl_flags = fl_flags; 4485 return status; 4486 } 4487 4488 struct nfs4_lockdata { 4489 struct nfs_lock_args arg; 4490 struct nfs_lock_res res; 4491 struct nfs4_lock_state *lsp; 4492 struct nfs_open_context *ctx; 4493 struct file_lock fl; 4494 unsigned long timestamp; 4495 int rpc_status; 4496 int cancelled; 4497 struct nfs_server *server; 4498 }; 4499 4500 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 4501 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 4502 gfp_t gfp_mask) 4503 { 4504 struct nfs4_lockdata *p; 4505 struct inode *inode = lsp->ls_state->inode; 4506 struct nfs_server *server = NFS_SERVER(inode); 4507 4508 p = kzalloc(sizeof(*p), gfp_mask); 4509 if (p == NULL) 4510 return NULL; 4511 4512 p->arg.fh = NFS_FH(inode); 4513 p->arg.fl = &p->fl; 4514 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 4515 if (p->arg.open_seqid == NULL) 4516 goto out_free; 4517 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); 4518 if (p->arg.lock_seqid == NULL) 4519 goto out_free_seqid; 4520 p->arg.lock_stateid = &lsp->ls_stateid; 4521 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 4522 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 4523 p->arg.lock_owner.s_dev = server->s_dev; 4524 p->res.lock_seqid = p->arg.lock_seqid; 4525 p->lsp = lsp; 4526 p->server = server; 4527 atomic_inc(&lsp->ls_count); 4528 p->ctx = get_nfs_open_context(ctx); 4529 memcpy(&p->fl, fl, sizeof(p->fl)); 4530 return p; 4531 out_free_seqid: 4532 nfs_free_seqid(p->arg.open_seqid); 4533 out_free: 4534 kfree(p); 4535 return NULL; 4536 } 4537 4538 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 4539 { 4540 struct nfs4_lockdata *data = calldata; 4541 struct nfs4_state *state = data->lsp->ls_state; 4542 4543 dprintk("%s: begin!\n", __func__); 4544 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 4545 return; 
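	/*
	 * Now that the lock seqid is ours, check whether this lock_owner has
	 * been confirmed by the server yet: if not, the LOCK call must be an
	 * open_to_lock_owner, carrying the open stateid and an open seqid in
	 * addition to the new lock seqid.
	 */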
4546 /* Do we need to do an open_to_lock_owner? */ 4547 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { 4548 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) 4549 return; 4550 data->arg.open_stateid = &state->stateid; 4551 data->arg.new_lock_owner = 1; 4552 data->res.open_seqid = data->arg.open_seqid; 4553 } else 4554 data->arg.new_lock_owner = 0; 4555 data->timestamp = jiffies; 4556 if (nfs4_setup_sequence(data->server, 4557 &data->arg.seq_args, 4558 &data->res.seq_res, task)) 4559 return; 4560 rpc_call_start(task); 4561 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 4562 } 4563 4564 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) 4565 { 4566 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 4567 nfs4_lock_prepare(task, calldata); 4568 } 4569 4570 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 4571 { 4572 struct nfs4_lockdata *data = calldata; 4573 4574 dprintk("%s: begin!\n", __func__); 4575 4576 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4577 return; 4578 4579 data->rpc_status = task->tk_status; 4580 if (data->arg.new_lock_owner != 0) { 4581 if (data->rpc_status == 0) 4582 nfs_confirm_seqid(&data->lsp->ls_seqid, 0); 4583 else 4584 goto out; 4585 } 4586 if (data->rpc_status == 0) { 4587 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); 4588 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; 4589 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 4590 } 4591 out: 4592 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 4593 } 4594 4595 static void nfs4_lock_release(void *calldata) 4596 { 4597 struct nfs4_lockdata *data = calldata; 4598 4599 dprintk("%s: begin!\n", __func__); 4600 nfs_free_seqid(data->arg.open_seqid); 4601 if (data->cancelled != 0) { 4602 struct rpc_task *task; 4603 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4604 data->arg.lock_seqid); 4605 if (!IS_ERR(task)) 4606 rpc_put_task_async(task); 4607 dprintk("%s: cancelling lock!\n", __func__); 4608 } else 4609 nfs_free_seqid(data->arg.lock_seqid); 4610 nfs4_put_lock_state(data->lsp); 4611 put_nfs_open_context(data->ctx); 4612 kfree(data); 4613 dprintk("%s: done!\n", __func__); 4614 } 4615 4616 static const struct rpc_call_ops nfs4_lock_ops = { 4617 .rpc_call_prepare = nfs4_lock_prepare, 4618 .rpc_call_done = nfs4_lock_done, 4619 .rpc_release = nfs4_lock_release, 4620 }; 4621 4622 static const struct rpc_call_ops nfs4_recover_lock_ops = { 4623 .rpc_call_prepare = nfs4_recover_lock_prepare, 4624 .rpc_call_done = nfs4_lock_done, 4625 .rpc_release = nfs4_lock_release, 4626 }; 4627 4628 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4629 { 4630 switch (error) { 4631 case -NFS4ERR_ADMIN_REVOKED: 4632 case -NFS4ERR_BAD_STATEID: 4633 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4634 if (new_lock_owner != 0 || 4635 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) 4636 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 4637 break; 4638 case -NFS4ERR_STALE_STATEID: 4639 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4640 case -NFS4ERR_EXPIRED: 4641 nfs4_schedule_lease_recovery(server->nfs_client); 4642 }; 4643 } 4644 4645 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 4646 { 4647 struct nfs4_lockdata *data; 4648 struct rpc_task *task; 4649 struct rpc_message msg = { 4650 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 4651 .rpc_cred = state->owner->so_cred, 
4652 }; 4653 struct rpc_task_setup task_setup_data = { 4654 .rpc_client = NFS_CLIENT(state->inode), 4655 .rpc_message = &msg, 4656 .callback_ops = &nfs4_lock_ops, 4657 .workqueue = nfsiod_workqueue, 4658 .flags = RPC_TASK_ASYNC, 4659 }; 4660 int ret; 4661 4662 dprintk("%s: begin!\n", __func__); 4663 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 4664 fl->fl_u.nfs4_fl.owner, 4665 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 4666 if (data == NULL) 4667 return -ENOMEM; 4668 if (IS_SETLKW(cmd)) 4669 data->arg.block = 1; 4670 if (recovery_type > NFS_LOCK_NEW) { 4671 if (recovery_type == NFS_LOCK_RECLAIM) 4672 data->arg.reclaim = NFS_LOCK_RECLAIM; 4673 task_setup_data.callback_ops = &nfs4_recover_lock_ops; 4674 } 4675 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4676 msg.rpc_argp = &data->arg; 4677 msg.rpc_resp = &data->res; 4678 task_setup_data.callback_data = data; 4679 task = rpc_run_task(&task_setup_data); 4680 if (IS_ERR(task)) 4681 return PTR_ERR(task); 4682 ret = nfs4_wait_for_completion_rpc_task(task); 4683 if (ret == 0) { 4684 ret = data->rpc_status; 4685 if (ret) 4686 nfs4_handle_setlk_error(data->server, data->lsp, 4687 data->arg.new_lock_owner, ret); 4688 } else 4689 data->cancelled = 1; 4690 rpc_put_task(task); 4691 dprintk("%s: done, ret = %d!\n", __func__, ret); 4692 return ret; 4693 } 4694 4695 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 4696 { 4697 struct nfs_server *server = NFS_SERVER(state->inode); 4698 struct nfs4_exception exception = { 4699 .inode = state->inode, 4700 }; 4701 int err; 4702 4703 do { 4704 /* Cache the lock if possible... */ 4705 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4706 return 0; 4707 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 4708 if (err != -NFS4ERR_DELAY) 4709 break; 4710 nfs4_handle_exception(server, err, &exception); 4711 } while (exception.retry); 4712 return err; 4713 } 4714 4715 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 4716 { 4717 struct nfs_server *server = NFS_SERVER(state->inode); 4718 struct nfs4_exception exception = { 4719 .inode = state->inode, 4720 }; 4721 int err; 4722 4723 err = nfs4_set_lock_state(state, request); 4724 if (err != 0) 4725 return err; 4726 do { 4727 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4728 return 0; 4729 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 4730 switch (err) { 4731 default: 4732 goto out; 4733 case -NFS4ERR_GRACE: 4734 case -NFS4ERR_DELAY: 4735 nfs4_handle_exception(server, err, &exception); 4736 err = 0; 4737 } 4738 } while (exception.retry); 4739 out: 4740 return err; 4741 } 4742 4743 #if defined(CONFIG_NFS_V4_1) 4744 /** 4745 * nfs41_check_expired_locks - possibly free a lock stateid 4746 * 4747 * @state: NFSv4 state for an inode 4748 * 4749 * Returns NFS_OK if recovery for this stateid is now finished. 4750 * Otherwise a negative NFS4ERR value is returned. 4751 */ 4752 static int nfs41_check_expired_locks(struct nfs4_state *state) 4753 { 4754 int status, ret = -NFS4ERR_BAD_STATEID; 4755 struct nfs4_lock_state *lsp; 4756 struct nfs_server *server = NFS_SERVER(state->inode); 4757 4758 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 4759 if (lsp->ls_flags & NFS_LOCK_INITIALIZED) { 4760 status = nfs41_test_stateid(server, &lsp->ls_stateid); 4761 if (status != NFS_OK) { 4762 /* Free the stateid unless the server 4763 * informs us the stateid is unrecognized. 
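 * (NFS4ERR_BAD_STATEID means the server no longer recognizes the stateid
 * at all, so there is nothing to free; any other failure leaves it
 * potentially known to the server, hence the FREE_STATEID before we forget
 * about it locally.)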
*/ 4764 if (status != -NFS4ERR_BAD_STATEID) 4765 nfs41_free_stateid(server, 4766 &lsp->ls_stateid); 4767 lsp->ls_flags &= ~NFS_LOCK_INITIALIZED; 4768 ret = status; 4769 } 4770 } 4771 }; 4772 4773 return ret; 4774 } 4775 4776 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 4777 { 4778 int status = NFS_OK; 4779 4780 if (test_bit(LK_STATE_IN_USE, &state->flags)) 4781 status = nfs41_check_expired_locks(state); 4782 if (status != NFS_OK) 4783 status = nfs4_lock_expired(state, request); 4784 return status; 4785 } 4786 #endif 4787 4788 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4789 { 4790 struct nfs_inode *nfsi = NFS_I(state->inode); 4791 unsigned char fl_flags = request->fl_flags; 4792 int status = -ENOLCK; 4793 4794 if ((fl_flags & FL_POSIX) && 4795 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 4796 goto out; 4797 /* Is this a delegated open? */ 4798 status = nfs4_set_lock_state(state, request); 4799 if (status != 0) 4800 goto out; 4801 request->fl_flags |= FL_ACCESS; 4802 status = do_vfs_lock(request->fl_file, request); 4803 if (status < 0) 4804 goto out; 4805 down_read(&nfsi->rwsem); 4806 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 4807 /* Yes: cache locks! */ 4808 /* ...but avoid races with delegation recall... */ 4809 request->fl_flags = fl_flags & ~FL_SLEEP; 4810 status = do_vfs_lock(request->fl_file, request); 4811 goto out_unlock; 4812 } 4813 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 4814 if (status != 0) 4815 goto out_unlock; 4816 /* Note: we always want to sleep here! */ 4817 request->fl_flags = fl_flags | FL_SLEEP; 4818 if (do_vfs_lock(request->fl_file, request) < 0) 4819 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " 4820 "manager!\n", __func__); 4821 out_unlock: 4822 up_read(&nfsi->rwsem); 4823 out: 4824 request->fl_flags = fl_flags; 4825 return status; 4826 } 4827 4828 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4829 { 4830 struct nfs4_exception exception = { 4831 .state = state, 4832 .inode = state->inode, 4833 }; 4834 int err; 4835 4836 do { 4837 err = _nfs4_proc_setlk(state, cmd, request); 4838 if (err == -NFS4ERR_DENIED) 4839 err = -EAGAIN; 4840 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4841 err, &exception); 4842 } while (exception.retry); 4843 return err; 4844 } 4845 4846 static int 4847 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 4848 { 4849 struct nfs_open_context *ctx; 4850 struct nfs4_state *state; 4851 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 4852 int status; 4853 4854 /* verify open state */ 4855 ctx = nfs_file_open_context(filp); 4856 state = ctx->state; 4857 4858 if (request->fl_start < 0 || request->fl_end < 0) 4859 return -EINVAL; 4860 4861 if (IS_GETLK(cmd)) { 4862 if (state != NULL) 4863 return nfs4_proc_getlk(state, F_GETLK, request); 4864 return 0; 4865 } 4866 4867 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 4868 return -EINVAL; 4869 4870 if (request->fl_type == F_UNLCK) { 4871 if (state != NULL) 4872 return nfs4_proc_unlck(state, cmd, request); 4873 return 0; 4874 } 4875 4876 if (state == NULL) 4877 return -ENOLCK; 4878 /* 4879 * Don't rely on the VFS having checked the file open mode, 4880 * since it won't do this for flock() locks. 
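 * Hence the explicit check below: a read lock is refused with -EBADF
 * unless the file is open for reading, and a write lock unless it is open
 * for writing.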
4881 */ 4882 switch (request->fl_type) { 4883 case F_RDLCK: 4884 if (!(filp->f_mode & FMODE_READ)) 4885 return -EBADF; 4886 break; 4887 case F_WRLCK: 4888 if (!(filp->f_mode & FMODE_WRITE)) 4889 return -EBADF; 4890 } 4891 4892 do { 4893 status = nfs4_proc_setlk(state, cmd, request); 4894 if ((status != -EAGAIN) || IS_SETLK(cmd)) 4895 break; 4896 timeout = nfs4_set_lock_task_retry(timeout); 4897 status = -ERESTARTSYS; 4898 if (signalled()) 4899 break; 4900 } while(status < 0); 4901 return status; 4902 } 4903 4904 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) 4905 { 4906 struct nfs_server *server = NFS_SERVER(state->inode); 4907 struct nfs4_exception exception = { }; 4908 int err; 4909 4910 err = nfs4_set_lock_state(state, fl); 4911 if (err != 0) 4912 goto out; 4913 do { 4914 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 4915 switch (err) { 4916 default: 4917 printk(KERN_ERR "NFS: %s: unhandled error " 4918 "%d.\n", __func__, err); 4919 case 0: 4920 case -ESTALE: 4921 goto out; 4922 case -NFS4ERR_EXPIRED: 4923 nfs4_schedule_stateid_recovery(server, state); 4924 case -NFS4ERR_STALE_CLIENTID: 4925 case -NFS4ERR_STALE_STATEID: 4926 nfs4_schedule_lease_recovery(server->nfs_client); 4927 goto out; 4928 case -NFS4ERR_BADSESSION: 4929 case -NFS4ERR_BADSLOT: 4930 case -NFS4ERR_BAD_HIGH_SLOT: 4931 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4932 case -NFS4ERR_DEADSESSION: 4933 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 4934 goto out; 4935 case -ERESTARTSYS: 4936 /* 4937 * The show must go on: exit, but mark the 4938 * stateid as needing recovery. 4939 */ 4940 case -NFS4ERR_DELEG_REVOKED: 4941 case -NFS4ERR_ADMIN_REVOKED: 4942 case -NFS4ERR_BAD_STATEID: 4943 case -NFS4ERR_OPENMODE: 4944 nfs4_schedule_stateid_recovery(server, state); 4945 err = 0; 4946 goto out; 4947 case -EKEYEXPIRED: 4948 /* 4949 * User RPCSEC_GSS context has expired. 4950 * We cannot recover this stateid now, so 4951 * skip it and allow recovery thread to 4952 * proceed. 
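 * In that case the lock is simply not re-established on the server;
 * returning 0 lets the delegation return proceed without it.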
4953 */ 4954 err = 0; 4955 goto out; 4956 case -ENOMEM: 4957 case -NFS4ERR_DENIED: 4958 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 4959 err = 0; 4960 goto out; 4961 case -NFS4ERR_DELAY: 4962 break; 4963 } 4964 err = nfs4_handle_exception(server, err, &exception); 4965 } while (exception.retry); 4966 out: 4967 return err; 4968 } 4969 4970 struct nfs_release_lockowner_data { 4971 struct nfs4_lock_state *lsp; 4972 struct nfs_server *server; 4973 struct nfs_release_lockowner_args args; 4974 }; 4975 4976 static void nfs4_release_lockowner_release(void *calldata) 4977 { 4978 struct nfs_release_lockowner_data *data = calldata; 4979 nfs4_free_lock_state(data->server, data->lsp); 4980 kfree(calldata); 4981 } 4982 4983 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 4984 .rpc_release = nfs4_release_lockowner_release, 4985 }; 4986 4987 int nfs4_release_lockowner(struct nfs4_lock_state *lsp) 4988 { 4989 struct nfs_server *server = lsp->ls_state->owner->so_server; 4990 struct nfs_release_lockowner_data *data; 4991 struct rpc_message msg = { 4992 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 4993 }; 4994 4995 if (server->nfs_client->cl_mvops->minor_version != 0) 4996 return -EINVAL; 4997 data = kmalloc(sizeof(*data), GFP_NOFS); 4998 if (!data) 4999 return -ENOMEM; 5000 data->lsp = lsp; 5001 data->server = server; 5002 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 5003 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 5004 data->args.lock_owner.s_dev = server->s_dev; 5005 msg.rpc_argp = &data->args; 5006 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5007 return 0; 5008 } 5009 5010 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 5011 5012 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 5013 const void *buf, size_t buflen, 5014 int flags, int type) 5015 { 5016 if (strcmp(key, "") != 0) 5017 return -EINVAL; 5018 5019 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); 5020 } 5021 5022 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 5023 void *buf, size_t buflen, int type) 5024 { 5025 if (strcmp(key, "") != 0) 5026 return -EINVAL; 5027 5028 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); 5029 } 5030 5031 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 5032 size_t list_len, const char *name, 5033 size_t name_len, int type) 5034 { 5035 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 5036 5037 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 5038 return 0; 5039 5040 if (list && len <= list_len) 5041 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 5042 return len; 5043 } 5044 5045 /* 5046 * nfs_fhget will use either the mounted_on_fileid or the fileid 5047 */ 5048 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 5049 { 5050 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 5051 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 5052 (fattr->valid & NFS_ATTR_FATTR_FSID) && 5053 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 5054 return; 5055 5056 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 5057 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 5058 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 5059 fattr->nlink = 2; 5060 } 5061 5062 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5063 const struct qstr *name, 5064 struct nfs4_fs_locations *fs_locations, 5065 struct page *page) 5066 { 5067 struct nfs_server *server = NFS_SERVER(dir); 5068 u32 bitmask[2] = { 5069 [0] = 
FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 5070 }; 5071 struct nfs4_fs_locations_arg args = { 5072 .dir_fh = NFS_FH(dir), 5073 .name = name, 5074 .page = page, 5075 .bitmask = bitmask, 5076 }; 5077 struct nfs4_fs_locations_res res = { 5078 .fs_locations = fs_locations, 5079 }; 5080 struct rpc_message msg = { 5081 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 5082 .rpc_argp = &args, 5083 .rpc_resp = &res, 5084 }; 5085 int status; 5086 5087 dprintk("%s: start\n", __func__); 5088 5089 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 5090 * is not supported */ 5091 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 5092 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 5093 else 5094 bitmask[0] |= FATTR4_WORD0_FILEID; 5095 5096 nfs_fattr_init(&fs_locations->fattr); 5097 fs_locations->server = server; 5098 fs_locations->nlocations = 0; 5099 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 5100 dprintk("%s: returned status = %d\n", __func__, status); 5101 return status; 5102 } 5103 5104 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5105 const struct qstr *name, 5106 struct nfs4_fs_locations *fs_locations, 5107 struct page *page) 5108 { 5109 struct nfs4_exception exception = { }; 5110 int err; 5111 do { 5112 err = nfs4_handle_exception(NFS_SERVER(dir), 5113 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page), 5114 &exception); 5115 } while (exception.retry); 5116 return err; 5117 } 5118 5119 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) 5120 { 5121 int status; 5122 struct nfs4_secinfo_arg args = { 5123 .dir_fh = NFS_FH(dir), 5124 .name = name, 5125 }; 5126 struct nfs4_secinfo_res res = { 5127 .flavors = flavors, 5128 }; 5129 struct rpc_message msg = { 5130 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 5131 .rpc_argp = &args, 5132 .rpc_resp = &res, 5133 }; 5134 5135 dprintk("NFS call secinfo %s\n", name->name); 5136 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 5137 dprintk("NFS reply secinfo: %d\n", status); 5138 return status; 5139 } 5140 5141 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 5142 struct nfs4_secinfo_flavors *flavors) 5143 { 5144 struct nfs4_exception exception = { }; 5145 int err; 5146 do { 5147 err = nfs4_handle_exception(NFS_SERVER(dir), 5148 _nfs4_proc_secinfo(dir, name, flavors), 5149 &exception); 5150 } while (exception.retry); 5151 return err; 5152 } 5153 5154 #ifdef CONFIG_NFS_V4_1 5155 /* 5156 * Check the exchange flags returned by the server for invalid flags, having 5157 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 5158 * DS flags set. 
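 *
 * Concretely, the reply is rejected if it sets any bit outside
 * EXCHGID4_FLAG_MASK_R, if it claims both EXCHGID4_FLAG_USE_PNFS_MDS and
 * EXCHGID4_FLAG_USE_NON_PNFS, or if it sets none of the bits in
 * EXCHGID4_FLAG_MASK_PNFS.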
5159 */ 5160 static int nfs4_check_cl_exchange_flags(u32 flags) 5161 { 5162 if (flags & ~EXCHGID4_FLAG_MASK_R) 5163 goto out_inval; 5164 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 5165 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 5166 goto out_inval; 5167 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 5168 goto out_inval; 5169 return NFS_OK; 5170 out_inval: 5171 return -NFS4ERR_INVAL; 5172 } 5173 5174 static bool 5175 nfs41_same_server_scope(struct nfs41_server_scope *a, 5176 struct nfs41_server_scope *b) 5177 { 5178 if (a->server_scope_sz == b->server_scope_sz && 5179 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 5180 return true; 5181 5182 return false; 5183 } 5184 5185 /* 5186 * nfs4_proc_bind_conn_to_session() 5187 * 5188 * The 4.1 client currently uses the same TCP connection for the 5189 * fore and backchannel. 5190 */ 5191 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 5192 { 5193 int status; 5194 struct nfs41_bind_conn_to_session_res res; 5195 struct rpc_message msg = { 5196 .rpc_proc = 5197 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 5198 .rpc_argp = clp, 5199 .rpc_resp = &res, 5200 .rpc_cred = cred, 5201 }; 5202 5203 dprintk("--> %s\n", __func__); 5204 BUG_ON(clp == NULL); 5205 5206 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5207 if (unlikely(res.session == NULL)) { 5208 status = -ENOMEM; 5209 goto out; 5210 } 5211 5212 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5213 if (status == 0) { 5214 if (memcmp(res.session->sess_id.data, 5215 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 5216 dprintk("NFS: %s: Session ID mismatch\n", __func__); 5217 status = -EIO; 5218 goto out_session; 5219 } 5220 if (res.dir != NFS4_CDFS4_BOTH) { 5221 dprintk("NFS: %s: Unexpected direction from server\n", 5222 __func__); 5223 status = -EIO; 5224 goto out_session; 5225 } 5226 if (res.use_conn_in_rdma_mode) { 5227 dprintk("NFS: %s: Server returned RDMA mode = true\n", 5228 __func__); 5229 status = -EIO; 5230 goto out_session; 5231 } 5232 } 5233 out_session: 5234 kfree(res.session); 5235 out: 5236 dprintk("<-- %s status= %d\n", __func__, status); 5237 return status; 5238 } 5239 5240 /* 5241 * nfs4_proc_exchange_id() 5242 * 5243 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5244 * 5245 * Since the clientid has expired, all compounds using sessions 5246 * associated with the stale clientid will be returning 5247 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 5248 * be in some phase of session reset. 
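 *
 * The request carries the client's boot verifier plus an identifier string
 * of the form "<client ip>/<rpc node name>", and asks only for
 * EXCHGID4_FLAG_SUPP_MOVED_REFER; the flag word the server returns is then
 * vetted by nfs4_check_cl_exchange_flags() before the new clientid is
 * accepted.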
5249 */ 5250 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 5251 { 5252 nfs4_verifier verifier; 5253 struct nfs41_exchange_id_args args = { 5254 .verifier = &verifier, 5255 .client = clp, 5256 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, 5257 }; 5258 struct nfs41_exchange_id_res res = { 5259 0 5260 }; 5261 int status; 5262 struct rpc_message msg = { 5263 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 5264 .rpc_argp = &args, 5265 .rpc_resp = &res, 5266 .rpc_cred = cred, 5267 }; 5268 5269 nfs4_init_boot_verifier(clp, &verifier); 5270 args.id_len = scnprintf(args.id, sizeof(args.id), 5271 "%s/%s", 5272 clp->cl_ipaddr, 5273 clp->cl_rpcclient->cl_nodename); 5274 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 5275 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5276 args.id_len, args.id); 5277 5278 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 5279 GFP_NOFS); 5280 if (unlikely(res.server_owner == NULL)) { 5281 status = -ENOMEM; 5282 goto out; 5283 } 5284 5285 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 5286 GFP_NOFS); 5287 if (unlikely(res.server_scope == NULL)) { 5288 status = -ENOMEM; 5289 goto out_server_owner; 5290 } 5291 5292 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 5293 if (unlikely(res.impl_id == NULL)) { 5294 status = -ENOMEM; 5295 goto out_server_scope; 5296 } 5297 5298 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5299 if (status == 0) 5300 status = nfs4_check_cl_exchange_flags(res.flags); 5301 5302 if (status == 0) { 5303 clp->cl_clientid = res.clientid; 5304 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 5305 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 5306 clp->cl_seqid = res.seqid; 5307 5308 kfree(clp->cl_serverowner); 5309 clp->cl_serverowner = res.server_owner; 5310 res.server_owner = NULL; 5311 5312 /* use the most recent implementation id */ 5313 kfree(clp->cl_implid); 5314 clp->cl_implid = res.impl_id; 5315 5316 if (clp->cl_serverscope != NULL && 5317 !nfs41_same_server_scope(clp->cl_serverscope, 5318 res.server_scope)) { 5319 dprintk("%s: server_scope mismatch detected\n", 5320 __func__); 5321 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 5322 kfree(clp->cl_serverscope); 5323 clp->cl_serverscope = NULL; 5324 } 5325 5326 if (clp->cl_serverscope == NULL) { 5327 clp->cl_serverscope = res.server_scope; 5328 goto out; 5329 } 5330 } else 5331 kfree(res.impl_id); 5332 5333 out_server_owner: 5334 kfree(res.server_owner); 5335 out_server_scope: 5336 kfree(res.server_scope); 5337 out: 5338 if (clp->cl_implid != NULL) 5339 dprintk("NFS reply exchange_id: Server Implementation ID: " 5340 "domain: %s, name: %s, date: %llu,%u\n", 5341 clp->cl_implid->domain, clp->cl_implid->name, 5342 clp->cl_implid->date.seconds, 5343 clp->cl_implid->date.nseconds); 5344 dprintk("NFS reply exchange_id: %d\n", status); 5345 return status; 5346 } 5347 5348 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 5349 struct rpc_cred *cred) 5350 { 5351 struct rpc_message msg = { 5352 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 5353 .rpc_argp = clp, 5354 .rpc_cred = cred, 5355 }; 5356 int status; 5357 5358 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5359 if (status) 5360 dprintk("NFS: Got error %d from the server %s on " 5361 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5362 return status; 5363 } 5364 5365 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 5366 struct rpc_cred *cred) 5367 { 5368 unsigned int 
loop; 5369 int ret; 5370 5371 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 5372 ret = _nfs4_proc_destroy_clientid(clp, cred); 5373 switch (ret) { 5374 case -NFS4ERR_DELAY: 5375 case -NFS4ERR_CLIENTID_BUSY: 5376 ssleep(1); 5377 break; 5378 default: 5379 return ret; 5380 } 5381 } 5382 return 0; 5383 } 5384 5385 int nfs4_destroy_clientid(struct nfs_client *clp) 5386 { 5387 struct rpc_cred *cred; 5388 int ret = 0; 5389 5390 if (clp->cl_mvops->minor_version < 1) 5391 goto out; 5392 if (clp->cl_exchange_flags == 0) 5393 goto out; 5394 cred = nfs4_get_exchange_id_cred(clp); 5395 ret = nfs4_proc_destroy_clientid(clp, cred); 5396 if (cred) 5397 put_rpccred(cred); 5398 switch (ret) { 5399 case 0: 5400 case -NFS4ERR_STALE_CLIENTID: 5401 clp->cl_exchange_flags = 0; 5402 } 5403 out: 5404 return ret; 5405 } 5406 5407 struct nfs4_get_lease_time_data { 5408 struct nfs4_get_lease_time_args *args; 5409 struct nfs4_get_lease_time_res *res; 5410 struct nfs_client *clp; 5411 }; 5412 5413 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 5414 void *calldata) 5415 { 5416 int ret; 5417 struct nfs4_get_lease_time_data *data = 5418 (struct nfs4_get_lease_time_data *)calldata; 5419 5420 dprintk("--> %s\n", __func__); 5421 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 5422 /* just setup sequence, do not trigger session recovery 5423 since we're invoked within one */ 5424 ret = nfs41_setup_sequence(data->clp->cl_session, 5425 &data->args->la_seq_args, 5426 &data->res->lr_seq_res, task); 5427 5428 BUG_ON(ret == -EAGAIN); 5429 rpc_call_start(task); 5430 dprintk("<-- %s\n", __func__); 5431 } 5432 5433 /* 5434 * Called from nfs4_state_manager thread for session setup, so don't recover 5435 * from sequence operation or clientid errors. 5436 */ 5437 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 5438 { 5439 struct nfs4_get_lease_time_data *data = 5440 (struct nfs4_get_lease_time_data *)calldata; 5441 5442 dprintk("--> %s\n", __func__); 5443 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 5444 return; 5445 switch (task->tk_status) { 5446 case -NFS4ERR_DELAY: 5447 case -NFS4ERR_GRACE: 5448 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 5449 rpc_delay(task, NFS4_POLL_RETRY_MIN); 5450 task->tk_status = 0; 5451 /* fall through */ 5452 case -NFS4ERR_RETRY_UNCACHED_REP: 5453 rpc_restart_call_prepare(task); 5454 return; 5455 } 5456 dprintk("<-- %s\n", __func__); 5457 } 5458 5459 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 5460 .rpc_call_prepare = nfs4_get_lease_time_prepare, 5461 .rpc_call_done = nfs4_get_lease_time_done, 5462 }; 5463 5464 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 5465 { 5466 struct rpc_task *task; 5467 struct nfs4_get_lease_time_args args; 5468 struct nfs4_get_lease_time_res res = { 5469 .lr_fsinfo = fsinfo, 5470 }; 5471 struct nfs4_get_lease_time_data data = { 5472 .args = &args, 5473 .res = &res, 5474 .clp = clp, 5475 }; 5476 struct rpc_message msg = { 5477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 5478 .rpc_argp = &args, 5479 .rpc_resp = &res, 5480 }; 5481 struct rpc_task_setup task_setup = { 5482 .rpc_client = clp->cl_rpcclient, 5483 .rpc_message = &msg, 5484 .callback_ops = &nfs4_get_lease_time_ops, 5485 .callback_data = &data, 5486 .flags = RPC_TASK_TIMEOUT, 5487 }; 5488 int status; 5489 5490 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 5491 dprintk("--> %s\n", __func__); 5492 task = rpc_run_task(&task_setup); 5493 5494 if 
(IS_ERR(task)) 5495 status = PTR_ERR(task); 5496 else { 5497 status = task->tk_status; 5498 rpc_put_task(task); 5499 } 5500 dprintk("<-- %s return %d\n", __func__, status); 5501 5502 return status; 5503 } 5504 5505 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) 5506 { 5507 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags); 5508 } 5509 5510 static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, 5511 struct nfs4_slot *new, 5512 u32 max_slots, 5513 u32 ivalue) 5514 { 5515 struct nfs4_slot *old = NULL; 5516 u32 i; 5517 5518 spin_lock(&tbl->slot_tbl_lock); 5519 if (new) { 5520 old = tbl->slots; 5521 tbl->slots = new; 5522 tbl->max_slots = max_slots; 5523 } 5524 tbl->highest_used_slotid = -1; /* no slot is currently used */ 5525 for (i = 0; i < tbl->max_slots; i++) 5526 tbl->slots[i].seq_nr = ivalue; 5527 spin_unlock(&tbl->slot_tbl_lock); 5528 kfree(old); 5529 } 5530 5531 /* 5532 * (re)Initialise a slot table 5533 */ 5534 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, 5535 u32 ivalue) 5536 { 5537 struct nfs4_slot *new = NULL; 5538 int ret = -ENOMEM; 5539 5540 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, 5541 max_reqs, tbl->max_slots); 5542 5543 /* Does the newly negotiated max_reqs match the existing slot table? */ 5544 if (max_reqs != tbl->max_slots) { 5545 new = nfs4_alloc_slots(max_reqs, GFP_NOFS); 5546 if (!new) 5547 goto out; 5548 } 5549 ret = 0; 5550 5551 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); 5552 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, 5553 tbl, tbl->slots, tbl->max_slots); 5554 out: 5555 dprintk("<-- %s: return %d\n", __func__, ret); 5556 return ret; 5557 } 5558 5559 /* Destroy the slot table */ 5560 static void nfs4_destroy_slot_tables(struct nfs4_session *session) 5561 { 5562 if (session->fc_slot_table.slots != NULL) { 5563 kfree(session->fc_slot_table.slots); 5564 session->fc_slot_table.slots = NULL; 5565 } 5566 if (session->bc_slot_table.slots != NULL) { 5567 kfree(session->bc_slot_table.slots); 5568 session->bc_slot_table.slots = NULL; 5569 } 5570 return; 5571 } 5572 5573 /* 5574 * Initialize or reset the forechannel and backchannel tables 5575 */ 5576 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) 5577 { 5578 struct nfs4_slot_table *tbl; 5579 int status; 5580 5581 dprintk("--> %s\n", __func__); 5582 /* Fore channel */ 5583 tbl = &ses->fc_slot_table; 5584 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); 5585 if (status) /* -ENOMEM */ 5586 return status; 5587 /* Back channel */ 5588 tbl = &ses->bc_slot_table; 5589 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); 5590 if (status && tbl->slots == NULL) 5591 /* Fore and back channel share a connection so get 5592 * both slot tables or neither */ 5593 nfs4_destroy_slot_tables(ses); 5594 return status; 5595 } 5596 5597 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) 5598 { 5599 struct nfs4_session *session; 5600 struct nfs4_slot_table *tbl; 5601 5602 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5603 if (!session) 5604 return NULL; 5605 5606 tbl = &session->fc_slot_table; 5607 tbl->highest_used_slotid = NFS4_NO_SLOT; 5608 spin_lock_init(&tbl->slot_tbl_lock); 5609 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); 5610 init_completion(&tbl->complete); 5611 5612 tbl = &session->bc_slot_table; 5613 tbl->highest_used_slotid = NFS4_NO_SLOT; 5614 spin_lock_init(&tbl->slot_tbl_lock); 5615 
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); 5616 init_completion(&tbl->complete); 5617 5618 session->session_state = 1<<NFS4_SESSION_INITING; 5619 5620 session->clp = clp; 5621 return session; 5622 } 5623 5624 void nfs4_destroy_session(struct nfs4_session *session) 5625 { 5626 struct rpc_xprt *xprt; 5627 struct rpc_cred *cred; 5628 5629 cred = nfs4_get_exchange_id_cred(session->clp); 5630 nfs4_proc_destroy_session(session, cred); 5631 if (cred) 5632 put_rpccred(cred); 5633 5634 rcu_read_lock(); 5635 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt); 5636 rcu_read_unlock(); 5637 dprintk("%s Destroy backchannel for xprt %p\n", 5638 __func__, xprt); 5639 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS); 5640 nfs4_destroy_slot_tables(session); 5641 kfree(session); 5642 } 5643 5644 /* 5645 * Initialize the values to be used by the client in CREATE_SESSION 5646 * If nfs4_init_session set the fore channel request and response sizes, 5647 * use them. 5648 * 5649 * Set the back channel max_resp_sz_cached to zero to force the client to 5650 * always set csa_cachethis to FALSE because the current implementation 5651 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 5652 */ 5653 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 5654 { 5655 struct nfs4_session *session = args->client->cl_session; 5656 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz, 5657 mxresp_sz = session->fc_attrs.max_resp_sz; 5658 5659 if (mxrqst_sz == 0) 5660 mxrqst_sz = NFS_MAX_FILE_IO_SIZE; 5661 if (mxresp_sz == 0) 5662 mxresp_sz = NFS_MAX_FILE_IO_SIZE; 5663 /* Fore channel attributes */ 5664 args->fc_attrs.max_rqst_sz = mxrqst_sz; 5665 args->fc_attrs.max_resp_sz = mxresp_sz; 5666 args->fc_attrs.max_ops = NFS4_MAX_OPS; 5667 args->fc_attrs.max_reqs = max_session_slots; 5668 5669 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 5670 "max_ops=%u max_reqs=%u\n", 5671 __func__, 5672 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 5673 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 5674 5675 /* Back channel attributes */ 5676 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 5677 args->bc_attrs.max_resp_sz = PAGE_SIZE; 5678 args->bc_attrs.max_resp_sz_cached = 0; 5679 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 5680 args->bc_attrs.max_reqs = 1; 5681 5682 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 5683 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 5684 __func__, 5685 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 5686 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 5687 args->bc_attrs.max_reqs); 5688 } 5689 5690 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5691 { 5692 struct nfs4_channel_attrs *sent = &args->fc_attrs; 5693 struct nfs4_channel_attrs *rcvd = &session->fc_attrs; 5694 5695 if (rcvd->max_resp_sz > sent->max_resp_sz) 5696 return -EINVAL; 5697 /* 5698 * Our requested max_ops is the minimum we need; we're not 5699 * prepared to break up compounds into smaller pieces than that. 
5700 * So, no point even trying to continue if the server won't 5701 * cooperate: 5702 */ 5703 if (rcvd->max_ops < sent->max_ops) 5704 return -EINVAL; 5705 if (rcvd->max_reqs == 0) 5706 return -EINVAL; 5707 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 5708 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 5709 return 0; 5710 } 5711 5712 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5713 { 5714 struct nfs4_channel_attrs *sent = &args->bc_attrs; 5715 struct nfs4_channel_attrs *rcvd = &session->bc_attrs; 5716 5717 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 5718 return -EINVAL; 5719 if (rcvd->max_resp_sz < sent->max_resp_sz) 5720 return -EINVAL; 5721 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 5722 return -EINVAL; 5723 /* These would render the backchannel useless: */ 5724 if (rcvd->max_ops != sent->max_ops) 5725 return -EINVAL; 5726 if (rcvd->max_reqs != sent->max_reqs) 5727 return -EINVAL; 5728 return 0; 5729 } 5730 5731 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 5732 struct nfs4_session *session) 5733 { 5734 int ret; 5735 5736 ret = nfs4_verify_fore_channel_attrs(args, session); 5737 if (ret) 5738 return ret; 5739 return nfs4_verify_back_channel_attrs(args, session); 5740 } 5741 5742 static int _nfs4_proc_create_session(struct nfs_client *clp, 5743 struct rpc_cred *cred) 5744 { 5745 struct nfs4_session *session = clp->cl_session; 5746 struct nfs41_create_session_args args = { 5747 .client = clp, 5748 .cb_program = NFS4_CALLBACK, 5749 }; 5750 struct nfs41_create_session_res res = { 5751 .client = clp, 5752 }; 5753 struct rpc_message msg = { 5754 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 5755 .rpc_argp = &args, 5756 .rpc_resp = &res, 5757 .rpc_cred = cred, 5758 }; 5759 int status; 5760 5761 nfs4_init_channel_attrs(&args); 5762 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 5763 5764 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5765 5766 if (!status) 5767 /* Verify the session's negotiated channel_attrs values */ 5768 status = nfs4_verify_channel_attrs(&args, session); 5769 if (!status) { 5770 /* Increment the clientid slot sequence id */ 5771 clp->cl_seqid++; 5772 } 5773 5774 return status; 5775 } 5776 5777 /* 5778 * Issues a CREATE_SESSION operation to the server. 5779 * It is the responsibility of the caller to verify the session is 5780 * expired before calling this routine. 5781 */ 5782 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 5783 { 5784 int status; 5785 unsigned *ptr; 5786 struct nfs4_session *session = clp->cl_session; 5787 5788 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5789 5790 status = _nfs4_proc_create_session(clp, cred); 5791 if (status) 5792 goto out; 5793 5794 /* Init or reset the session slot tables */ 5795 status = nfs4_setup_session_slot_tables(session); 5796 dprintk("slot table setup returned %d\n", status); 5797 if (status) 5798 goto out; 5799 5800 ptr = (unsigned *)&session->sess_id.data[0]; 5801 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 5802 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 5803 out: 5804 dprintk("<-- %s\n", __func__); 5805 return status; 5806 } 5807 5808 /* 5809 * Issue the over-the-wire RPC DESTROY_SESSION. 5810 * The caller must serialize access to this routine. 
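 * nfs4_destroy_session() above is one such caller: it sends the
 * DESTROY_SESSION, tears down the backchannel, and then frees the
 * slot tables via nfs4_destroy_slot_tables().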
5811 */ 5812 int nfs4_proc_destroy_session(struct nfs4_session *session, 5813 struct rpc_cred *cred) 5814 { 5815 struct rpc_message msg = { 5816 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 5817 .rpc_argp = session, 5818 .rpc_cred = cred, 5819 }; 5820 int status = 0; 5821 5822 dprintk("--> nfs4_proc_destroy_session\n"); 5823 5824 /* session is still being setup */ 5825 if (session->clp->cl_cons_state != NFS_CS_READY) 5826 return status; 5827 5828 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5829 5830 if (status) 5831 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 5832 "Session has been destroyed regardless...\n", status); 5833 5834 dprintk("<-- nfs4_proc_destroy_session\n"); 5835 return status; 5836 } 5837 5838 /* 5839 * With sessions, the client is not marked ready until after a 5840 * successful EXCHANGE_ID and CREATE_SESSION. 5841 * 5842 * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate 5843 * other versions of NFS can be tried. 5844 */ 5845 static int nfs41_check_session_ready(struct nfs_client *clp) 5846 { 5847 int ret; 5848 5849 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) { 5850 ret = nfs4_client_recover_expired_lease(clp); 5851 if (ret) 5852 return ret; 5853 } 5854 if (clp->cl_cons_state < NFS_CS_READY) 5855 return -EPROTONOSUPPORT; 5856 smp_rmb(); 5857 return 0; 5858 } 5859 5860 int nfs4_init_session(struct nfs_server *server) 5861 { 5862 struct nfs_client *clp = server->nfs_client; 5863 struct nfs4_session *session; 5864 unsigned int rsize, wsize; 5865 5866 if (!nfs4_has_session(clp)) 5867 return 0; 5868 5869 session = clp->cl_session; 5870 spin_lock(&clp->cl_lock); 5871 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) { 5872 5873 rsize = server->rsize; 5874 if (rsize == 0) 5875 rsize = NFS_MAX_FILE_IO_SIZE; 5876 wsize = server->wsize; 5877 if (wsize == 0) 5878 wsize = NFS_MAX_FILE_IO_SIZE; 5879 5880 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead; 5881 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead; 5882 } 5883 spin_unlock(&clp->cl_lock); 5884 5885 return nfs41_check_session_ready(clp); 5886 } 5887 5888 int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time) 5889 { 5890 struct nfs4_session *session = clp->cl_session; 5891 int ret; 5892 5893 spin_lock(&clp->cl_lock); 5894 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) { 5895 /* 5896 * Do not set NFS_CS_CHECK_LEASE_TIME instead set the 5897 * DS lease to be equal to the MDS lease. 5898 */ 5899 clp->cl_lease_time = lease_time; 5900 clp->cl_last_renewal = jiffies; 5901 } 5902 spin_unlock(&clp->cl_lock); 5903 5904 ret = nfs41_check_session_ready(clp); 5905 if (ret) 5906 return ret; 5907 /* Test for the DS role */ 5908 if (!is_ds_client(clp)) 5909 return -ENODEV; 5910 return 0; 5911 } 5912 EXPORT_SYMBOL_GPL(nfs4_init_ds_session); 5913 5914 5915 /* 5916 * Renew the cl_session lease. 
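 *
 * For NFSv4.1 the SEQUENCE operation below doubles as the lease-renewal
 * ping: nfs41_proc_async_sequence() sends it from the state renewal
 * timer, and nfs41_sequence_release() schedules the next renewal once
 * the reply has been handled.  A minimal sketch of how the renewal
 * timer reaches this code (the cl_mvops indirection is assumed from
 * the ops tables at the end of this file, not shown here):
 *
 *	const struct nfs4_state_maintenance_ops *ops =
 *				clp->cl_mvops->state_renewal_ops;
 *	ops->sched_state_renewal(clp, cred, renew_flags);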
5917 */ 5918 struct nfs4_sequence_data { 5919 struct nfs_client *clp; 5920 struct nfs4_sequence_args args; 5921 struct nfs4_sequence_res res; 5922 }; 5923 5924 static void nfs41_sequence_release(void *data) 5925 { 5926 struct nfs4_sequence_data *calldata = data; 5927 struct nfs_client *clp = calldata->clp; 5928 5929 if (atomic_read(&clp->cl_count) > 1) 5930 nfs4_schedule_state_renewal(clp); 5931 nfs_put_client(clp); 5932 kfree(calldata); 5933 } 5934 5935 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 5936 { 5937 switch(task->tk_status) { 5938 case -NFS4ERR_DELAY: 5939 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5940 return -EAGAIN; 5941 default: 5942 nfs4_schedule_lease_recovery(clp); 5943 } 5944 return 0; 5945 } 5946 5947 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 5948 { 5949 struct nfs4_sequence_data *calldata = data; 5950 struct nfs_client *clp = calldata->clp; 5951 5952 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 5953 return; 5954 5955 if (task->tk_status < 0) { 5956 dprintk("%s ERROR %d\n", __func__, task->tk_status); 5957 if (atomic_read(&clp->cl_count) == 1) 5958 goto out; 5959 5960 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 5961 rpc_restart_call_prepare(task); 5962 return; 5963 } 5964 } 5965 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 5966 out: 5967 dprintk("<-- %s\n", __func__); 5968 } 5969 5970 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 5971 { 5972 struct nfs4_sequence_data *calldata = data; 5973 struct nfs_client *clp = calldata->clp; 5974 struct nfs4_sequence_args *args; 5975 struct nfs4_sequence_res *res; 5976 5977 args = task->tk_msg.rpc_argp; 5978 res = task->tk_msg.rpc_resp; 5979 5980 if (nfs41_setup_sequence(clp->cl_session, args, res, task)) 5981 return; 5982 rpc_call_start(task); 5983 } 5984 5985 static const struct rpc_call_ops nfs41_sequence_ops = { 5986 .rpc_call_done = nfs41_sequence_call_done, 5987 .rpc_call_prepare = nfs41_sequence_prepare, 5988 .rpc_release = nfs41_sequence_release, 5989 }; 5990 5991 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 5992 { 5993 struct nfs4_sequence_data *calldata; 5994 struct rpc_message msg = { 5995 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 5996 .rpc_cred = cred, 5997 }; 5998 struct rpc_task_setup task_setup_data = { 5999 .rpc_client = clp->cl_rpcclient, 6000 .rpc_message = &msg, 6001 .callback_ops = &nfs41_sequence_ops, 6002 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, 6003 }; 6004 6005 if (!atomic_inc_not_zero(&clp->cl_count)) 6006 return ERR_PTR(-EIO); 6007 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 6008 if (calldata == NULL) { 6009 nfs_put_client(clp); 6010 return ERR_PTR(-ENOMEM); 6011 } 6012 nfs41_init_sequence(&calldata->args, &calldata->res, 0); 6013 msg.rpc_argp = &calldata->args; 6014 msg.rpc_resp = &calldata->res; 6015 calldata->clp = clp; 6016 task_setup_data.callback_data = calldata; 6017 6018 return rpc_run_task(&task_setup_data); 6019 } 6020 6021 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 6022 { 6023 struct rpc_task *task; 6024 int ret = 0; 6025 6026 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 6027 return 0; 6028 task = _nfs41_proc_sequence(clp, cred); 6029 if (IS_ERR(task)) 6030 ret = PTR_ERR(task); 6031 else 6032 rpc_put_task_async(task); 6033 dprintk("<-- %s status=%d\n", __func__, ret); 6034 return ret; 6035 } 6036 6037 static int nfs4_proc_sequence(struct 
nfs_client *clp, struct rpc_cred *cred) 6038 { 6039 struct rpc_task *task; 6040 int ret; 6041 6042 task = _nfs41_proc_sequence(clp, cred); 6043 if (IS_ERR(task)) { 6044 ret = PTR_ERR(task); 6045 goto out; 6046 } 6047 ret = rpc_wait_for_completion_task(task); 6048 if (!ret) { 6049 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; 6050 6051 if (task->tk_status == 0) 6052 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 6053 ret = task->tk_status; 6054 } 6055 rpc_put_task(task); 6056 out: 6057 dprintk("<-- %s status=%d\n", __func__, ret); 6058 return ret; 6059 } 6060 6061 struct nfs4_reclaim_complete_data { 6062 struct nfs_client *clp; 6063 struct nfs41_reclaim_complete_args arg; 6064 struct nfs41_reclaim_complete_res res; 6065 }; 6066 6067 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 6068 { 6069 struct nfs4_reclaim_complete_data *calldata = data; 6070 6071 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 6072 if (nfs41_setup_sequence(calldata->clp->cl_session, 6073 &calldata->arg.seq_args, 6074 &calldata->res.seq_res, task)) 6075 return; 6076 6077 rpc_call_start(task); 6078 } 6079 6080 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 6081 { 6082 switch(task->tk_status) { 6083 case 0: 6084 case -NFS4ERR_COMPLETE_ALREADY: 6085 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 6086 break; 6087 case -NFS4ERR_DELAY: 6088 rpc_delay(task, NFS4_POLL_RETRY_MAX); 6089 /* fall through */ 6090 case -NFS4ERR_RETRY_UNCACHED_REP: 6091 return -EAGAIN; 6092 default: 6093 nfs4_schedule_lease_recovery(clp); 6094 } 6095 return 0; 6096 } 6097 6098 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 6099 { 6100 struct nfs4_reclaim_complete_data *calldata = data; 6101 struct nfs_client *clp = calldata->clp; 6102 struct nfs4_sequence_res *res = &calldata->res.seq_res; 6103 6104 dprintk("--> %s\n", __func__); 6105 if (!nfs41_sequence_done(task, res)) 6106 return; 6107 6108 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 6109 rpc_restart_call_prepare(task); 6110 return; 6111 } 6112 dprintk("<-- %s\n", __func__); 6113 } 6114 6115 static void nfs4_free_reclaim_complete_data(void *data) 6116 { 6117 struct nfs4_reclaim_complete_data *calldata = data; 6118 6119 kfree(calldata); 6120 } 6121 6122 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 6123 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 6124 .rpc_call_done = nfs4_reclaim_complete_done, 6125 .rpc_release = nfs4_free_reclaim_complete_data, 6126 }; 6127 6128 /* 6129 * Issue a global reclaim complete. 
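 * The arg.one_fs field is cleared below, so this RECLAIM_COMPLETE
 * covers all filesystems on the client rather than a single one.
 * State recovery reaches this function through the reclaim_complete
 * member of nfs41_reboot_recovery_ops (defined near the end of this
 * file); a rough sketch of that call site, assuming the usual
 * cl_mvops indirection:
 *
 *	const struct nfs4_state_recovery_ops *ops =
 *				clp->cl_mvops->reboot_recovery_ops;
 *	if (ops->reclaim_complete)
 *		(void)ops->reclaim_complete(clp);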
6130 */ 6131 static int nfs41_proc_reclaim_complete(struct nfs_client *clp) 6132 { 6133 struct nfs4_reclaim_complete_data *calldata; 6134 struct rpc_task *task; 6135 struct rpc_message msg = { 6136 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 6137 }; 6138 struct rpc_task_setup task_setup_data = { 6139 .rpc_client = clp->cl_rpcclient, 6140 .rpc_message = &msg, 6141 .callback_ops = &nfs4_reclaim_complete_call_ops, 6142 .flags = RPC_TASK_ASYNC, 6143 }; 6144 int status = -ENOMEM; 6145 6146 dprintk("--> %s\n", __func__); 6147 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 6148 if (calldata == NULL) 6149 goto out; 6150 calldata->clp = clp; 6151 calldata->arg.one_fs = 0; 6152 6153 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 6154 msg.rpc_argp = &calldata->arg; 6155 msg.rpc_resp = &calldata->res; 6156 task_setup_data.callback_data = calldata; 6157 task = rpc_run_task(&task_setup_data); 6158 if (IS_ERR(task)) { 6159 status = PTR_ERR(task); 6160 goto out; 6161 } 6162 status = nfs4_wait_for_completion_rpc_task(task); 6163 if (status == 0) 6164 status = task->tk_status; 6165 rpc_put_task(task); 6166 return 0; 6167 out: 6168 dprintk("<-- %s status=%d\n", __func__, status); 6169 return status; 6170 } 6171 6172 static void 6173 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 6174 { 6175 struct nfs4_layoutget *lgp = calldata; 6176 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6177 6178 dprintk("--> %s\n", __func__); 6179 /* Note the is a race here, where a CB_LAYOUTRECALL can come in 6180 * right now covering the LAYOUTGET we are about to send. 6181 * However, that is not so catastrophic, and there seems 6182 * to be no way to prevent it completely. 6183 */ 6184 if (nfs4_setup_sequence(server, &lgp->args.seq_args, 6185 &lgp->res.seq_res, task)) 6186 return; 6187 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, 6188 NFS_I(lgp->args.inode)->layout, 6189 lgp->args.ctx->state)) { 6190 rpc_exit(task, NFS4_OK); 6191 return; 6192 } 6193 rpc_call_start(task); 6194 } 6195 6196 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 6197 { 6198 struct nfs4_layoutget *lgp = calldata; 6199 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6200 6201 dprintk("--> %s\n", __func__); 6202 6203 if (!nfs4_sequence_done(task, &lgp->res.seq_res)) 6204 return; 6205 6206 switch (task->tk_status) { 6207 case 0: 6208 break; 6209 case -NFS4ERR_LAYOUTTRYLATER: 6210 case -NFS4ERR_RECALLCONFLICT: 6211 task->tk_status = -NFS4ERR_DELAY; 6212 /* Fall through */ 6213 default: 6214 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 6215 rpc_restart_call_prepare(task); 6216 return; 6217 } 6218 } 6219 dprintk("<-- %s\n", __func__); 6220 } 6221 6222 static size_t max_response_pages(struct nfs_server *server) 6223 { 6224 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 6225 return nfs_page_array_len(0, max_resp_sz); 6226 } 6227 6228 static void nfs4_free_pages(struct page **pages, size_t size) 6229 { 6230 int i; 6231 6232 if (!pages) 6233 return; 6234 6235 for (i = 0; i < size; i++) { 6236 if (!pages[i]) 6237 break; 6238 __free_page(pages[i]); 6239 } 6240 kfree(pages); 6241 } 6242 6243 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 6244 { 6245 struct page **pages; 6246 int i; 6247 6248 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 6249 if (!pages) { 6250 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 6251 return NULL; 6252 } 6253 6254 for (i = 0; i < size; i++) { 
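		/*
		 * Allocate the pages one at a time; on failure,
		 * nfs4_free_pages() above frees whatever has been
		 * allocated so far (it stops at the first NULL slot
		 * in the array).
		 */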
6255 pages[i] = alloc_page(gfp_flags); 6256 if (!pages[i]) { 6257 dprintk("%s: failed to allocate page\n", __func__); 6258 nfs4_free_pages(pages, size); 6259 return NULL; 6260 } 6261 } 6262 6263 return pages; 6264 } 6265 6266 static void nfs4_layoutget_release(void *calldata) 6267 { 6268 struct nfs4_layoutget *lgp = calldata; 6269 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6270 size_t max_pages = max_response_pages(server); 6271 6272 dprintk("--> %s\n", __func__); 6273 nfs4_free_pages(lgp->args.layout.pages, max_pages); 6274 put_nfs_open_context(lgp->args.ctx); 6275 kfree(calldata); 6276 dprintk("<-- %s\n", __func__); 6277 } 6278 6279 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 6280 .rpc_call_prepare = nfs4_layoutget_prepare, 6281 .rpc_call_done = nfs4_layoutget_done, 6282 .rpc_release = nfs4_layoutget_release, 6283 }; 6284 6285 void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) 6286 { 6287 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6288 size_t max_pages = max_response_pages(server); 6289 struct rpc_task *task; 6290 struct rpc_message msg = { 6291 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 6292 .rpc_argp = &lgp->args, 6293 .rpc_resp = &lgp->res, 6294 }; 6295 struct rpc_task_setup task_setup_data = { 6296 .rpc_client = server->client, 6297 .rpc_message = &msg, 6298 .callback_ops = &nfs4_layoutget_call_ops, 6299 .callback_data = lgp, 6300 .flags = RPC_TASK_ASYNC, 6301 }; 6302 int status = 0; 6303 6304 dprintk("--> %s\n", __func__); 6305 6306 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 6307 if (!lgp->args.layout.pages) { 6308 nfs4_layoutget_release(lgp); 6309 return; 6310 } 6311 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 6312 6313 lgp->res.layoutp = &lgp->args.layout; 6314 lgp->res.seq_res.sr_slot = NULL; 6315 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 6316 task = rpc_run_task(&task_setup_data); 6317 if (IS_ERR(task)) 6318 return; 6319 status = nfs4_wait_for_completion_rpc_task(task); 6320 if (status == 0) 6321 status = task->tk_status; 6322 if (status == 0) 6323 status = pnfs_layout_process(lgp); 6324 rpc_put_task(task); 6325 dprintk("<-- %s status=%d\n", __func__, status); 6326 return; 6327 } 6328 6329 static void 6330 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 6331 { 6332 struct nfs4_layoutreturn *lrp = calldata; 6333 6334 dprintk("--> %s\n", __func__); 6335 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args, 6336 &lrp->res.seq_res, task)) 6337 return; 6338 rpc_call_start(task); 6339 } 6340 6341 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 6342 { 6343 struct nfs4_layoutreturn *lrp = calldata; 6344 struct nfs_server *server; 6345 struct pnfs_layout_hdr *lo = lrp->args.layout; 6346 6347 dprintk("--> %s\n", __func__); 6348 6349 if (!nfs4_sequence_done(task, &lrp->res.seq_res)) 6350 return; 6351 6352 server = NFS_SERVER(lrp->args.inode); 6353 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 6354 rpc_restart_call_prepare(task); 6355 return; 6356 } 6357 spin_lock(&lo->plh_inode->i_lock); 6358 if (task->tk_status == 0 && lrp->res.lrs_present) 6359 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 6360 lo->plh_block_lgets--; 6361 spin_unlock(&lo->plh_inode->i_lock); 6362 dprintk("<-- %s\n", __func__); 6363 } 6364 6365 static void nfs4_layoutreturn_release(void *calldata) 6366 { 6367 struct nfs4_layoutreturn *lrp = calldata; 6368 6369 dprintk("--> %s\n", __func__); 6370 
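	/*
	 * Drop the reference on the layout header, presumably matching
	 * the reference taken when the LAYOUTRETURN was set up.
	 */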
put_layout_hdr(lrp->args.layout); 6371 kfree(calldata); 6372 dprintk("<-- %s\n", __func__); 6373 } 6374 6375 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 6376 .rpc_call_prepare = nfs4_layoutreturn_prepare, 6377 .rpc_call_done = nfs4_layoutreturn_done, 6378 .rpc_release = nfs4_layoutreturn_release, 6379 }; 6380 6381 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp) 6382 { 6383 struct rpc_task *task; 6384 struct rpc_message msg = { 6385 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 6386 .rpc_argp = &lrp->args, 6387 .rpc_resp = &lrp->res, 6388 }; 6389 struct rpc_task_setup task_setup_data = { 6390 .rpc_client = lrp->clp->cl_rpcclient, 6391 .rpc_message = &msg, 6392 .callback_ops = &nfs4_layoutreturn_call_ops, 6393 .callback_data = lrp, 6394 }; 6395 int status; 6396 6397 dprintk("--> %s\n", __func__); 6398 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 6399 task = rpc_run_task(&task_setup_data); 6400 if (IS_ERR(task)) 6401 return PTR_ERR(task); 6402 status = task->tk_status; 6403 dprintk("<-- %s status=%d\n", __func__, status); 6404 rpc_put_task(task); 6405 return status; 6406 } 6407 6408 /* 6409 * Retrieve the list of Data Server devices from the MDS. 6410 */ 6411 static int _nfs4_getdevicelist(struct nfs_server *server, 6412 const struct nfs_fh *fh, 6413 struct pnfs_devicelist *devlist) 6414 { 6415 struct nfs4_getdevicelist_args args = { 6416 .fh = fh, 6417 .layoutclass = server->pnfs_curr_ld->id, 6418 }; 6419 struct nfs4_getdevicelist_res res = { 6420 .devlist = devlist, 6421 }; 6422 struct rpc_message msg = { 6423 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST], 6424 .rpc_argp = &args, 6425 .rpc_resp = &res, 6426 }; 6427 int status; 6428 6429 dprintk("--> %s\n", __func__); 6430 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 6431 &res.seq_res, 0); 6432 dprintk("<-- %s status=%d\n", __func__, status); 6433 return status; 6434 } 6435 6436 int nfs4_proc_getdevicelist(struct nfs_server *server, 6437 const struct nfs_fh *fh, 6438 struct pnfs_devicelist *devlist) 6439 { 6440 struct nfs4_exception exception = { }; 6441 int err; 6442 6443 do { 6444 err = nfs4_handle_exception(server, 6445 _nfs4_getdevicelist(server, fh, devlist), 6446 &exception); 6447 } while (exception.retry); 6448 6449 dprintk("%s: err=%d, num_devs=%u\n", __func__, 6450 err, devlist->num_devs); 6451 6452 return err; 6453 } 6454 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist); 6455 6456 static int 6457 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) 6458 { 6459 struct nfs4_getdeviceinfo_args args = { 6460 .pdev = pdev, 6461 }; 6462 struct nfs4_getdeviceinfo_res res = { 6463 .pdev = pdev, 6464 }; 6465 struct rpc_message msg = { 6466 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 6467 .rpc_argp = &args, 6468 .rpc_resp = &res, 6469 }; 6470 int status; 6471 6472 dprintk("--> %s\n", __func__); 6473 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 6474 dprintk("<-- %s status=%d\n", __func__, status); 6475 6476 return status; 6477 } 6478 6479 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) 6480 { 6481 struct nfs4_exception exception = { }; 6482 int err; 6483 6484 do { 6485 err = nfs4_handle_exception(server, 6486 _nfs4_proc_getdeviceinfo(server, pdev), 6487 &exception); 6488 } while (exception.retry); 6489 return err; 6490 } 6491 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 6492 6493 static void nfs4_layoutcommit_prepare(struct rpc_task *task, 
void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED:	/* layout was recalled */
	case -NFS4ERR_BADIOMODE:	/* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT:	/* no layout */
	case -NFS4ERR_GRACE:		/* loca_reclaim is always false */
		task->tk_status = 0;
		break;
	case 0:
		nfs_post_op_update_inode_force_wcc(data->args.inode,
						   data->res.fattr);
		break;
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
}

static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct pnfs_layout_segment *lseg, *tmp;
	unsigned long *bitlock = &NFS_I(data->args.inode)->flags;

	pnfs_cleanup_layoutcommit(data);
	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
				       &lseg->pls_flags))
			put_lseg(lseg);
	}

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);

	put_rpccred(data->cred);
	kfree(data);
}

static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};

int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(data->args.inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutcommit_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;
	int status = 0;

	dprintk("NFS: %4d initiating layoutcommit call.
sync %d " 6582 "lbw: %llu inode %lu\n", 6583 data->task.tk_pid, sync, 6584 data->args.lastbytewritten, 6585 data->args.inode->i_ino); 6586 6587 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 6588 task = rpc_run_task(&task_setup_data); 6589 if (IS_ERR(task)) 6590 return PTR_ERR(task); 6591 if (sync == false) 6592 goto out; 6593 status = nfs4_wait_for_completion_rpc_task(task); 6594 if (status != 0) 6595 goto out; 6596 status = task->tk_status; 6597 out: 6598 dprintk("%s: status %d\n", __func__, status); 6599 rpc_put_task(task); 6600 return status; 6601 } 6602 6603 static int 6604 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6605 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6606 { 6607 struct nfs41_secinfo_no_name_args args = { 6608 .style = SECINFO_STYLE_CURRENT_FH, 6609 }; 6610 struct nfs4_secinfo_res res = { 6611 .flavors = flavors, 6612 }; 6613 struct rpc_message msg = { 6614 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 6615 .rpc_argp = &args, 6616 .rpc_resp = &res, 6617 }; 6618 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 6619 } 6620 6621 static int 6622 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6623 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6624 { 6625 struct nfs4_exception exception = { }; 6626 int err; 6627 do { 6628 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6629 switch (err) { 6630 case 0: 6631 case -NFS4ERR_WRONGSEC: 6632 case -NFS4ERR_NOTSUPP: 6633 goto out; 6634 default: 6635 err = nfs4_handle_exception(server, err, &exception); 6636 } 6637 } while (exception.retry); 6638 out: 6639 return err; 6640 } 6641 6642 static int 6643 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 6644 struct nfs_fsinfo *info) 6645 { 6646 int err; 6647 struct page *page; 6648 rpc_authflavor_t flavor; 6649 struct nfs4_secinfo_flavors *flavors; 6650 6651 page = alloc_page(GFP_KERNEL); 6652 if (!page) { 6653 err = -ENOMEM; 6654 goto out; 6655 } 6656 6657 flavors = page_address(page); 6658 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6659 6660 /* 6661 * Fall back on "guess and check" method if 6662 * the server doesn't support SECINFO_NO_NAME 6663 */ 6664 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { 6665 err = nfs4_find_root_sec(server, fhandle, info); 6666 goto out_freepage; 6667 } 6668 if (err) 6669 goto out_freepage; 6670 6671 flavor = nfs_find_best_sec(flavors); 6672 if (err == 0) 6673 err = nfs4_lookup_root_sec(server, fhandle, info, flavor); 6674 6675 out_freepage: 6676 put_page(page); 6677 if (err == -EACCES) 6678 return -EPERM; 6679 out: 6680 return err; 6681 } 6682 6683 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6684 { 6685 int status; 6686 struct nfs41_test_stateid_args args = { 6687 .stateid = stateid, 6688 }; 6689 struct nfs41_test_stateid_res res; 6690 struct rpc_message msg = { 6691 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 6692 .rpc_argp = &args, 6693 .rpc_resp = &res, 6694 }; 6695 6696 dprintk("NFS call test_stateid %p\n", stateid); 6697 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); 6698 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 6699 if (status != NFS_OK) { 6700 dprintk("NFS reply test_stateid: failed, %d\n", status); 6701 return status; 6702 } 6703 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 
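	/*
	 * res.status carries the NFS4ERR value for this stateid; return it
	 * negated so callers see either NFS_OK or a negative NFS4ERR code,
	 * as described in the comment below.
	 */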
6704 return -res.status; 6705 } 6706 6707 /** 6708 * nfs41_test_stateid - perform a TEST_STATEID operation 6709 * 6710 * @server: server / transport on which to perform the operation 6711 * @stateid: state ID to test 6712 * 6713 * Returns NFS_OK if the server recognizes that "stateid" is valid. 6714 * Otherwise a negative NFS4ERR value is returned if the operation 6715 * failed or the state ID is not currently valid. 6716 */ 6717 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6718 { 6719 struct nfs4_exception exception = { }; 6720 int err; 6721 do { 6722 err = _nfs41_test_stateid(server, stateid); 6723 if (err != -NFS4ERR_DELAY) 6724 break; 6725 nfs4_handle_exception(server, err, &exception); 6726 } while (exception.retry); 6727 return err; 6728 } 6729 6730 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6731 { 6732 struct nfs41_free_stateid_args args = { 6733 .stateid = stateid, 6734 }; 6735 struct nfs41_free_stateid_res res; 6736 struct rpc_message msg = { 6737 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 6738 .rpc_argp = &args, 6739 .rpc_resp = &res, 6740 }; 6741 int status; 6742 6743 dprintk("NFS call free_stateid %p\n", stateid); 6744 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); 6745 status = nfs4_call_sync_sequence(server->client, server, &msg, 6746 &args.seq_args, &res.seq_res, 1); 6747 dprintk("NFS reply free_stateid: %d\n", status); 6748 return status; 6749 } 6750 6751 /** 6752 * nfs41_free_stateid - perform a FREE_STATEID operation 6753 * 6754 * @server: server / transport on which to perform the operation 6755 * @stateid: state ID to release 6756 * 6757 * Returns NFS_OK if the server freed "stateid". Otherwise a 6758 * negative NFS4ERR value is returned. 
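 *
 * As with nfs41_test_stateid(), only NFS4ERR_DELAY is retried here;
 * any other error is passed straight back to the caller.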
6759 */ 6760 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6761 { 6762 struct nfs4_exception exception = { }; 6763 int err; 6764 do { 6765 err = _nfs4_free_stateid(server, stateid); 6766 if (err != -NFS4ERR_DELAY) 6767 break; 6768 nfs4_handle_exception(server, err, &exception); 6769 } while (exception.retry); 6770 return err; 6771 } 6772 6773 static bool nfs41_match_stateid(const nfs4_stateid *s1, 6774 const nfs4_stateid *s2) 6775 { 6776 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 6777 return false; 6778 6779 if (s1->seqid == s2->seqid) 6780 return true; 6781 if (s1->seqid == 0 || s2->seqid == 0) 6782 return true; 6783 6784 return false; 6785 } 6786 6787 #endif /* CONFIG_NFS_V4_1 */ 6788 6789 static bool nfs4_match_stateid(const nfs4_stateid *s1, 6790 const nfs4_stateid *s2) 6791 { 6792 return nfs4_stateid_match(s1, s2); 6793 } 6794 6795 6796 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 6797 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6798 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6799 .recover_open = nfs4_open_reclaim, 6800 .recover_lock = nfs4_lock_reclaim, 6801 .establish_clid = nfs4_init_clientid, 6802 .get_clid_cred = nfs4_get_setclientid_cred, 6803 }; 6804 6805 #if defined(CONFIG_NFS_V4_1) 6806 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 6807 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6808 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6809 .recover_open = nfs4_open_reclaim, 6810 .recover_lock = nfs4_lock_reclaim, 6811 .establish_clid = nfs41_init_clientid, 6812 .get_clid_cred = nfs4_get_exchange_id_cred, 6813 .reclaim_complete = nfs41_proc_reclaim_complete, 6814 }; 6815 #endif /* CONFIG_NFS_V4_1 */ 6816 6817 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 6818 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6819 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6820 .recover_open = nfs4_open_expired, 6821 .recover_lock = nfs4_lock_expired, 6822 .establish_clid = nfs4_init_clientid, 6823 .get_clid_cred = nfs4_get_setclientid_cred, 6824 }; 6825 6826 #if defined(CONFIG_NFS_V4_1) 6827 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 6828 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6829 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6830 .recover_open = nfs41_open_expired, 6831 .recover_lock = nfs41_lock_expired, 6832 .establish_clid = nfs41_init_clientid, 6833 .get_clid_cred = nfs4_get_exchange_id_cred, 6834 }; 6835 #endif /* CONFIG_NFS_V4_1 */ 6836 6837 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 6838 .sched_state_renewal = nfs4_proc_async_renew, 6839 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 6840 .renew_lease = nfs4_proc_renew, 6841 }; 6842 6843 #if defined(CONFIG_NFS_V4_1) 6844 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 6845 .sched_state_renewal = nfs41_proc_async_sequence, 6846 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 6847 .renew_lease = nfs4_proc_sequence, 6848 }; 6849 #endif 6850 6851 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 6852 .minor_version = 0, 6853 .call_sync = _nfs4_call_sync, 6854 .match_stateid = nfs4_match_stateid, 6855 .find_root_sec = nfs4_find_root_sec, 6856 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 6857 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 6858 .state_renewal_ops = &nfs40_state_renewal_ops, 6859 }; 6860 6861 #if defined(CONFIG_NFS_V4_1) 6862 static const struct 
nfs4_minor_version_ops nfs_v4_1_minor_ops = { 6863 .minor_version = 1, 6864 .call_sync = _nfs4_call_sync_session, 6865 .match_stateid = nfs41_match_stateid, 6866 .find_root_sec = nfs41_find_root_sec, 6867 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 6868 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 6869 .state_renewal_ops = &nfs41_state_renewal_ops, 6870 }; 6871 #endif 6872 6873 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 6874 [0] = &nfs_v4_0_minor_ops, 6875 #if defined(CONFIG_NFS_V4_1) 6876 [1] = &nfs_v4_1_minor_ops, 6877 #endif 6878 }; 6879 6880 const struct inode_operations nfs4_dir_inode_operations = { 6881 .create = nfs_create, 6882 .lookup = nfs_lookup, 6883 .atomic_open = nfs_atomic_open, 6884 .link = nfs_link, 6885 .unlink = nfs_unlink, 6886 .symlink = nfs_symlink, 6887 .mkdir = nfs_mkdir, 6888 .rmdir = nfs_rmdir, 6889 .mknod = nfs_mknod, 6890 .rename = nfs_rename, 6891 .permission = nfs_permission, 6892 .getattr = nfs_getattr, 6893 .setattr = nfs_setattr, 6894 .getxattr = generic_getxattr, 6895 .setxattr = generic_setxattr, 6896 .listxattr = generic_listxattr, 6897 .removexattr = generic_removexattr, 6898 }; 6899 6900 static const struct inode_operations nfs4_file_inode_operations = { 6901 .permission = nfs_permission, 6902 .getattr = nfs_getattr, 6903 .setattr = nfs_setattr, 6904 .getxattr = generic_getxattr, 6905 .setxattr = generic_setxattr, 6906 .listxattr = generic_listxattr, 6907 .removexattr = generic_removexattr, 6908 }; 6909 6910 const struct nfs_rpc_ops nfs_v4_clientops = { 6911 .version = 4, /* protocol version */ 6912 .dentry_ops = &nfs4_dentry_operations, 6913 .dir_inode_ops = &nfs4_dir_inode_operations, 6914 .file_inode_ops = &nfs4_file_inode_operations, 6915 .file_ops = &nfs4_file_operations, 6916 .getroot = nfs4_proc_get_root, 6917 .submount = nfs4_submount, 6918 .try_mount = nfs4_try_mount, 6919 .getattr = nfs4_proc_getattr, 6920 .setattr = nfs4_proc_setattr, 6921 .lookup = nfs4_proc_lookup, 6922 .access = nfs4_proc_access, 6923 .readlink = nfs4_proc_readlink, 6924 .create = nfs4_proc_create, 6925 .remove = nfs4_proc_remove, 6926 .unlink_setup = nfs4_proc_unlink_setup, 6927 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 6928 .unlink_done = nfs4_proc_unlink_done, 6929 .rename = nfs4_proc_rename, 6930 .rename_setup = nfs4_proc_rename_setup, 6931 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 6932 .rename_done = nfs4_proc_rename_done, 6933 .link = nfs4_proc_link, 6934 .symlink = nfs4_proc_symlink, 6935 .mkdir = nfs4_proc_mkdir, 6936 .rmdir = nfs4_proc_remove, 6937 .readdir = nfs4_proc_readdir, 6938 .mknod = nfs4_proc_mknod, 6939 .statfs = nfs4_proc_statfs, 6940 .fsinfo = nfs4_proc_fsinfo, 6941 .pathconf = nfs4_proc_pathconf, 6942 .set_capabilities = nfs4_server_capabilities, 6943 .decode_dirent = nfs4_decode_dirent, 6944 .read_setup = nfs4_proc_read_setup, 6945 .read_pageio_init = pnfs_pageio_init_read, 6946 .read_rpc_prepare = nfs4_proc_read_rpc_prepare, 6947 .read_done = nfs4_read_done, 6948 .write_setup = nfs4_proc_write_setup, 6949 .write_pageio_init = pnfs_pageio_init_write, 6950 .write_rpc_prepare = nfs4_proc_write_rpc_prepare, 6951 .write_done = nfs4_write_done, 6952 .commit_setup = nfs4_proc_commit_setup, 6953 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 6954 .commit_done = nfs4_commit_done, 6955 .lock = nfs4_proc_lock, 6956 .clear_acl_cache = nfs4_zap_acl_attr, 6957 .close_context = nfs4_close_context, 6958 .open_context = nfs4_atomic_open, 6959 .have_delegation = nfs4_have_delegation, 6960 .return_delegation = 
nfs4_inode_return_delegation, 6961 .alloc_client = nfs4_alloc_client, 6962 .init_client = nfs4_init_client, 6963 .free_client = nfs4_free_client, 6964 .create_server = nfs4_create_server, 6965 .clone_server = nfs_clone_server, 6966 }; 6967 6968 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 6969 .prefix = XATTR_NAME_NFSV4_ACL, 6970 .list = nfs4_xattr_list_nfs4_acl, 6971 .get = nfs4_xattr_get_nfs4_acl, 6972 .set = nfs4_xattr_set_nfs4_acl, 6973 }; 6974 6975 const struct xattr_handler *nfs4_xattr_handlers[] = { 6976 &nfs4_xattr_nfs4_acl_handler, 6977 NULL 6978 }; 6979 6980 /* 6981 * Local variables: 6982 * c-basic-offset: 8 6983 * End: 6984 */ 6985