/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
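/*
 * A few words on the sentinels above. The first two come straight from
 * the NFSv4 protocol: an all-zero stateid requests anonymous access,
 * and an all-ones stateid is the READ-bypass special stateid.
 * currentstateid (seqid 1 with an all-zero "other" field) is the
 * NFSv4.1 "use the current stateid" sentinel from RFC 5661. As used in
 * this file, close_stateid (seqid all-ones, zero "other" field) serves
 * as a server-side marker, recognizable via CLOSE_STATEID(), for
 * operations that arrive carrying just-closed state. Note that all
 * four macros compare the full 16-byte stateid_t (4-byte generation
 * plus 12-byte opaque part).
 */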
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}
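/*
 * Note that the put path above comes in two flavors on purpose:
 * put_client_renew() uses atomic_dec_and_lock(), taking nn->client_lock
 * only when the decrement would drop cl_rpc_users to zero, so the
 * common case stays lock-free, while put_client_renew_locked() is for
 * callers that already hold client_lock. Both either renew the
 * client's lease or, if the client has already expired, wake any
 * waiters on expiry_wq.
 */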
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return nfsd_file_get(f->fi_fds[oflag]);
	return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}
static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
{
	struct nfsd_file *ret = NULL;

	spin_lock(&f->fi_lock);
	if (f->fi_deleg_file)
		ret = nfsd_file_get(f->fi_deleg_file);
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}
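/*
 * Share-reservation accounting in brief: fi_access[O_RDONLY] and
 * fi_access[O_WRONLY] count how many openers currently hold READ and
 * WRITE share access (an OPEN for BOTH bumps both counters), and
 * fi_share_deny accumulates the deny modes in force. The check above
 * and nfs4_file_check_deny() below are the two directions of the same
 * test: new access must not hit an existing deny, and a new deny must
 * not hit an existing access.
 */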
static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
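/*
 * Anatomy of a stateid as built by nfs4_alloc_stid() below: the 32-bit
 * si_generation changes on upgrades and downgrades of the same state,
 * while the opaque part carries the clientid (boot time plus counter)
 * and so_id, an IDR-allocated per-client id starting at 1 (0 is
 * reserved for the start-of-file marker in the nfsdfs "states" file).
 * Clients treat everything but the generation as opaque; the server
 * uses so_id to find the stateid again in cl_stateids.
 */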
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.sc_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32 bits and use
 * the low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
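/*
 * Worked example with a made-up hash value: if jhash() of a filehandle
 * returns 0x00c2a107, block_delegations() sets bits 0x07, 0xa1 and
 * 0xc2 in the "new" 256-bit filter. A later delegation_blocked() call
 * on the same filehandle recomputes the hash, finds all three bits set
 * in one filter, and refuses the delegation; a different filehandle is
 * blocked spuriously only if it collides in all three byte-sized
 * indices.
 */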
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}
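/*
 * Note that the seqid bump above deliberately skips 0 on wrap-around:
 * NFSv4.1 reserves a seqid of 0 to mean "match the current seqid", so
 * the counter runs ..., 0xffffffff, 1, 2, ... The increment and the
 * copy to *dst happen under sc_lock so a client never sees a torn
 * generation value.
 */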
static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
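/*
 * For reference, the share constants are NFS4_SHARE_ACCESS_READ = 1,
 * NFS4_SHARE_ACCESS_WRITE = 2 and NFS4_SHARE_ACCESS_BOTH = 3, and
 * set_access() below records bit (1 << access). A stateid whose opens
 * used READ and later BOTH therefore has bits 1 and 3 set (bmap 0x0a),
 * which is how bmap_to_share_mode() can reconstruct the union
 * (READ|WRITE) of everything previous opens requested, not just the
 * share mode currently in force.
 */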
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}
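/*
 * The filp_close() above is what actually releases any POSIX locks
 * this lockowner still holds: it is passed the lockowner as the
 * fl_owner_t, and closing with that owner removes the owner's locks
 * from the file. The surrounding get_file()/nfsd_file_put() pair keeps
 * the struct file pinned for the duration of the call.
 */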
/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}
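/*
 * free_ol_stateid_reaplist() above is the second half of a two-phase
 * teardown used by release_open_stateid() and, below, by
 * release_openowner(): stateids are unhashed and collected on a local
 * reaplist while cl_lock is held, and only after the lock is dropped
 * are they actually freed, since freeing may sleep (note the
 * might_sleep() above).
 */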
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
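/*
 * Arithmetic check: NFSD_MIN_HDR_SEQ_SZ is 24 + 12 + 44 = 80 bytes of
 * reply that precede the cacheable part. slot_bytes() below subtracts
 * it back out of the client's ca_maxresponsesize_cached, so a client
 * asking for, say, 2128 bytes of cached response (an illustrative
 * value) gets slots of 2048 bytes of cache plus the struct nfsd4_slot
 * header. Further down, nfsd4_get_drc_mem() caps one session at
 * 1/max(8, number-of-threads) of the remaining DRC memory, while
 * always allowing at least one slot.
 */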
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		/* We have handed out more space than we chose in
		 * set_max_drc() to allow.  That isn't really a
		 * problem as long as that doesn't make us think we
		 * have lots more due to integer overflow.
		 */
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a fraction of the remaining memory,
	 * unless it's the only way to give this client a slot.
	 * The chosen fraction is either 1/8 or 1/number of threads,
	 * whichever is smaller.  This ensures there are adequate
	 * slots to support multiple clients per thread.
	 * Give the client one slot even if that would require
	 * over-allocation--it is better than failure.
	 */
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}
static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}
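/*
 * nfsd4_del_conns() above drops and retakes cl_lock around each
 * unregister_xpt_user()/free_conn() pair, presumably to avoid
 * deadlocking against the transport side: nfsd4_conn_lost(), which the
 * transport may invoke for a registered xpt user, takes cl_lock
 * itself.
 */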
Use the peer address 1858 * as a reasonable default for now, but consider fixing 1859 * the rpc client not to require an address in the 1860 * future: 1861 */ 1862 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 1863 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 1864 } 1865 } 1866 1867 /* caller must hold client_lock */ 1868 static struct nfsd4_session * 1869 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 1870 { 1871 struct nfsd4_session *elem; 1872 int idx; 1873 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1874 1875 lockdep_assert_held(&nn->client_lock); 1876 1877 dump_sessionid(__func__, sessionid); 1878 idx = hash_sessionid(sessionid); 1879 /* Search in the appropriate list */ 1880 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 1881 if (!memcmp(elem->se_sessionid.data, sessionid->data, 1882 NFS4_MAX_SESSIONID_LEN)) { 1883 return elem; 1884 } 1885 } 1886 1887 dprintk("%s: session not found\n", __func__); 1888 return NULL; 1889 } 1890 1891 static struct nfsd4_session * 1892 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, 1893 __be32 *ret) 1894 { 1895 struct nfsd4_session *session; 1896 __be32 status = nfserr_badsession; 1897 1898 session = __find_in_sessionid_hashtbl(sessionid, net); 1899 if (!session) 1900 goto out; 1901 status = nfsd4_get_session_locked(session); 1902 if (status) 1903 session = NULL; 1904 out: 1905 *ret = status; 1906 return session; 1907 } 1908 1909 /* caller must hold client_lock */ 1910 static void 1911 unhash_session(struct nfsd4_session *ses) 1912 { 1913 struct nfs4_client *clp = ses->se_client; 1914 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1915 1916 lockdep_assert_held(&nn->client_lock); 1917 1918 list_del(&ses->se_hash); 1919 spin_lock(&ses->se_client->cl_lock); 1920 list_del(&ses->se_perclnt); 1921 spin_unlock(&ses->se_client->cl_lock); 1922 } 1923 1924 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 1925 static int 1926 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 1927 { 1928 /* 1929 * We're assuming the clid was not given out from a boot 1930 * precisely 2^32 (about 136 years) before this one. That seems 1931 * a safe assumption: 1932 */ 1933 if (clid->cl_boot == (u32)nn->boot_time) 1934 return 0; 1935 trace_nfsd_clid_stale(clid); 1936 return 1; 1937 } 1938 1939 /* 1940 * XXX Should we use a slab cache ? 1941 * This type of memory management is somewhat inefficient, but we use it 1942 * anyway since SETCLIENTID is not a common operation. 
1943 */ 1944 static struct nfs4_client *alloc_client(struct xdr_netobj name) 1945 { 1946 struct nfs4_client *clp; 1947 int i; 1948 1949 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL); 1950 if (clp == NULL) 1951 return NULL; 1952 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); 1953 if (clp->cl_name.data == NULL) 1954 goto err_no_name; 1955 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, 1956 sizeof(struct list_head), 1957 GFP_KERNEL); 1958 if (!clp->cl_ownerstr_hashtbl) 1959 goto err_no_hashtbl; 1960 for (i = 0; i < OWNER_HASH_SIZE; i++) 1961 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); 1962 INIT_LIST_HEAD(&clp->cl_sessions); 1963 idr_init(&clp->cl_stateids); 1964 atomic_set(&clp->cl_rpc_users, 0); 1965 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 1966 INIT_LIST_HEAD(&clp->cl_idhash); 1967 INIT_LIST_HEAD(&clp->cl_openowners); 1968 INIT_LIST_HEAD(&clp->cl_delegations); 1969 INIT_LIST_HEAD(&clp->cl_lru); 1970 INIT_LIST_HEAD(&clp->cl_revoked); 1971 #ifdef CONFIG_NFSD_PNFS 1972 INIT_LIST_HEAD(&clp->cl_lo_states); 1973 #endif 1974 INIT_LIST_HEAD(&clp->async_copies); 1975 spin_lock_init(&clp->async_lock); 1976 spin_lock_init(&clp->cl_lock); 1977 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1978 return clp; 1979 err_no_hashtbl: 1980 kfree(clp->cl_name.data); 1981 err_no_name: 1982 kmem_cache_free(client_slab, clp); 1983 return NULL; 1984 } 1985 1986 static void __free_client(struct kref *k) 1987 { 1988 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); 1989 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); 1990 1991 free_svc_cred(&clp->cl_cred); 1992 kfree(clp->cl_ownerstr_hashtbl); 1993 kfree(clp->cl_name.data); 1994 kfree(clp->cl_nii_domain.data); 1995 kfree(clp->cl_nii_name.data); 1996 idr_destroy(&clp->cl_stateids); 1997 kmem_cache_free(client_slab, clp); 1998 } 1999 2000 static void drop_client(struct nfs4_client *clp) 2001 { 2002 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); 2003 } 2004 2005 static void 2006 free_client(struct nfs4_client *clp) 2007 { 2008 while (!list_empty(&clp->cl_sessions)) { 2009 struct nfsd4_session *ses; 2010 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 2011 se_perclnt); 2012 list_del(&ses->se_perclnt); 2013 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 2014 free_session(ses); 2015 } 2016 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 2017 if (clp->cl_nfsd_dentry) { 2018 nfsd_client_rmdir(clp->cl_nfsd_dentry); 2019 clp->cl_nfsd_dentry = NULL; 2020 wake_up_all(&expiry_wq); 2021 } 2022 drop_client(clp); 2023 } 2024 2025 /* must be called under the client_lock */ 2026 static void 2027 unhash_client_locked(struct nfs4_client *clp) 2028 { 2029 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2030 struct nfsd4_session *ses; 2031 2032 lockdep_assert_held(&nn->client_lock); 2033 2034 /* Mark the client as expired! 
*/ 2035 clp->cl_time = 0; 2036 /* Make it invisible */ 2037 if (!list_empty(&clp->cl_idhash)) { 2038 list_del_init(&clp->cl_idhash); 2039 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2040 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 2041 else 2042 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2043 } 2044 list_del_init(&clp->cl_lru); 2045 spin_lock(&clp->cl_lock); 2046 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 2047 list_del_init(&ses->se_hash); 2048 spin_unlock(&clp->cl_lock); 2049 } 2050 2051 static void 2052 unhash_client(struct nfs4_client *clp) 2053 { 2054 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2055 2056 spin_lock(&nn->client_lock); 2057 unhash_client_locked(clp); 2058 spin_unlock(&nn->client_lock); 2059 } 2060 2061 static __be32 mark_client_expired_locked(struct nfs4_client *clp) 2062 { 2063 if (atomic_read(&clp->cl_rpc_users)) 2064 return nfserr_jukebox; 2065 unhash_client_locked(clp); 2066 return nfs_ok; 2067 } 2068 2069 static void 2070 __destroy_client(struct nfs4_client *clp) 2071 { 2072 int i; 2073 struct nfs4_openowner *oo; 2074 struct nfs4_delegation *dp; 2075 struct list_head reaplist; 2076 2077 INIT_LIST_HEAD(&reaplist); 2078 spin_lock(&state_lock); 2079 while (!list_empty(&clp->cl_delegations)) { 2080 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 2081 WARN_ON(!unhash_delegation_locked(dp)); 2082 list_add(&dp->dl_recall_lru, &reaplist); 2083 } 2084 spin_unlock(&state_lock); 2085 while (!list_empty(&reaplist)) { 2086 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 2087 list_del_init(&dp->dl_recall_lru); 2088 destroy_unhashed_deleg(dp); 2089 } 2090 while (!list_empty(&clp->cl_revoked)) { 2091 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); 2092 list_del_init(&dp->dl_recall_lru); 2093 nfs4_put_stid(&dp->dl_stid); 2094 } 2095 while (!list_empty(&clp->cl_openowners)) { 2096 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 2097 nfs4_get_stateowner(&oo->oo_owner); 2098 release_openowner(oo); 2099 } 2100 for (i = 0; i < OWNER_HASH_SIZE; i++) { 2101 struct nfs4_stateowner *so, *tmp; 2102 2103 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], 2104 so_strhash) { 2105 /* Should be no openowners at this point */ 2106 WARN_ON_ONCE(so->so_is_open_owner); 2107 remove_blocked_locks(lockowner(so)); 2108 } 2109 } 2110 nfsd4_return_all_client_layouts(clp); 2111 nfsd4_shutdown_copy(clp); 2112 nfsd4_shutdown_callback(clp); 2113 if (clp->cl_cb_conn.cb_xprt) 2114 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 2115 free_client(clp); 2116 wake_up_all(&expiry_wq); 2117 } 2118 2119 static void 2120 destroy_client(struct nfs4_client *clp) 2121 { 2122 unhash_client(clp); 2123 __destroy_client(clp); 2124 } 2125 2126 static void inc_reclaim_complete(struct nfs4_client *clp) 2127 { 2128 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2129 2130 if (!nn->track_reclaim_completes) 2131 return; 2132 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) 2133 return; 2134 if (atomic_inc_return(&nn->nr_reclaim_complete) == 2135 nn->reclaim_str_hashtbl_size) { 2136 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", 2137 clp->net->ns.inum); 2138 nfsd4_end_grace(nn); 2139 } 2140 } 2141 2142 static void expire_client(struct nfs4_client *clp) 2143 { 2144 unhash_client(clp); 2145 nfsd4_client_record_remove(clp); 2146 __destroy_client(clp); 2147 } 2148 2149 static void copy_verf(struct nfs4_client 
*target, nfs4_verifier *source) 2150 { 2151 memcpy(target->cl_verifier.data, source->data, 2152 sizeof(target->cl_verifier.data)); 2153 } 2154 2155 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 2156 { 2157 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 2158 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 2159 } 2160 2161 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 2162 { 2163 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); 2164 target->cr_raw_principal = kstrdup(source->cr_raw_principal, 2165 GFP_KERNEL); 2166 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); 2167 if ((source->cr_principal && !target->cr_principal) || 2168 (source->cr_raw_principal && !target->cr_raw_principal) || 2169 (source->cr_targ_princ && !target->cr_targ_princ)) 2170 return -ENOMEM; 2171 2172 target->cr_flavor = source->cr_flavor; 2173 target->cr_uid = source->cr_uid; 2174 target->cr_gid = source->cr_gid; 2175 target->cr_group_info = source->cr_group_info; 2176 get_group_info(target->cr_group_info); 2177 target->cr_gss_mech = source->cr_gss_mech; 2178 if (source->cr_gss_mech) 2179 gss_mech_get(source->cr_gss_mech); 2180 return 0; 2181 } 2182 2183 static int 2184 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) 2185 { 2186 if (o1->len < o2->len) 2187 return -1; 2188 if (o1->len > o2->len) 2189 return 1; 2190 return memcmp(o1->data, o2->data, o1->len); 2191 } 2192 2193 static int 2194 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 2195 { 2196 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 2197 } 2198 2199 static int 2200 same_clid(clientid_t *cl1, clientid_t *cl2) 2201 { 2202 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 2203 } 2204 2205 static bool groups_equal(struct group_info *g1, struct group_info *g2) 2206 { 2207 int i; 2208 2209 if (g1->ngroups != g2->ngroups) 2210 return false; 2211 for (i = 0; i < g1->ngroups; i++) 2212 if (!gid_eq(g1->gid[i], g2->gid[i])) 2213 return false; 2214 return true; 2215 } 2216 2217 /* 2218 * RFC 3530 language requires clid_inuse be returned when the 2219 * "principal" associated with a request differs from that previously 2220 * used. We use the uid, gids, and gss principal string as our best 2221 * approximation. We also don't want to allow non-gss use of a client 2222 * established using gss: in theory cr_principal should catch that 2223 * change, but in practice cr_principal can be null even in the gss case 2224 * since gssd doesn't always pass down a principal string. 2225 */ 2226 static bool is_gss_cred(struct svc_cred *cr) 2227 { 2228 /* Is cr_flavor one of the gss "pseudoflavors"?: */ 2229 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); 2230 } 2231 2232 2233 static bool 2234 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 2235 { 2236 if ((is_gss_cred(cr1) != is_gss_cred(cr2)) 2237 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) 2238 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) 2239 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 2240 return false; 2241 /* XXX: check that cr_targ_princ fields match?
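 * (The comparison below first catches the both-NULL case via pointer
 * equality, then rejects a NULL/non-NULL mismatch, and only then falls
 * back to strcmp() on the principal strings.)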
*/ 2242 if (cr1->cr_principal == cr2->cr_principal) 2243 return true; 2244 if (!cr1->cr_principal || !cr2->cr_principal) 2245 return false; 2246 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 2247 } 2248 2249 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) 2250 { 2251 struct svc_cred *cr = &rqstp->rq_cred; 2252 u32 service; 2253 2254 if (!cr->cr_gss_mech) 2255 return false; 2256 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); 2257 return service == RPC_GSS_SVC_INTEGRITY || 2258 service == RPC_GSS_SVC_PRIVACY; 2259 } 2260 2261 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) 2262 { 2263 struct svc_cred *cr = &rqstp->rq_cred; 2264 2265 if (!cl->cl_mach_cred) 2266 return true; 2267 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) 2268 return false; 2269 if (!svc_rqst_integrity_protected(rqstp)) 2270 return false; 2271 if (cl->cl_cred.cr_raw_principal) 2272 return 0 == strcmp(cl->cl_cred.cr_raw_principal, 2273 cr->cr_raw_principal); 2274 if (!cr->cr_principal) 2275 return false; 2276 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 2277 } 2278 2279 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) 2280 { 2281 __be32 verf[2]; 2282 2283 /* 2284 * This is opaque to client, so no need to byte-swap. Use 2285 * __force to keep sparse happy 2286 */ 2287 verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); 2288 verf[1] = (__force __be32)nn->clverifier_counter++; 2289 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 2290 } 2291 2292 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 2293 { 2294 clp->cl_clientid.cl_boot = (u32)nn->boot_time; 2295 clp->cl_clientid.cl_id = nn->clientid_counter++; 2296 gen_confirm(clp, nn); 2297 } 2298 2299 static struct nfs4_stid * 2300 find_stateid_locked(struct nfs4_client *cl, stateid_t *t) 2301 { 2302 struct nfs4_stid *ret; 2303 2304 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); 2305 if (!ret || !ret->sc_type) 2306 return NULL; 2307 return ret; 2308 } 2309 2310 static struct nfs4_stid * 2311 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 2312 { 2313 struct nfs4_stid *s; 2314 2315 spin_lock(&cl->cl_lock); 2316 s = find_stateid_locked(cl, t); 2317 if (s != NULL) { 2318 if (typemask & s->sc_type) 2319 refcount_inc(&s->sc_count); 2320 else 2321 s = NULL; 2322 } 2323 spin_unlock(&cl->cl_lock); 2324 return s; 2325 } 2326 2327 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode) 2328 { 2329 struct nfsdfs_client *nc; 2330 nc = get_nfsdfs_client(inode); 2331 if (!nc) 2332 return NULL; 2333 return container_of(nc, struct nfs4_client, cl_nfsdfs); 2334 } 2335 2336 static void seq_quote_mem(struct seq_file *m, char *data, int len) 2337 { 2338 seq_printf(m, "\""); 2339 seq_escape_mem_ascii(m, data, len); 2340 seq_printf(m, "\""); 2341 } 2342 2343 static int client_info_show(struct seq_file *m, void *v) 2344 { 2345 struct inode *inode = m->private; 2346 struct nfs4_client *clp; 2347 u64 clid; 2348 2349 clp = get_nfsdfs_clp(inode); 2350 if (!clp) 2351 return -ENXIO; 2352 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); 2353 seq_printf(m, "clientid: 0x%llx\n", clid); 2354 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); 2355 seq_printf(m, "name: "); 2356 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); 2357 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); 2358 if (clp->cl_nii_domain.data) { 2359 seq_printf(m, "Implementation domain: "); 2360 
seq_quote_mem(m, clp->cl_nii_domain.data, 2361 clp->cl_nii_domain.len); 2362 seq_printf(m, "\nImplementation name: "); 2363 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); 2364 seq_printf(m, "\nImplementation time: [%lld, %ld]\n", 2365 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); 2366 } 2367 drop_client(clp); 2368 2369 return 0; 2370 } 2371 2372 static int client_info_open(struct inode *inode, struct file *file) 2373 { 2374 return single_open(file, client_info_show, inode); 2375 } 2376 2377 static const struct file_operations client_info_fops = { 2378 .open = client_info_open, 2379 .read = seq_read, 2380 .llseek = seq_lseek, 2381 .release = single_release, 2382 }; 2383 2384 static void *states_start(struct seq_file *s, loff_t *pos) 2385 __acquires(&clp->cl_lock) 2386 { 2387 struct nfs4_client *clp = s->private; 2388 unsigned long id = *pos; 2389 void *ret; 2390 2391 spin_lock(&clp->cl_lock); 2392 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2393 *pos = id; 2394 return ret; 2395 } 2396 2397 static void *states_next(struct seq_file *s, void *v, loff_t *pos) 2398 { 2399 struct nfs4_client *clp = s->private; 2400 unsigned long id = *pos; 2401 void *ret; 2402 2403 id = *pos; 2404 id++; 2405 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2406 *pos = id; 2407 return ret; 2408 } 2409 2410 static void states_stop(struct seq_file *s, void *v) 2411 __releases(&clp->cl_lock) 2412 { 2413 struct nfs4_client *clp = s->private; 2414 2415 spin_unlock(&clp->cl_lock); 2416 } 2417 2418 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) 2419 { 2420 seq_printf(s, "filename: \"%pD2\"", f->nf_file); 2421 } 2422 2423 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) 2424 { 2425 struct inode *inode = f->nf_inode; 2426 2427 seq_printf(s, "superblock: \"%02x:%02x:%ld\"", 2428 MAJOR(inode->i_sb->s_dev), 2429 MINOR(inode->i_sb->s_dev), 2430 inode->i_ino); 2431 } 2432 2433 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) 2434 { 2435 seq_printf(s, "owner: "); 2436 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); 2437 } 2438 2439 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) 2440 { 2441 seq_printf(s, "0x%.8x", stid->si_generation); 2442 seq_printf(s, "%12phN", &stid->si_opaque); 2443 } 2444 2445 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) 2446 { 2447 struct nfs4_ol_stateid *ols; 2448 struct nfs4_file *nf; 2449 struct nfsd_file *file; 2450 struct nfs4_stateowner *oo; 2451 unsigned int access, deny; 2452 2453 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID) 2454 return 0; /* XXX: or SEQ_SKIP? */ 2455 ols = openlockstateid(st); 2456 oo = ols->st_stateowner; 2457 nf = st->sc_file; 2458 file = find_any_file(nf); 2459 if (!file) 2460 return 0; 2461 2462 seq_printf(s, "- "); 2463 nfs4_show_stateid(s, &st->sc_stateid); 2464 seq_printf(s, ": { type: open, "); 2465 2466 access = bmap_to_share_mode(ols->st_access_bmap); 2467 deny = bmap_to_share_mode(ols->st_deny_bmap); 2468 2469 seq_printf(s, "access: %s%s, ", 2470 access & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2471 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); 2472 seq_printf(s, "deny: %s%s, ", 2473 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2474 deny & NFS4_SHARE_ACCESS_WRITE ? 
"w" : "-"); 2475 2476 nfs4_show_superblock(s, file); 2477 seq_printf(s, ", "); 2478 nfs4_show_fname(s, file); 2479 seq_printf(s, ", "); 2480 nfs4_show_owner(s, oo); 2481 seq_printf(s, " }\n"); 2482 nfsd_file_put(file); 2483 2484 return 0; 2485 } 2486 2487 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) 2488 { 2489 struct nfs4_ol_stateid *ols; 2490 struct nfs4_file *nf; 2491 struct nfsd_file *file; 2492 struct nfs4_stateowner *oo; 2493 2494 ols = openlockstateid(st); 2495 oo = ols->st_stateowner; 2496 nf = st->sc_file; 2497 file = find_any_file(nf); 2498 if (!file) 2499 return 0; 2500 2501 seq_printf(s, "- "); 2502 nfs4_show_stateid(s, &st->sc_stateid); 2503 seq_printf(s, ": { type: lock, "); 2504 2505 /* 2506 * Note: a lock stateid isn't really the same thing as a lock, 2507 * it's the locking state held by one owner on a file, and there 2508 * may be multiple (or no) lock ranges associated with it. 2509 * (Same for the matter is true of open stateids.) 2510 */ 2511 2512 nfs4_show_superblock(s, file); 2513 /* XXX: open stateid? */ 2514 seq_printf(s, ", "); 2515 nfs4_show_fname(s, file); 2516 seq_printf(s, ", "); 2517 nfs4_show_owner(s, oo); 2518 seq_printf(s, " }\n"); 2519 nfsd_file_put(file); 2520 2521 return 0; 2522 } 2523 2524 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) 2525 { 2526 struct nfs4_delegation *ds; 2527 struct nfs4_file *nf; 2528 struct nfsd_file *file; 2529 2530 ds = delegstateid(st); 2531 nf = st->sc_file; 2532 file = find_deleg_file(nf); 2533 if (!file) 2534 return 0; 2535 2536 seq_printf(s, "- "); 2537 nfs4_show_stateid(s, &st->sc_stateid); 2538 seq_printf(s, ": { type: deleg, "); 2539 2540 /* Kinda dead code as long as we only support read delegs: */ 2541 seq_printf(s, "access: %s, ", 2542 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w"); 2543 2544 /* XXX: lease time, whether it's being recalled. */ 2545 2546 nfs4_show_superblock(s, file); 2547 seq_printf(s, ", "); 2548 nfs4_show_fname(s, file); 2549 seq_printf(s, " }\n"); 2550 nfsd_file_put(file); 2551 2552 return 0; 2553 } 2554 2555 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) 2556 { 2557 struct nfs4_layout_stateid *ls; 2558 struct nfsd_file *file; 2559 2560 ls = container_of(st, struct nfs4_layout_stateid, ls_stid); 2561 file = ls->ls_file; 2562 2563 seq_printf(s, "- "); 2564 nfs4_show_stateid(s, &st->sc_stateid); 2565 seq_printf(s, ": { type: layout, "); 2566 2567 /* XXX: What else would be useful? */ 2568 2569 nfs4_show_superblock(s, file); 2570 seq_printf(s, ", "); 2571 nfs4_show_fname(s, file); 2572 seq_printf(s, " }\n"); 2573 2574 return 0; 2575 } 2576 2577 static int states_show(struct seq_file *s, void *v) 2578 { 2579 struct nfs4_stid *st = v; 2580 2581 switch (st->sc_type) { 2582 case NFS4_OPEN_STID: 2583 return nfs4_show_open(s, st); 2584 case NFS4_LOCK_STID: 2585 return nfs4_show_lock(s, st); 2586 case NFS4_DELEG_STID: 2587 return nfs4_show_deleg(s, st); 2588 case NFS4_LAYOUT_STID: 2589 return nfs4_show_layout(s, st); 2590 default: 2591 return 0; /* XXX: or SEQ_SKIP? */ 2592 } 2593 /* XXX: copy stateids? 
*/ 2594 } 2595 2596 static struct seq_operations states_seq_ops = { 2597 .start = states_start, 2598 .next = states_next, 2599 .stop = states_stop, 2600 .show = states_show 2601 }; 2602 2603 static int client_states_open(struct inode *inode, struct file *file) 2604 { 2605 struct seq_file *s; 2606 struct nfs4_client *clp; 2607 int ret; 2608 2609 clp = get_nfsdfs_clp(inode); 2610 if (!clp) 2611 return -ENXIO; 2612 2613 ret = seq_open(file, &states_seq_ops); 2614 if (ret) 2615 return ret; 2616 s = file->private_data; 2617 s->private = clp; 2618 return 0; 2619 } 2620 2621 static int client_opens_release(struct inode *inode, struct file *file) 2622 { 2623 struct seq_file *m = file->private_data; 2624 struct nfs4_client *clp = m->private; 2625 2626 /* XXX: alternatively, we could get/drop in seq start/stop */ 2627 drop_client(clp); 2628 return 0; 2629 } 2630 2631 static const struct file_operations client_states_fops = { 2632 .open = client_states_open, 2633 .read = seq_read, 2634 .llseek = seq_lseek, 2635 .release = client_opens_release, 2636 }; 2637 2638 /* 2639 * Normally we refuse to destroy clients that are in use, but here the 2640 * administrator is telling us to just do it. We also want to wait 2641 * so the caller has a guarantee that the client's locks are gone by 2642 * the time the write returns: 2643 */ 2644 static void force_expire_client(struct nfs4_client *clp) 2645 { 2646 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2647 bool already_expired; 2648 2649 spin_lock(&clp->cl_lock); 2650 clp->cl_time = 0; 2651 spin_unlock(&clp->cl_lock); 2652 2653 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); 2654 spin_lock(&nn->client_lock); 2655 already_expired = list_empty(&clp->cl_lru); 2656 if (!already_expired) 2657 unhash_client_locked(clp); 2658 spin_unlock(&nn->client_lock); 2659 2660 if (!already_expired) 2661 expire_client(clp); 2662 else 2663 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); 2664 } 2665 2666 static ssize_t client_ctl_write(struct file *file, const char __user *buf, 2667 size_t size, loff_t *pos) 2668 { 2669 char *data; 2670 struct nfs4_client *clp; 2671 2672 data = simple_transaction_get(file, buf, size); 2673 if (IS_ERR(data)) 2674 return PTR_ERR(data); 2675 if (size != 7 || 0 != memcmp(data, "expire\n", 7)) 2676 return -EINVAL; 2677 clp = get_nfsdfs_clp(file_inode(file)); 2678 if (!clp) 2679 return -ENXIO; 2680 force_expire_client(clp); 2681 drop_client(clp); 2682 return 7; 2683 } 2684 2685 static const struct file_operations client_ctl_fops = { 2686 .write = client_ctl_write, 2687 .release = simple_transaction_release, 2688 }; 2689 2690 static const struct tree_descr client_files[] = { 2691 [0] = {"info", &client_info_fops, S_IRUSR}, 2692 [1] = {"states", &client_states_fops, S_IRUSR}, 2693 [2] = {"ctl", &client_ctl_fops, S_IWUSR}, 2694 [3] = {""}, 2695 }; 2696 2697 static struct nfs4_client *create_client(struct xdr_netobj name, 2698 struct svc_rqst *rqstp, nfs4_verifier *verf) 2699 { 2700 struct nfs4_client *clp; 2701 struct sockaddr *sa = svc_addr(rqstp); 2702 int ret; 2703 struct net *net = SVC_NET(rqstp); 2704 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2705 2706 clp = alloc_client(name); 2707 if (clp == NULL) 2708 return NULL; 2709 2710 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 2711 if (ret) { 2712 free_client(clp); 2713 return NULL; 2714 } 2715 gen_clid(clp, nn); 2716 kref_init(&clp->cl_nfsdfs.cl_ref); 2717 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 2718 clp->cl_time = 
ktime_get_boottime_seconds(); 2719 clear_bit(0, &clp->cl_cb_slot_busy); 2720 copy_verf(clp, verf); 2721 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); 2722 clp->cl_cb_session = NULL; 2723 clp->net = net; 2724 clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs, 2725 clp->cl_clientid.cl_id - nn->clientid_base, 2726 client_files); 2727 if (!clp->cl_nfsd_dentry) { 2728 free_client(clp); 2729 return NULL; 2730 } 2731 return clp; 2732 } 2733 2734 static void 2735 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 2736 { 2737 struct rb_node **new = &(root->rb_node), *parent = NULL; 2738 struct nfs4_client *clp; 2739 2740 while (*new) { 2741 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 2742 parent = *new; 2743 2744 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 2745 new = &((*new)->rb_left); 2746 else 2747 new = &((*new)->rb_right); 2748 } 2749 2750 rb_link_node(&new_clp->cl_namenode, parent, new); 2751 rb_insert_color(&new_clp->cl_namenode, root); 2752 } 2753 2754 static struct nfs4_client * 2755 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 2756 { 2757 int cmp; 2758 struct rb_node *node = root->rb_node; 2759 struct nfs4_client *clp; 2760 2761 while (node) { 2762 clp = rb_entry(node, struct nfs4_client, cl_namenode); 2763 cmp = compare_blob(&clp->cl_name, name); 2764 if (cmp > 0) 2765 node = node->rb_left; 2766 else if (cmp < 0) 2767 node = node->rb_right; 2768 else 2769 return clp; 2770 } 2771 return NULL; 2772 } 2773 2774 static void 2775 add_to_unconfirmed(struct nfs4_client *clp) 2776 { 2777 unsigned int idhashval; 2778 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2779 2780 lockdep_assert_held(&nn->client_lock); 2781 2782 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2783 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 2784 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2785 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 2786 renew_client_locked(clp); 2787 } 2788 2789 static void 2790 move_to_confirmed(struct nfs4_client *clp) 2791 { 2792 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2793 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2794 2795 lockdep_assert_held(&nn->client_lock); 2796 2797 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 2798 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 2799 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2800 add_clp_to_name_tree(clp, &nn->conf_name_tree); 2801 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2802 renew_client_locked(clp); 2803 } 2804 2805 static struct nfs4_client * 2806 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) 2807 { 2808 struct nfs4_client *clp; 2809 unsigned int idhashval = clientid_hashval(clid->cl_id); 2810 2811 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { 2812 if (same_clid(&clp->cl_clientid, clid)) { 2813 if ((bool)clp->cl_minorversion != sessions) 2814 return NULL; 2815 renew_client_locked(clp); 2816 return clp; 2817 } 2818 } 2819 return NULL; 2820 } 2821 2822 static struct nfs4_client * 2823 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2824 { 2825 struct list_head *tbl = nn->conf_id_hashtbl; 2826 2827 lockdep_assert_held(&nn->client_lock); 2828 return find_client_in_id_table(tbl, clid, sessions); 2829 } 2830 2831 static struct nfs4_client * 2832 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 2833 { 2834 struct list_head *tbl = 
nn->unconf_id_hashtbl; 2835 2836 lockdep_assert_held(&nn->client_lock); 2837 return find_client_in_id_table(tbl, clid, sessions); 2838 } 2839 2840 static bool clp_used_exchangeid(struct nfs4_client *clp) 2841 { 2842 return clp->cl_exchange_flags != 0; 2843 } 2844 2845 static struct nfs4_client * 2846 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2847 { 2848 lockdep_assert_held(&nn->client_lock); 2849 return find_clp_in_name_tree(name, &nn->conf_name_tree); 2850 } 2851 2852 static struct nfs4_client * 2853 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 2854 { 2855 lockdep_assert_held(&nn->client_lock); 2856 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 2857 } 2858 2859 static void 2860 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 2861 { 2862 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 2863 struct sockaddr *sa = svc_addr(rqstp); 2864 u32 scopeid = rpc_get_scope_id(sa); 2865 unsigned short expected_family; 2866 2867 /* Currently, we only support tcp and tcp6 for the callback channel */ 2868 if (se->se_callback_netid_len == 3 && 2869 !memcmp(se->se_callback_netid_val, "tcp", 3)) 2870 expected_family = AF_INET; 2871 else if (se->se_callback_netid_len == 4 && 2872 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 2873 expected_family = AF_INET6; 2874 else 2875 goto out_err; 2876 2877 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, 2878 se->se_callback_addr_len, 2879 (struct sockaddr *)&conn->cb_addr, 2880 sizeof(conn->cb_addr)); 2881 2882 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 2883 goto out_err; 2884 2885 if (conn->cb_addr.ss_family == AF_INET6) 2886 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 2887 2888 conn->cb_prog = se->se_callback_prog; 2889 conn->cb_ident = se->se_callback_ident; 2890 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 2891 trace_nfsd_cb_args(clp, conn); 2892 return; 2893 out_err: 2894 conn->cb_addr.ss_family = AF_UNSPEC; 2895 conn->cb_addrlen = 0; 2896 trace_nfsd_cb_nodelegs(clp); 2897 return; 2898 } 2899 2900 /* 2901 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 2902 */ 2903 static void 2904 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 2905 { 2906 struct xdr_buf *buf = resp->xdr.buf; 2907 struct nfsd4_slot *slot = resp->cstate.slot; 2908 unsigned int base; 2909 2910 dprintk("--> %s slot %p\n", __func__, slot); 2911 2912 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 2913 slot->sl_opcnt = resp->opcnt; 2914 slot->sl_status = resp->cstate.status; 2915 free_svc_cred(&slot->sl_cred); 2916 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); 2917 2918 if (!nfsd4_cache_this(resp)) { 2919 slot->sl_flags &= ~NFSD4_SLOT_CACHED; 2920 return; 2921 } 2922 slot->sl_flags |= NFSD4_SLOT_CACHED; 2923 2924 base = resp->cstate.data_offset; 2925 slot->sl_datalen = buf->len - base; 2926 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) 2927 WARN(1, "%s: sessions DRC could not cache compound\n", 2928 __func__); 2929 return; 2930 } 2931 2932 /* 2933 * Encode the replay sequence operation from the slot values. 2934 * If cachethis is FALSE, encode the uncached rep error on the next 2935 * operation, which sets resp->p and increments resp->opcnt for 2936 * nfs4svc_encode_compoundres.
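 * Illustrative example (not part of the original comment): for a
 * retried SEQUENCE+PUTFH+READ compound whose reply was not cached, we
 * replay only the SEQUENCE result and then encode
 * nfserr_retry_uncached_rep as the status of the next op, so the
 * client can tell exactly where the cached reply ends.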
2937 * 2938 */ 2939 static __be32 2940 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 2941 struct nfsd4_compoundres *resp) 2942 { 2943 struct nfsd4_op *op; 2944 struct nfsd4_slot *slot = resp->cstate.slot; 2945 2946 /* Encode the replayed sequence operation */ 2947 op = &args->ops[resp->opcnt - 1]; 2948 nfsd4_encode_operation(resp, op); 2949 2950 if (slot->sl_flags & NFSD4_SLOT_CACHED) 2951 return op->status; 2952 if (args->opcnt == 1) { 2953 /* 2954 * The original operation wasn't a solo sequence--we 2955 * always cache those--so this retry must not match the 2956 * original: 2957 */ 2958 op->status = nfserr_seq_false_retry; 2959 } else { 2960 op = &args->ops[resp->opcnt++]; 2961 op->status = nfserr_retry_uncached_rep; 2962 nfsd4_encode_operation(resp, op); 2963 } 2964 return op->status; 2965 } 2966 2967 /* 2968 * The sequence operation is not cached because we can use the slot and 2969 * session values. 2970 */ 2971 static __be32 2972 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 2973 struct nfsd4_sequence *seq) 2974 { 2975 struct nfsd4_slot *slot = resp->cstate.slot; 2976 struct xdr_stream *xdr = &resp->xdr; 2977 __be32 *p; 2978 __be32 status; 2979 2980 dprintk("--> %s slot %p\n", __func__, slot); 2981 2982 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 2983 if (status) 2984 return status; 2985 2986 p = xdr_reserve_space(xdr, slot->sl_datalen); 2987 if (!p) { 2988 WARN_ON_ONCE(1); 2989 return nfserr_serverfault; 2990 } 2991 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); 2992 xdr_commit_encode(xdr); 2993 2994 resp->opcnt = slot->sl_opcnt; 2995 return slot->sl_status; 2996 } 2997 2998 /* 2999 * Set the exchange_id flags returned by the server. 3000 */ 3001 static void 3002 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 3003 { 3004 #ifdef CONFIG_NFSD_PNFS 3005 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; 3006 #else 3007 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 3008 #endif 3009 3010 /* Referrals are supported, Migration is not. */ 3011 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 3012 3013 /* set the wire flags to return to client. 
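 * At minimum that is USE_PNFS_MDS or USE_NON_PNFS plus
 * SUPP_MOVED_REFER, as OR'd in above; a client confirmed earlier may
 * also carry EXCHGID4_FLAG_CONFIRMED_R in cl_exchange_flags here.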
*/ 3014 clid->flags = new->cl_exchange_flags; 3015 } 3016 3017 static bool client_has_openowners(struct nfs4_client *clp) 3018 { 3019 struct nfs4_openowner *oo; 3020 3021 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { 3022 if (!list_empty(&oo->oo_owner.so_stateids)) 3023 return true; 3024 } 3025 return false; 3026 } 3027 3028 static bool client_has_state(struct nfs4_client *clp) 3029 { 3030 return client_has_openowners(clp) 3031 #ifdef CONFIG_NFSD_PNFS 3032 || !list_empty(&clp->cl_lo_states) 3033 #endif 3034 || !list_empty(&clp->cl_delegations) 3035 || !list_empty(&clp->cl_sessions) 3036 || !list_empty(&clp->async_copies); 3037 } 3038 3039 static __be32 copy_impl_id(struct nfs4_client *clp, 3040 struct nfsd4_exchange_id *exid) 3041 { 3042 if (!exid->nii_domain.data) 3043 return 0; 3044 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); 3045 if (!clp->cl_nii_domain.data) 3046 return nfserr_jukebox; 3047 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); 3048 if (!clp->cl_nii_name.data) 3049 return nfserr_jukebox; 3050 clp->cl_nii_time = exid->nii_time; 3051 return 0; 3052 } 3053 3054 __be32 3055 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3056 union nfsd4_op_u *u) 3057 { 3058 struct nfsd4_exchange_id *exid = &u->exchange_id; 3059 struct nfs4_client *conf, *new; 3060 struct nfs4_client *unconf = NULL; 3061 __be32 status; 3062 char addr_str[INET6_ADDRSTRLEN]; 3063 nfs4_verifier verf = exid->verifier; 3064 struct sockaddr *sa = svc_addr(rqstp); 3065 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 3066 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3067 3068 rpc_ntop(sa, addr_str, sizeof(addr_str)); 3069 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 3070 "ip_addr=%s flags %x, spa_how %u\n", 3071 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 3072 addr_str, exid->flags, exid->spa_how); 3073 3074 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 3075 return nfserr_inval; 3076 3077 new = create_client(exid->clname, rqstp, &verf); 3078 if (new == NULL) 3079 return nfserr_jukebox; 3080 status = copy_impl_id(new, exid); 3081 if (status) 3082 goto out_nolock; 3083 3084 switch (exid->spa_how) { 3085 case SP4_MACH_CRED: 3086 exid->spo_must_enforce[0] = 0; 3087 exid->spo_must_enforce[1] = ( 3088 1 << (OP_BIND_CONN_TO_SESSION - 32) | 3089 1 << (OP_EXCHANGE_ID - 32) | 3090 1 << (OP_CREATE_SESSION - 32) | 3091 1 << (OP_DESTROY_SESSION - 32) | 3092 1 << (OP_DESTROY_CLIENTID - 32)); 3093 3094 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 3095 1 << (OP_OPEN_DOWNGRADE) | 3096 1 << (OP_LOCKU) | 3097 1 << (OP_DELEGRETURN)); 3098 3099 exid->spo_must_allow[1] &= ( 3100 1 << (OP_TEST_STATEID - 32) | 3101 1 << (OP_FREE_STATEID - 32)); 3102 if (!svc_rqst_integrity_protected(rqstp)) { 3103 status = nfserr_inval; 3104 goto out_nolock; 3105 } 3106 /* 3107 * Sometimes userspace doesn't give us a principal. 3108 * Which is a bug, really. 
Anyway, we can't enforce 3109 * MACH_CRED in that case, better to give up now: 3110 */ 3111 if (!new->cl_cred.cr_principal && 3112 !new->cl_cred.cr_raw_principal) { 3113 status = nfserr_serverfault; 3114 goto out_nolock; 3115 } 3116 new->cl_mach_cred = true; break; 3117 case SP4_NONE: 3118 break; 3119 default: /* checked by xdr code */ 3120 WARN_ON_ONCE(1); 3121 fallthrough; 3122 case SP4_SSV: 3123 status = nfserr_encr_alg_unsupp; 3124 goto out_nolock; 3125 } 3126 3127 /* Cases below refer to rfc 5661 section 18.35.4: */ 3128 spin_lock(&nn->client_lock); 3129 conf = find_confirmed_client_by_name(&exid->clname, nn); 3130 if (conf) { 3131 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 3132 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 3133 3134 if (update) { 3135 if (!clp_used_exchangeid(conf)) { /* buggy client */ 3136 status = nfserr_inval; 3137 goto out; 3138 } 3139 if (!nfsd4_mach_creds_match(conf, rqstp)) { 3140 status = nfserr_wrong_cred; 3141 goto out; 3142 } 3143 if (!creds_match) { /* case 9 */ 3144 status = nfserr_perm; 3145 goto out; 3146 } 3147 if (!verfs_match) { /* case 8 */ 3148 status = nfserr_not_same; 3149 goto out; 3150 } 3151 /* case 6 */ 3152 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 3153 goto out_copy; 3154 } 3155 if (!creds_match) { /* case 3 */ 3156 if (client_has_state(conf)) { 3157 status = nfserr_clid_inuse; 3158 goto out; 3159 } 3160 goto out_new; 3161 } 3162 if (verfs_match) { /* case 2 */ 3163 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 3164 goto out_copy; 3165 } 3166 /* case 5, client reboot */ 3167 conf = NULL; 3168 goto out_new; 3169 } 3170 3171 if (update) { /* case 7 */ 3172 status = nfserr_noent; 3173 goto out; 3174 } 3175 3176 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 3177 if (unconf) /* case 4, possible retry or client restart */ 3178 unhash_client_locked(unconf); 3179 3180 /* case 1 (normal case) */ 3181 out_new: 3182 if (conf) { 3183 status = mark_client_expired_locked(conf); 3184 if (status) 3185 goto out; 3186 } 3187 new->cl_minorversion = cstate->minorversion; 3188 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; 3189 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; 3190 3191 add_to_unconfirmed(new); 3192 swap(new, conf); 3193 out_copy: 3194 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; 3195 exid->clientid.cl_id = conf->cl_clientid.cl_id; 3196 3197 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; 3198 nfsd4_set_ex_flags(conf, exid); 3199 3200 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 3201 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); 3202 status = nfs_ok; 3203 3204 out: 3205 spin_unlock(&nn->client_lock); 3206 out_nolock: 3207 if (new) 3208 expire_client(new); 3209 if (unconf) 3210 expire_client(unconf); 3211 return status; 3212 } 3213 3214 static __be32 3215 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 3216 { 3217 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 3218 slot_seqid); 3219 3220 /* The slot is in use, and no response has been sent.
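 * (Worked example of the wraparound note below: if slot_seqid ==
 * 0xffffffffU, then slot_seqid + 1 wraps to 0 in unsigned 32-bit
 * arithmetic, so an incoming seqid of 0 is accepted as the in-order
 * successor rather than flagged as misordered.)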
*/ 3221 if (slot_inuse) { 3222 if (seqid == slot_seqid) 3223 return nfserr_jukebox; 3224 else 3225 return nfserr_seq_misordered; 3226 } 3227 /* Note unsigned 32-bit arithmetic handles wraparound: */ 3228 if (likely(seqid == slot_seqid + 1)) 3229 return nfs_ok; 3230 if (seqid == slot_seqid) 3231 return nfserr_replay_cache; 3232 return nfserr_seq_misordered; 3233 } 3234 3235 /* 3236 * Cache the create session result into the create session single DRC 3237 * slot cache by saving the xdr structure. sl_seqid has been set. 3238 * Do this for solo or embedded create session operations. 3239 */ 3240 static void 3241 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 3242 struct nfsd4_clid_slot *slot, __be32 nfserr) 3243 { 3244 slot->sl_status = nfserr; 3245 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 3246 } 3247 3248 static __be32 3249 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 3250 struct nfsd4_clid_slot *slot) 3251 { 3252 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 3253 return slot->sl_status; 3254 } 3255 3256 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 3257 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 3258 1 + /* MIN tag is length with zero, only length */ \ 3259 3 + /* version, opcount, opcode */ \ 3260 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3261 /* seqid, slotID, slotID, cache */ \ 3262 4 ) * sizeof(__be32)) 3263 3264 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 3265 2 + /* verifier: AUTH_NULL, length 0 */\ 3266 1 + /* status */ \ 3267 1 + /* MIN tag is length with zero, only length */ \ 3268 3 + /* opcount, opcode, opstatus*/ \ 3269 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3270 /* seqid, slotID, slotID, slotID, status */ \ 3271 5 ) * sizeof(__be32)) 3272 3273 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 3274 { 3275 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; 3276 3277 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) 3278 return nfserr_toosmall; 3279 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) 3280 return nfserr_toosmall; 3281 ca->headerpadsz = 0; 3282 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 3283 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 3284 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 3285 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 3286 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 3287 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); 3288 /* 3289 * Note decreasing slot size below client's request may make it 3290 * difficult for client to function correctly, whereas 3291 * decreasing the number of slots will (just?) affect 3292 * performance. When short on memory we therefore prefer to 3293 * decrease number of slots instead of their size. Clients that 3294 * request larger slots than they need will get poor results: 3295 * Note that we always allow at least one slot, because our 3296 * accounting is soft and provides no guarantees either way. 3297 */ 3298 ca->maxreqs = nfsd4_get_drc_mem(ca, nn); 3299 3300 return nfs_ok; 3301 } 3302 3303 /* 3304 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. 3305 * These are based on similar macros in linux/sunrpc/msg_prot.h . 
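 *
 * (All of the *_sz quantities below are counted in 32-bit XDR words;
 * the NFSD_CB_MAX_REQ_SZ and NFSD_CB_MAX_RESP_SZ totals are converted
 * to bytes by the trailing "* sizeof(__be32)".)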
3306 */ 3307 #define RPC_MAX_HEADER_WITH_AUTH_SYS \ 3308 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) 3309 3310 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ 3311 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) 3312 3313 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ 3314 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) 3315 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ 3316 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ 3317 sizeof(__be32)) 3318 3319 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) 3320 { 3321 ca->headerpadsz = 0; 3322 3323 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) 3324 return nfserr_toosmall; 3325 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) 3326 return nfserr_toosmall; 3327 ca->maxresp_cached = 0; 3328 if (ca->maxops < 2) 3329 return nfserr_toosmall; 3330 3331 return nfs_ok; 3332 } 3333 3334 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) 3335 { 3336 switch (cbs->flavor) { 3337 case RPC_AUTH_NULL: 3338 case RPC_AUTH_UNIX: 3339 return nfs_ok; 3340 default: 3341 /* 3342 * GSS case: the spec doesn't allow us to return this 3343 * error. But it also doesn't allow us not to support 3344 * GSS. 3345 * I'd rather this fail hard than return some error the 3346 * client might think it can already handle: 3347 */ 3348 return nfserr_encr_alg_unsupp; 3349 } 3350 } 3351 3352 __be32 3353 nfsd4_create_session(struct svc_rqst *rqstp, 3354 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 3355 { 3356 struct nfsd4_create_session *cr_ses = &u->create_session; 3357 struct sockaddr *sa = svc_addr(rqstp); 3358 struct nfs4_client *conf, *unconf; 3359 struct nfs4_client *old = NULL; 3360 struct nfsd4_session *new; 3361 struct nfsd4_conn *conn; 3362 struct nfsd4_clid_slot *cs_slot = NULL; 3363 __be32 status = 0; 3364 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3365 3366 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 3367 return nfserr_inval; 3368 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); 3369 if (status) 3370 return status; 3371 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); 3372 if (status) 3373 return status; 3374 status = check_backchannel_attrs(&cr_ses->back_channel); 3375 if (status) 3376 goto out_release_drc_mem; 3377 status = nfserr_jukebox; 3378 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); 3379 if (!new) 3380 goto out_release_drc_mem; 3381 conn = alloc_conn_from_crses(rqstp, cr_ses); 3382 if (!conn) 3383 goto out_free_session; 3384 3385 spin_lock(&nn->client_lock); 3386 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 3387 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 3388 WARN_ON_ONCE(conf && unconf); 3389 3390 if (conf) { 3391 status = nfserr_wrong_cred; 3392 if (!nfsd4_mach_creds_match(conf, rqstp)) 3393 goto out_free_conn; 3394 cs_slot = &conf->cl_cs_slot; 3395 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3396 if (status) { 3397 if (status == nfserr_replay_cache) 3398 status = nfsd4_replay_create_session(cr_ses, cs_slot); 3399 goto out_free_conn; 3400 } 3401 } else if (unconf) { 3402 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 3403 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 3404 status = nfserr_clid_inuse; 3405 goto out_free_conn; 3406 } 3407 status = nfserr_wrong_cred; 3408 if (!nfsd4_mach_creds_match(unconf, rqstp)) 3409 goto out_free_conn; 3410 cs_slot = &unconf->cl_cs_slot; 3411 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3412 if (status) { 3413 /* an unconfirmed replay returns misordered */ 3414 status = 
nfserr_seq_misordered; 3415 goto out_free_conn; 3416 } 3417 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 3418 if (old) { 3419 status = mark_client_expired_locked(old); 3420 if (status) { 3421 old = NULL; 3422 goto out_free_conn; 3423 } 3424 } 3425 move_to_confirmed(unconf); 3426 conf = unconf; 3427 } else { 3428 status = nfserr_stale_clientid; 3429 goto out_free_conn; 3430 } 3431 status = nfs_ok; 3432 /* Persistent sessions are not supported */ 3433 cr_ses->flags &= ~SESSION4_PERSIST; 3434 /* Upshifting from TCP to RDMA is not supported */ 3435 cr_ses->flags &= ~SESSION4_RDMA; 3436 3437 init_session(rqstp, new, conf, cr_ses); 3438 nfsd4_get_session_locked(new); 3439 3440 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 3441 NFS4_MAX_SESSIONID_LEN); 3442 cs_slot->sl_seqid++; 3443 cr_ses->seqid = cs_slot->sl_seqid; 3444 3445 /* cache solo and embedded create sessions under the client_lock */ 3446 nfsd4_cache_create_session(cr_ses, cs_slot, status); 3447 spin_unlock(&nn->client_lock); 3448 /* init connection and backchannel */ 3449 nfsd4_init_conn(rqstp, conn, new); 3450 nfsd4_put_session(new); 3451 if (old) 3452 expire_client(old); 3453 return status; 3454 out_free_conn: 3455 spin_unlock(&nn->client_lock); 3456 free_conn(conn); 3457 if (old) 3458 expire_client(old); 3459 out_free_session: 3460 __free_session(new); 3461 out_release_drc_mem: 3462 nfsd4_put_drc_mem(&cr_ses->fore_channel); 3463 return status; 3464 } 3465 3466 static __be32 nfsd4_map_bcts_dir(u32 *dir) 3467 { 3468 switch (*dir) { 3469 case NFS4_CDFC4_FORE: 3470 case NFS4_CDFC4_BACK: 3471 return nfs_ok; 3472 case NFS4_CDFC4_FORE_OR_BOTH: 3473 case NFS4_CDFC4_BACK_OR_BOTH: 3474 *dir = NFS4_CDFC4_BOTH; 3475 return nfs_ok; 3476 } 3477 return nfserr_inval; 3478 } 3479 3480 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, 3481 struct nfsd4_compound_state *cstate, 3482 union nfsd4_op_u *u) 3483 { 3484 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; 3485 struct nfsd4_session *session = cstate->session; 3486 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3487 __be32 status; 3488 3489 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); 3490 if (status) 3491 return status; 3492 spin_lock(&nn->client_lock); 3493 session->se_cb_prog = bc->bc_cb_program; 3494 session->se_cb_sec = bc->bc_cb_sec; 3495 spin_unlock(&nn->client_lock); 3496 3497 nfsd4_probe_callback(session->se_client); 3498 3499 return nfs_ok; 3500 } 3501 3502 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 3503 { 3504 struct nfsd4_conn *c; 3505 3506 list_for_each_entry(c, &s->se_conns, cn_persession) { 3507 if (c->cn_xprt == xpt) { 3508 return c; 3509 } 3510 } 3511 return NULL; 3512 } 3513 3514 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, 3515 struct nfsd4_session *session, u32 req) 3516 { 3517 struct nfs4_client *clp = session->se_client; 3518 struct svc_xprt *xpt = rqst->rq_xprt; 3519 struct nfsd4_conn *c; 3520 __be32 status; 3521 3522 /* Following the last paragraph of RFC 5661 Section 18.34.3: */ 3523 spin_lock(&clp->cl_lock); 3524 c = __nfsd4_find_conn(xpt, session); 3525 if (!c) 3526 status = nfserr_noent; 3527 else if (req == c->cn_flags) 3528 status = nfs_ok; 3529 else if (req == NFS4_CDFC4_FORE_OR_BOTH && 3530 c->cn_flags != NFS4_CDFC4_BACK) 3531 status = nfs_ok; 3532 else if (req == NFS4_CDFC4_BACK_OR_BOTH && 3533 c->cn_flags != NFS4_CDFC4_FORE) 3534 status = nfs_ok; 3535 else 3536 status = nfserr_inval; 3537 spin_unlock(&clp->cl_lock); 3538 return status; 3539 } 
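/*
 * Summary of the decision table above (derived from the code; see also
 * RFC 5661 section 18.34.3): no existing connection on this transport
 * yields nfserr_noent; an exact direction match, FORE or BOTH when
 * FORE_OR_BOTH was requested, or BACK or BOTH when BACK_OR_BOTH was
 * requested, yields nfs_ok; anything else is nfserr_inval. The caller
 * below treats both nfs_ok and nfserr_inval as final answers.
 */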
3540 3541 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 3542 struct nfsd4_compound_state *cstate, 3543 union nfsd4_op_u *u) 3544 { 3545 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; 3546 __be32 status; 3547 struct nfsd4_conn *conn; 3548 struct nfsd4_session *session; 3549 struct net *net = SVC_NET(rqstp); 3550 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3551 3552 if (!nfsd4_last_compound_op(rqstp)) 3553 return nfserr_not_only_op; 3554 spin_lock(&nn->client_lock); 3555 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); 3556 spin_unlock(&nn->client_lock); 3557 if (!session) 3558 goto out_no_session; 3559 status = nfserr_wrong_cred; 3560 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) 3561 goto out; 3562 status = nfsd4_match_existing_connection(rqstp, session, bcts->dir); 3563 if (status == nfs_ok || status == nfserr_inval) 3564 goto out; 3565 status = nfsd4_map_bcts_dir(&bcts->dir); 3566 if (status) 3567 goto out; 3568 conn = alloc_conn(rqstp, bcts->dir); 3569 status = nfserr_jukebox; 3570 if (!conn) 3571 goto out; 3572 nfsd4_init_conn(rqstp, conn, session); 3573 status = nfs_ok; 3574 out: 3575 nfsd4_put_session(session); 3576 out_no_session: 3577 return status; 3578 } 3579 3580 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid) 3581 { 3582 if (!cstate->session) 3583 return false; 3584 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); 3585 } 3586 3587 __be32 3588 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, 3589 union nfsd4_op_u *u) 3590 { 3591 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; 3592 struct nfsd4_session *ses; 3593 __be32 status; 3594 int ref_held_by_me = 0; 3595 struct net *net = SVC_NET(r); 3596 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3597 3598 status = nfserr_not_only_op; 3599 if (nfsd4_compound_in_session(cstate, sessionid)) { 3600 if (!nfsd4_last_compound_op(r)) 3601 goto out; 3602 ref_held_by_me++; 3603 } 3604 dump_sessionid(__func__, sessionid); 3605 spin_lock(&nn->client_lock); 3606 ses = find_in_sessionid_hashtbl(sessionid, net, &status); 3607 if (!ses) 3608 goto out_client_lock; 3609 status = nfserr_wrong_cred; 3610 if (!nfsd4_mach_creds_match(ses->se_client, r)) 3611 goto out_put_session; 3612 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 3613 if (status) 3614 goto out_put_session; 3615 unhash_session(ses); 3616 spin_unlock(&nn->client_lock); 3617 3618 nfsd4_probe_callback_sync(ses->se_client); 3619 3620 spin_lock(&nn->client_lock); 3621 status = nfs_ok; 3622 out_put_session: 3623 nfsd4_put_session_locked(ses); 3624 out_client_lock: 3625 spin_unlock(&nn->client_lock); 3626 out: 3627 return status; 3628 } 3629 3630 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 3631 { 3632 struct nfs4_client *clp = ses->se_client; 3633 struct nfsd4_conn *c; 3634 __be32 status = nfs_ok; 3635 int ret; 3636 3637 spin_lock(&clp->cl_lock); 3638 c = __nfsd4_find_conn(new->cn_xprt, ses); 3639 if (c) 3640 goto out_free; 3641 status = nfserr_conn_not_bound_to_session; 3642 if (clp->cl_mach_cred) 3643 goto out_free; 3644 __nfsd4_hash_conn(new, ses); 3645 spin_unlock(&clp->cl_lock); 3646 ret = nfsd4_register_conn(new); 3647 if (ret) 3648 /* oops; xprt is already down: */ 3649 nfsd4_conn_lost(&new->cn_xpt_user); 3650 return nfs_ok; 3651 out_free: 3652 spin_unlock(&clp->cl_lock); 3653 free_conn(new); 3654 return status; 3655 } 3656 3657 
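/*
 * Note on nfsd4_sequence_check_conn() above: it consumes the conn either
 * way. A connection already on the session's list means the new one is
 * freed and nfs_ok returned; a mach_cred client gets
 * nfserr_conn_not_bound_to_session (conn freed); otherwise the conn is
 * hashed into the session, and even a failed nfsd4_register_conn() still
 * returns nfs_ok after tearing the conn down via nfsd4_conn_lost().
 */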
static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 3658 { 3659 struct nfsd4_compoundargs *args = rqstp->rq_argp; 3660 3661 return args->opcnt > session->se_fchannel.maxops; 3662 } 3663 3664 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 3665 struct nfsd4_session *session) 3666 { 3667 struct xdr_buf *xb = &rqstp->rq_arg; 3668 3669 return xb->len > session->se_fchannel.maxreq_sz; 3670 } 3671 3672 static bool replay_matches_cache(struct svc_rqst *rqstp, 3673 struct nfsd4_sequence *seq, struct nfsd4_slot *slot) 3674 { 3675 struct nfsd4_compoundargs *argp = rqstp->rq_argp; 3676 3677 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != 3678 (bool)seq->cachethis) 3679 return false; 3680 /* 3681 * If there's an error then the reply can have fewer ops than 3682 * the call. 3683 */ 3684 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) 3685 return false; 3686 /* 3687 * But if we cached a reply with *more* ops than the call you're 3688 * sending us now, then this new call is clearly not really a 3689 * replay of the old one: 3690 */ 3691 if (slot->sl_opcnt > argp->opcnt) 3692 return false; 3693 /* This is the only check explicitly called for by the spec: */ 3694 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) 3695 return false; 3696 /* 3697 * There may be more comparisons we could actually do, but the 3698 * spec doesn't require us to catch every case where the calls 3699 * don't match (that would require caching the call as well as 3700 * the reply), so we don't bother. 3701 */ 3702 return true; 3703 } 3704 3705 __be32 3706 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3707 union nfsd4_op_u *u) 3708 { 3709 struct nfsd4_sequence *seq = &u->sequence; 3710 struct nfsd4_compoundres *resp = rqstp->rq_resp; 3711 struct xdr_stream *xdr = &resp->xdr; 3712 struct nfsd4_session *session; 3713 struct nfs4_client *clp; 3714 struct nfsd4_slot *slot; 3715 struct nfsd4_conn *conn; 3716 __be32 status; 3717 int buflen; 3718 struct net *net = SVC_NET(rqstp); 3719 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3720 3721 if (resp->opcnt != 1) 3722 return nfserr_sequence_pos; 3723 3724 /* 3725 * Will be either used or freed by nfsd4_sequence_check_conn 3726 * below.
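 * (Error and replay paths that bail out before that call still hold
 * conn and free it explicitly at the out_no_session label.)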
3727 */ 3728 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 3729 if (!conn) 3730 return nfserr_jukebox; 3731 3732 spin_lock(&nn->client_lock); 3733 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); 3734 if (!session) 3735 goto out_no_session; 3736 clp = session->se_client; 3737 3738 status = nfserr_too_many_ops; 3739 if (nfsd4_session_too_many_ops(rqstp, session)) 3740 goto out_put_session; 3741 3742 status = nfserr_req_too_big; 3743 if (nfsd4_request_too_big(rqstp, session)) 3744 goto out_put_session; 3745 3746 status = nfserr_badslot; 3747 if (seq->slotid >= session->se_fchannel.maxreqs) 3748 goto out_put_session; 3749 3750 slot = session->se_slots[seq->slotid]; 3751 dprintk("%s: slotid %d\n", __func__, seq->slotid); 3752 3753 /* We do not negotiate the number of slots yet, so set the 3754 * maxslots to the session maxreqs, which is used to encode 3755 * sr_highest_slotid and the sr_target_slotid to maxslots */ 3756 seq->maxslots = session->se_fchannel.maxreqs; 3757 3758 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 3759 slot->sl_flags & NFSD4_SLOT_INUSE); 3760 if (status == nfserr_replay_cache) { 3761 status = nfserr_seq_misordered; 3762 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 3763 goto out_put_session; 3764 status = nfserr_seq_false_retry; 3765 if (!replay_matches_cache(rqstp, seq, slot)) 3766 goto out_put_session; 3767 cstate->slot = slot; 3768 cstate->session = session; 3769 cstate->clp = clp; 3770 /* Return the cached reply status and set cstate->status 3771 * for nfsd4_proc_compound processing */ 3772 status = nfsd4_replay_cache_entry(resp, seq); 3773 cstate->status = nfserr_replay_cache; 3774 goto out; 3775 } 3776 if (status) 3777 goto out_put_session; 3778 3779 status = nfsd4_sequence_check_conn(conn, session); 3780 conn = NULL; 3781 if (status) 3782 goto out_put_session; 3783 3784 buflen = (seq->cachethis) ? 3785 session->se_fchannel.maxresp_cached : 3786 session->se_fchannel.maxresp_sz; 3787 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : 3788 nfserr_rep_too_big; 3789 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) 3790 goto out_put_session; 3791 svc_reserve(rqstp, buflen); 3792 3793 status = nfs_ok; 3794 /* Success!
bump slot seqid */ 3795 slot->sl_seqid = seq->seqid; 3796 slot->sl_flags |= NFSD4_SLOT_INUSE; 3797 if (seq->cachethis) 3798 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 3799 else 3800 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 3801 3802 cstate->slot = slot; 3803 cstate->session = session; 3804 cstate->clp = clp; 3805 3806 out: 3807 switch (clp->cl_cb_state) { 3808 case NFSD4_CB_DOWN: 3809 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 3810 break; 3811 case NFSD4_CB_FAULT: 3812 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 3813 break; 3814 default: 3815 seq->status_flags = 0; 3816 } 3817 if (!list_empty(&clp->cl_revoked)) 3818 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; 3819 out_no_session: 3820 if (conn) 3821 free_conn(conn); 3822 spin_unlock(&nn->client_lock); 3823 return status; 3824 out_put_session: 3825 nfsd4_put_session_locked(session); 3826 goto out_no_session; 3827 } 3828 3829 void 3830 nfsd4_sequence_done(struct nfsd4_compoundres *resp) 3831 { 3832 struct nfsd4_compound_state *cs = &resp->cstate; 3833 3834 if (nfsd4_has_session(cs)) { 3835 if (cs->status != nfserr_replay_cache) { 3836 nfsd4_store_cache_entry(resp); 3837 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 3838 } 3839 /* Drop session reference that was taken in nfsd4_sequence() */ 3840 nfsd4_put_session(cs->session); 3841 } else if (cs->clp) 3842 put_client_renew(cs->clp); 3843 } 3844 3845 __be32 3846 nfsd4_destroy_clientid(struct svc_rqst *rqstp, 3847 struct nfsd4_compound_state *cstate, 3848 union nfsd4_op_u *u) 3849 { 3850 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; 3851 struct nfs4_client *conf, *unconf; 3852 struct nfs4_client *clp = NULL; 3853 __be32 status = 0; 3854 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3855 3856 spin_lock(&nn->client_lock); 3857 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 3858 conf = find_confirmed_client(&dc->clientid, true, nn); 3859 WARN_ON_ONCE(conf && unconf); 3860 3861 if (conf) { 3862 if (client_has_state(conf)) { 3863 status = nfserr_clientid_busy; 3864 goto out; 3865 } 3866 status = mark_client_expired_locked(conf); 3867 if (status) 3868 goto out; 3869 clp = conf; 3870 } else if (unconf) 3871 clp = unconf; 3872 else { 3873 status = nfserr_stale_clientid; 3874 goto out; 3875 } 3876 if (!nfsd4_mach_creds_match(clp, rqstp)) { 3877 clp = NULL; 3878 status = nfserr_wrong_cred; 3879 goto out; 3880 } 3881 unhash_client_locked(clp); 3882 out: 3883 spin_unlock(&nn->client_lock); 3884 if (clp) 3885 expire_client(clp); 3886 return status; 3887 } 3888 3889 __be32 3890 nfsd4_reclaim_complete(struct svc_rqst *rqstp, 3891 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 3892 { 3893 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; 3894 struct nfs4_client *clp = cstate->clp; 3895 __be32 status = 0; 3896 3897 if (rc->rca_one_fs) { 3898 if (!cstate->current_fh.fh_dentry) 3899 return nfserr_nofilehandle; 3900 /* 3901 * We don't take advantage of the rca_one_fs case. 3902 * That's OK, it's optional, we can safely ignore it. 3903 */ 3904 return nfs_ok; 3905 } 3906 3907 status = nfserr_complete_already; 3908 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 3909 goto out; 3910 3911 status = nfserr_stale_clientid; 3912 if (is_client_expired(clp)) 3913 /* 3914 * The following error isn't really legal. 3915 * But we only get here if the client just explicitly 3916 * destroyed the client. Surely it no longer cares what 3917 * error it gets back on an operation for the dead 3918 * client. 
3919 */ 3920 goto out; 3921 3922 status = nfs_ok; 3923 nfsd4_client_record_create(clp); 3924 inc_reclaim_complete(clp); 3925 out: 3926 return status; 3927 } 3928 3929 __be32 3930 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3931 union nfsd4_op_u *u) 3932 { 3933 struct nfsd4_setclientid *setclid = &u->setclientid; 3934 struct xdr_netobj clname = setclid->se_name; 3935 nfs4_verifier clverifier = setclid->se_verf; 3936 struct nfs4_client *conf, *new; 3937 struct nfs4_client *unconf = NULL; 3938 __be32 status; 3939 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3940 3941 new = create_client(clname, rqstp, &clverifier); 3942 if (new == NULL) 3943 return nfserr_jukebox; 3944 /* Cases below refer to rfc 3530 section 14.2.33: */ 3945 spin_lock(&nn->client_lock); 3946 conf = find_confirmed_client_by_name(&clname, nn); 3947 if (conf && client_has_state(conf)) { 3948 /* case 0: */ 3949 status = nfserr_clid_inuse; 3950 if (clp_used_exchangeid(conf)) 3951 goto out; 3952 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 3953 trace_nfsd_clid_inuse_err(conf); 3954 goto out; 3955 } 3956 } 3957 unconf = find_unconfirmed_client_by_name(&clname, nn); 3958 if (unconf) 3959 unhash_client_locked(unconf); 3960 /* We need to handle only case 1: probable callback update */ 3961 if (conf && same_verf(&conf->cl_verifier, &clverifier)) { 3962 copy_clid(new, conf); 3963 gen_confirm(new, nn); 3964 } 3965 new->cl_minorversion = 0; 3966 gen_callback(new, setclid, rqstp); 3967 add_to_unconfirmed(new); 3968 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 3969 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 3970 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 3971 new = NULL; 3972 status = nfs_ok; 3973 out: 3974 spin_unlock(&nn->client_lock); 3975 if (new) 3976 free_client(new); 3977 if (unconf) 3978 expire_client(unconf); 3979 return status; 3980 } 3981 3982 3983 __be32 3984 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 3985 struct nfsd4_compound_state *cstate, 3986 union nfsd4_op_u *u) 3987 { 3988 struct nfsd4_setclientid_confirm *setclientid_confirm = 3989 &u->setclientid_confirm; 3990 struct nfs4_client *conf, *unconf; 3991 struct nfs4_client *old = NULL; 3992 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 3993 clientid_t *clid = &setclientid_confirm->sc_clientid; 3994 __be32 status; 3995 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3996 3997 if (STALE_CLIENTID(clid, nn)) 3998 return nfserr_stale_clientid; 3999 4000 spin_lock(&nn->client_lock); 4001 conf = find_confirmed_client(clid, false, nn); 4002 unconf = find_unconfirmed_client(clid, false, nn); 4003 /* 4004 * We try hard to give out unique clientids, so if we get an 4005 * attempt to confirm the same clientid with a different cred, 4006 * the client may be buggy; this should never happen. 4007 * 4008 * Nevertheless, RFC 7530 recommends INUSE for this case: 4009 */ 4010 status = nfserr_clid_inuse; 4011 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) 4012 goto out; 4013 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) 4014 goto out; 4015 /* cases below refer to rfc 3530 section 14.2.34: */ 4016 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 4017 if (conf && same_verf(&confirm, &conf->cl_confirm)) { 4018 /* case 2: probable retransmit */ 4019 status = nfs_ok; 4020 } else /* case 4: client hasn't noticed we rebooted yet?
*/ 4021 status = nfserr_stale_clientid; 4022 goto out; 4023 } 4024 status = nfs_ok; 4025 if (conf) { /* case 1: callback update */ 4026 old = unconf; 4027 unhash_client_locked(old); 4028 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 4029 } else { /* case 3: normal case; new or rebooted client */ 4030 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 4031 if (old) { 4032 status = nfserr_clid_inuse; 4033 if (client_has_state(old) 4034 && !same_creds(&unconf->cl_cred, 4035 &old->cl_cred)) 4036 goto out; 4037 status = mark_client_expired_locked(old); 4038 if (status) { 4039 old = NULL; 4040 goto out; 4041 } 4042 } 4043 move_to_confirmed(unconf); 4044 conf = unconf; 4045 } 4046 get_client_locked(conf); 4047 spin_unlock(&nn->client_lock); 4048 nfsd4_probe_callback(conf); 4049 spin_lock(&nn->client_lock); 4050 put_client_renew_locked(conf); 4051 out: 4052 spin_unlock(&nn->client_lock); 4053 if (old) 4054 expire_client(old); 4055 return status; 4056 } 4057 4058 static struct nfs4_file *nfsd4_alloc_file(void) 4059 { 4060 return kmem_cache_alloc(file_slab, GFP_KERNEL); 4061 } 4062 4063 /* OPEN Share state helper functions */ 4064 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, 4065 struct nfs4_file *fp) 4066 { 4067 lockdep_assert_held(&state_lock); 4068 4069 refcount_set(&fp->fi_ref, 1); 4070 spin_lock_init(&fp->fi_lock); 4071 INIT_LIST_HEAD(&fp->fi_stateids); 4072 INIT_LIST_HEAD(&fp->fi_delegations); 4073 INIT_LIST_HEAD(&fp->fi_clnt_odstate); 4074 fh_copy_shallow(&fp->fi_fhandle, fh); 4075 fp->fi_deleg_file = NULL; 4076 fp->fi_had_conflict = false; 4077 fp->fi_share_deny = 0; 4078 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 4079 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 4080 #ifdef CONFIG_NFSD_PNFS 4081 INIT_LIST_HEAD(&fp->fi_lo_states); 4082 atomic_set(&fp->fi_lo_recalls, 0); 4083 #endif 4084 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]); 4085 } 4086 4087 void 4088 nfsd4_free_slabs(void) 4089 { 4090 kmem_cache_destroy(client_slab); 4091 kmem_cache_destroy(openowner_slab); 4092 kmem_cache_destroy(lockowner_slab); 4093 kmem_cache_destroy(file_slab); 4094 kmem_cache_destroy(stateid_slab); 4095 kmem_cache_destroy(deleg_slab); 4096 kmem_cache_destroy(odstate_slab); 4097 } 4098 4099 int 4100 nfsd4_init_slabs(void) 4101 { 4102 client_slab = kmem_cache_create("nfsd4_clients", 4103 sizeof(struct nfs4_client), 0, 0, NULL); 4104 if (client_slab == NULL) 4105 goto out; 4106 openowner_slab = kmem_cache_create("nfsd4_openowners", 4107 sizeof(struct nfs4_openowner), 0, 0, NULL); 4108 if (openowner_slab == NULL) 4109 goto out_free_client_slab; 4110 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 4111 sizeof(struct nfs4_lockowner), 0, 0, NULL); 4112 if (lockowner_slab == NULL) 4113 goto out_free_openowner_slab; 4114 file_slab = kmem_cache_create("nfsd4_files", 4115 sizeof(struct nfs4_file), 0, 0, NULL); 4116 if (file_slab == NULL) 4117 goto out_free_lockowner_slab; 4118 stateid_slab = kmem_cache_create("nfsd4_stateids", 4119 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 4120 if (stateid_slab == NULL) 4121 goto out_free_file_slab; 4122 deleg_slab = kmem_cache_create("nfsd4_delegations", 4123 sizeof(struct nfs4_delegation), 0, 0, NULL); 4124 if (deleg_slab == NULL) 4125 goto out_free_stateid_slab; 4126 odstate_slab = kmem_cache_create("nfsd4_odstate", 4127 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); 4128 if (odstate_slab == NULL) 4129 goto out_free_deleg_slab; 4130 return 0; 4131 4132 out_free_deleg_slab: 4133 kmem_cache_destroy(deleg_slab); 4134 
out_free_stateid_slab: 4135 kmem_cache_destroy(stateid_slab); 4136 out_free_file_slab: 4137 kmem_cache_destroy(file_slab); 4138 out_free_lockowner_slab: 4139 kmem_cache_destroy(lockowner_slab); 4140 out_free_openowner_slab: 4141 kmem_cache_destroy(openowner_slab); 4142 out_free_client_slab: 4143 kmem_cache_destroy(client_slab); 4144 out: 4145 return -ENOMEM; 4146 } 4147 4148 static void init_nfs4_replay(struct nfs4_replay *rp) 4149 { 4150 rp->rp_status = nfserr_serverfault; 4151 rp->rp_buflen = 0; 4152 rp->rp_buf = rp->rp_ibuf; 4153 mutex_init(&rp->rp_mutex); 4154 } 4155 4156 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, 4157 struct nfs4_stateowner *so) 4158 { 4159 if (!nfsd4_has_session(cstate)) { 4160 mutex_lock(&so->so_replay.rp_mutex); 4161 cstate->replay_owner = nfs4_get_stateowner(so); 4162 } 4163 } 4164 4165 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) 4166 { 4167 struct nfs4_stateowner *so = cstate->replay_owner; 4168 4169 if (so != NULL) { 4170 cstate->replay_owner = NULL; 4171 mutex_unlock(&so->so_replay.rp_mutex); 4172 nfs4_put_stateowner(so); 4173 } 4174 } 4175 4176 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 4177 { 4178 struct nfs4_stateowner *sop; 4179 4180 sop = kmem_cache_alloc(slab, GFP_KERNEL); 4181 if (!sop) 4182 return NULL; 4183 4184 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); 4185 if (!sop->so_owner.data) { 4186 kmem_cache_free(slab, sop); 4187 return NULL; 4188 } 4189 4190 INIT_LIST_HEAD(&sop->so_stateids); 4191 sop->so_client = clp; 4192 init_nfs4_replay(&sop->so_replay); 4193 atomic_set(&sop->so_count, 1); 4194 return sop; 4195 } 4196 4197 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 4198 { 4199 lockdep_assert_held(&clp->cl_lock); 4200 4201 list_add(&oo->oo_owner.so_strhash, 4202 &clp->cl_ownerstr_hashtbl[strhashval]); 4203 list_add(&oo->oo_perclient, &clp->cl_openowners); 4204 } 4205 4206 static void nfs4_unhash_openowner(struct nfs4_stateowner *so) 4207 { 4208 unhash_openowner_locked(openowner(so)); 4209 } 4210 4211 static void nfs4_free_openowner(struct nfs4_stateowner *so) 4212 { 4213 struct nfs4_openowner *oo = openowner(so); 4214 4215 kmem_cache_free(openowner_slab, oo); 4216 } 4217 4218 static const struct nfs4_stateowner_operations openowner_ops = { 4219 .so_unhash = nfs4_unhash_openowner, 4220 .so_free = nfs4_free_openowner, 4221 }; 4222 4223 static struct nfs4_ol_stateid * 4224 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4225 { 4226 struct nfs4_ol_stateid *local, *ret = NULL; 4227 struct nfs4_openowner *oo = open->op_openowner; 4228 4229 lockdep_assert_held(&fp->fi_lock); 4230 4231 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 4232 /* ignore lock owners */ 4233 if (local->st_stateowner->so_is_open_owner == 0) 4234 continue; 4235 if (local->st_stateowner != &oo->oo_owner) 4236 continue; 4237 if (local->st_stid.sc_type == NFS4_OPEN_STID) { 4238 ret = local; 4239 refcount_inc(&ret->st_stid.sc_count); 4240 break; 4241 } 4242 } 4243 return ret; 4244 } 4245 4246 static __be32 4247 nfsd4_verify_open_stid(struct nfs4_stid *s) 4248 { 4249 __be32 ret = nfs_ok; 4250 4251 switch (s->sc_type) { 4252 default: 4253 break; 4254 case 0: 4255 case NFS4_CLOSED_STID: 4256 case NFS4_CLOSED_DELEG_STID: 4257 ret = nfserr_bad_stateid; 4258 break; 4259 case NFS4_REVOKED_DELEG_STID: 4260 ret = nfserr_deleg_revoked; 4261 } 4262 return ret; 4263 } 4264 4265 
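/*
 * A minimal sketch of the unwind idiom nfsd4_init_slabs() above relies
 * on: allocate caches in order, and on failure jump to the label that
 * destroys, in reverse order, everything allocated so far, so that a
 * partial failure never leaks a cache.  The two caches and all names
 * below are hypothetical and the helper is illustrative only; nothing
 * in this file uses them.
 */
static struct kmem_cache *sketch_a_slab;
static struct kmem_cache *sketch_b_slab;

static int __maybe_unused
nfsd4_init_slabs_sketch(void)
{
	sketch_a_slab = kmem_cache_create("sketch_a",
			sizeof(struct nfs4_stid), 0, 0, NULL);
	if (sketch_a_slab == NULL)
		goto out;
	sketch_b_slab = kmem_cache_create("sketch_b",
			sizeof(struct nfs4_stid), 0, 0, NULL);
	if (sketch_b_slab == NULL)
		goto out_free_a_slab;
	return 0;

out_free_a_slab:
	/* tear down in reverse order of allocation */
	kmem_cache_destroy(sketch_a_slab);
out:
	return -ENOMEM;
}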
/* Lock the stateid st_mutex, and deal with races with CLOSE */ 4266 static __be32 4267 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) 4268 { 4269 __be32 ret; 4270 4271 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); 4272 ret = nfsd4_verify_open_stid(&stp->st_stid); 4273 if (ret != nfs_ok) 4274 mutex_unlock(&stp->st_mutex); 4275 return ret; 4276 } 4277 4278 static struct nfs4_ol_stateid * 4279 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4280 { 4281 struct nfs4_ol_stateid *stp; 4282 for (;;) { 4283 spin_lock(&fp->fi_lock); 4284 stp = nfsd4_find_existing_open(fp, open); 4285 spin_unlock(&fp->fi_lock); 4286 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) 4287 break; 4288 nfs4_put_stid(&stp->st_stid); 4289 } 4290 return stp; 4291 } 4292 4293 static struct nfs4_openowner * 4294 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, 4295 struct nfsd4_compound_state *cstate) 4296 { 4297 struct nfs4_client *clp = cstate->clp; 4298 struct nfs4_openowner *oo, *ret; 4299 4300 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 4301 if (!oo) 4302 return NULL; 4303 oo->oo_owner.so_ops = &openowner_ops; 4304 oo->oo_owner.so_is_open_owner = 1; 4305 oo->oo_owner.so_seqid = open->op_seqid; 4306 oo->oo_flags = 0; 4307 if (nfsd4_has_session(cstate)) 4308 oo->oo_flags |= NFS4_OO_CONFIRMED; 4309 oo->oo_time = 0; 4310 oo->oo_last_closed_stid = NULL; 4311 INIT_LIST_HEAD(&oo->oo_close_lru); 4312 spin_lock(&clp->cl_lock); 4313 ret = find_openstateowner_str_locked(strhashval, open, clp); 4314 if (ret == NULL) { 4315 hash_openowner(oo, clp, strhashval); 4316 ret = oo; 4317 } else 4318 nfs4_free_stateowner(&oo->oo_owner); 4319 4320 spin_unlock(&clp->cl_lock); 4321 return ret; 4322 } 4323 4324 static struct nfs4_ol_stateid * 4325 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) 4326 { 4327 4328 struct nfs4_openowner *oo = open->op_openowner; 4329 struct nfs4_ol_stateid *retstp = NULL; 4330 struct nfs4_ol_stateid *stp; 4331 4332 stp = open->op_stp; 4333 /* We are moving these outside of the spinlocks to avoid the warnings */ 4334 mutex_init(&stp->st_mutex); 4335 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 4336 4337 retry: 4338 spin_lock(&oo->oo_owner.so_client->cl_lock); 4339 spin_lock(&fp->fi_lock); 4340 4341 retstp = nfsd4_find_existing_open(fp, open); 4342 if (retstp) 4343 goto out_unlock; 4344 4345 open->op_stp = NULL; 4346 refcount_inc(&stp->st_stid.sc_count); 4347 stp->st_stid.sc_type = NFS4_OPEN_STID; 4348 INIT_LIST_HEAD(&stp->st_locks); 4349 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); 4350 get_nfs4_file(fp); 4351 stp->st_stid.sc_file = fp; 4352 stp->st_access_bmap = 0; 4353 stp->st_deny_bmap = 0; 4354 stp->st_openstp = NULL; 4355 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 4356 list_add(&stp->st_perfile, &fp->fi_stateids); 4357 4358 out_unlock: 4359 spin_unlock(&fp->fi_lock); 4360 spin_unlock(&oo->oo_owner.so_client->cl_lock); 4361 if (retstp) { 4362 /* Handle races with CLOSE */ 4363 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 4364 nfs4_put_stid(&retstp->st_stid); 4365 goto retry; 4366 } 4367 /* To keep mutex tracking happy */ 4368 mutex_unlock(&stp->st_mutex); 4369 stp = retstp; 4370 } 4371 return stp; 4372 } 4373 4374 /* 4375 * In the 4.0 case we need to keep the owners around a little while to handle 4376 * CLOSE replay. We still do need to release any file access that is held by 4377 * them before returning however. 
4378 */ 4379 static void 4380 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) 4381 { 4382 struct nfs4_ol_stateid *last; 4383 struct nfs4_openowner *oo = openowner(s->st_stateowner); 4384 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, 4385 nfsd_net_id); 4386 4387 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 4388 4389 /* 4390 * We know that we hold one reference via nfsd4_close, and another 4391 * "persistent" reference for the client. If the refcount is higher 4392 * than 2, then there are still calls in progress that are using this 4393 * stateid. We can't put the sc_file reference until they are finished. 4394 * Wait for the refcount to drop to 2. Since it has been unhashed, 4395 * there should be no danger of the refcount going back up again at 4396 * this point. 4397 */ 4398 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); 4399 4400 release_all_access(s); 4401 if (s->st_stid.sc_file) { 4402 put_nfs4_file(s->st_stid.sc_file); 4403 s->st_stid.sc_file = NULL; 4404 } 4405 4406 spin_lock(&nn->client_lock); 4407 last = oo->oo_last_closed_stid; 4408 oo->oo_last_closed_stid = s; 4409 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 4410 oo->oo_time = ktime_get_boottime_seconds(); 4411 spin_unlock(&nn->client_lock); 4412 if (last) 4413 nfs4_put_stid(&last->st_stid); 4414 } 4415 4416 /* search file_hashtbl[] for file */ 4417 static struct nfs4_file * 4418 find_file_locked(struct knfsd_fh *fh, unsigned int hashval) 4419 { 4420 struct nfs4_file *fp; 4421 4422 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash, 4423 lockdep_is_held(&state_lock)) { 4424 if (fh_match(&fp->fi_fhandle, fh)) { 4425 if (refcount_inc_not_zero(&fp->fi_ref)) 4426 return fp; 4427 } 4428 } 4429 return NULL; 4430 } 4431 4432 struct nfs4_file * 4433 find_file(struct knfsd_fh *fh) 4434 { 4435 struct nfs4_file *fp; 4436 unsigned int hashval = file_hashval(fh); 4437 4438 rcu_read_lock(); 4439 fp = find_file_locked(fh, hashval); 4440 rcu_read_unlock(); 4441 return fp; 4442 } 4443 4444 static struct nfs4_file * 4445 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh) 4446 { 4447 struct nfs4_file *fp; 4448 unsigned int hashval = file_hashval(fh); 4449 4450 rcu_read_lock(); 4451 fp = find_file_locked(fh, hashval); 4452 rcu_read_unlock(); 4453 if (fp) 4454 return fp; 4455 4456 spin_lock(&state_lock); 4457 fp = find_file_locked(fh, hashval); 4458 if (likely(fp == NULL)) { 4459 nfsd4_init_file(fh, hashval, new); 4460 fp = new; 4461 } 4462 spin_unlock(&state_lock); 4463 4464 return fp; 4465 } 4466 4467 /* 4468 * Called to check deny when READ with all zero stateid or 4469 * WRITE with all zero or all one stateid 4470 */ 4471 static __be32 4472 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 4473 { 4474 struct nfs4_file *fp; 4475 __be32 ret = nfs_ok; 4476 4477 fp = find_file(¤t_fh->fh_handle); 4478 if (!fp) 4479 return ret; 4480 /* Check for conflicting share reservations */ 4481 spin_lock(&fp->fi_lock); 4482 if (fp->fi_share_deny & deny_type) 4483 ret = nfserr_locked; 4484 spin_unlock(&fp->fi_lock); 4485 put_nfs4_file(fp); 4486 return ret; 4487 } 4488 4489 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) 4490 { 4491 struct nfs4_delegation *dp = cb_to_delegation(cb); 4492 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, 4493 nfsd_net_id); 4494 4495 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); 4496 4497 /* 4498 * We can't do this in nfsd_break_deleg_cb because it is 4499 * already holding inode->i_lock. 
4500 * 4501 * If the dl_time != 0, then we know that it has already been 4502 * queued for a lease break. Don't queue it again. 4503 */ 4504 spin_lock(&state_lock); 4505 if (dp->dl_time == 0) { 4506 dp->dl_time = ktime_get_boottime_seconds(); 4507 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 4508 } 4509 spin_unlock(&state_lock); 4510 } 4511 4512 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, 4513 struct rpc_task *task) 4514 { 4515 struct nfs4_delegation *dp = cb_to_delegation(cb); 4516 4517 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID || 4518 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) 4519 return 1; 4520 4521 switch (task->tk_status) { 4522 case 0: 4523 return 1; 4524 case -NFS4ERR_DELAY: 4525 rpc_delay(task, 2 * HZ); 4526 return 0; 4527 case -EBADHANDLE: 4528 case -NFS4ERR_BAD_STATEID: 4529 /* 4530 * Race: client probably got cb_recall before open reply 4531 * granting delegation. 4532 */ 4533 if (dp->dl_retries--) { 4534 rpc_delay(task, 2 * HZ); 4535 return 0; 4536 } 4537 fallthrough; 4538 default: 4539 return 1; 4540 } 4541 } 4542 4543 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) 4544 { 4545 struct nfs4_delegation *dp = cb_to_delegation(cb); 4546 4547 nfs4_put_stid(&dp->dl_stid); 4548 } 4549 4550 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { 4551 .prepare = nfsd4_cb_recall_prepare, 4552 .done = nfsd4_cb_recall_done, 4553 .release = nfsd4_cb_recall_release, 4554 }; 4555 4556 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 4557 { 4558 /* 4559 * We're assuming the state code never drops its reference 4560 * without first removing the lease. Since we're in this lease 4561 * callback (and since the lease code is serialized by the 4562 * i_lock) we know the server hasn't removed the lease yet, and 4563 * we know it's safe to take a reference. 4564 */ 4565 refcount_inc(&dp->dl_stid.sc_count); 4566 nfsd4_run_cb(&dp->dl_recall); 4567 } 4568 4569 /* Called from break_lease() with i_lock held. 
*/ 4570 static bool 4571 nfsd_break_deleg_cb(struct file_lock *fl) 4572 { 4573 bool ret = false; 4574 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; 4575 struct nfs4_file *fp = dp->dl_stid.sc_file; 4576 4577 trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid); 4578 4579 /* 4580 * We don't want the locks code to time out the lease for us; 4581 * we'll remove it ourselves if a delegation isn't returned 4582 * in time: 4583 */ 4584 fl->fl_break_time = 0; 4585 4586 spin_lock(&fp->fi_lock); 4587 fp->fi_had_conflict = true; 4588 nfsd_break_one_deleg(dp); 4589 spin_unlock(&fp->fi_lock); 4590 return ret; 4591 } 4592 4593 static bool nfsd_breaker_owns_lease(struct file_lock *fl) 4594 { 4595 struct nfs4_delegation *dl = fl->fl_owner; 4596 struct svc_rqst *rqst; 4597 struct nfs4_client *clp; 4598 4599 if (!i_am_nfsd()) 4600 return false; 4601 rqst = kthread_data(current); 4602 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */ 4603 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) 4604 return false; 4605 clp = *(rqst->rq_lease_breaker); 4606 return dl->dl_stid.sc_client == clp; 4607 } 4608 4609 static int 4610 nfsd_change_deleg_cb(struct file_lock *onlist, int arg, 4611 struct list_head *dispose) 4612 { 4613 if (arg & F_UNLCK) 4614 return lease_modify(onlist, arg, dispose); 4615 else 4616 return -EAGAIN; 4617 } 4618 4619 static const struct lock_manager_operations nfsd_lease_mng_ops = { 4620 .lm_breaker_owns_lease = nfsd_breaker_owns_lease, 4621 .lm_break = nfsd_break_deleg_cb, 4622 .lm_change = nfsd_change_deleg_cb, 4623 }; 4624 4625 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 4626 { 4627 if (nfsd4_has_session(cstate)) 4628 return nfs_ok; 4629 if (seqid == so->so_seqid - 1) 4630 return nfserr_replay_me; 4631 if (seqid == so->so_seqid) 4632 return nfs_ok; 4633 return nfserr_bad_seqid; 4634 } 4635 4636 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions, 4637 struct nfsd_net *nn) 4638 { 4639 struct nfs4_client *found; 4640 4641 spin_lock(&nn->client_lock); 4642 found = find_confirmed_client(clid, sessions, nn); 4643 if (found) 4644 atomic_inc(&found->cl_rpc_users); 4645 spin_unlock(&nn->client_lock); 4646 return found; 4647 } 4648 4649 static __be32 set_client(clientid_t *clid, 4650 struct nfsd4_compound_state *cstate, 4651 struct nfsd_net *nn) 4652 { 4653 if (cstate->clp) { 4654 if (!same_clid(&cstate->clp->cl_clientid, clid)) 4655 return nfserr_stale_clientid; 4656 return nfs_ok; 4657 } 4658 if (STALE_CLIENTID(clid, nn)) 4659 return nfserr_stale_clientid; 4660 /* 4661 * We're in the 4.0 case (otherwise the SEQUENCE op would have 4662 * set cstate->clp), so session = false: 4663 */ 4664 cstate->clp = lookup_clientid(clid, false, nn); 4665 if (!cstate->clp) 4666 return nfserr_expired; 4667 return nfs_ok; 4668 } 4669 4670 __be32 4671 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 4672 struct nfsd4_open *open, struct nfsd_net *nn) 4673 { 4674 clientid_t *clientid = &open->op_clientid; 4675 struct nfs4_client *clp = NULL; 4676 unsigned int strhashval; 4677 struct nfs4_openowner *oo = NULL; 4678 __be32 status; 4679 4680 /* 4681 * In case we need it later, after we've already created the 4682 * file and don't want to risk a further failure: 4683 */ 4684 open->op_file = nfsd4_alloc_file(); 4685 if (open->op_file == NULL) 4686 return nfserr_jukebox; 4687 4688 status = set_client(clientid, cstate, nn); 4689 if (status) 4690 return status; 4691 clp = cstate->clp; 4692 4693 strhashval =
ownerstr_hashval(&open->op_owner); 4694 oo = find_openstateowner_str(strhashval, open, clp); 4695 open->op_openowner = oo; 4696 if (!oo) { 4697 goto new_owner; 4698 } 4699 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 4700 /* Replace unconfirmed owners without checking for replay. */ 4701 release_openowner(oo); 4702 open->op_openowner = NULL; 4703 goto new_owner; 4704 } 4705 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 4706 if (status) 4707 return status; 4708 goto alloc_stateid; 4709 new_owner: 4710 oo = alloc_init_open_stateowner(strhashval, open, cstate); 4711 if (oo == NULL) 4712 return nfserr_jukebox; 4713 open->op_openowner = oo; 4714 alloc_stateid: 4715 open->op_stp = nfs4_alloc_open_stateid(clp); 4716 if (!open->op_stp) 4717 return nfserr_jukebox; 4718 4719 if (nfsd4_has_session(cstate) && 4720 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { 4721 open->op_odstate = alloc_clnt_odstate(clp); 4722 if (!open->op_odstate) 4723 return nfserr_jukebox; 4724 } 4725 4726 return nfs_ok; 4727 } 4728 4729 static inline __be32 4730 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 4731 { 4732 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 4733 return nfserr_openmode; 4734 else 4735 return nfs_ok; 4736 } 4737 4738 static int share_access_to_flags(u32 share_access) 4739 { 4740 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 4741 } 4742 4743 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 4744 { 4745 struct nfs4_stid *ret; 4746 4747 ret = find_stateid_by_type(cl, s, 4748 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); 4749 if (!ret) 4750 return NULL; 4751 return delegstateid(ret); 4752 } 4753 4754 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 4755 { 4756 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 4757 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 4758 } 4759 4760 static __be32 4761 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 4762 struct nfs4_delegation **dp) 4763 { 4764 int flags; 4765 __be32 status = nfserr_bad_stateid; 4766 struct nfs4_delegation *deleg; 4767 4768 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); 4769 if (deleg == NULL) 4770 goto out; 4771 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { 4772 nfs4_put_stid(&deleg->dl_stid); 4773 if (cl->cl_minorversion) 4774 status = nfserr_deleg_revoked; 4775 goto out; 4776 } 4777 flags = share_access_to_flags(open->op_share_access); 4778 status = nfs4_check_delegmode(deleg, flags); 4779 if (status) { 4780 nfs4_put_stid(&deleg->dl_stid); 4781 goto out; 4782 } 4783 *dp = deleg; 4784 out: 4785 if (!nfsd4_is_deleg_cur(open)) 4786 return nfs_ok; 4787 if (status) 4788 return status; 4789 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 4790 return nfs_ok; 4791 } 4792 4793 static inline int nfs4_access_to_access(u32 nfs4_access) 4794 { 4795 int flags = 0; 4796 4797 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 4798 flags |= NFSD_MAY_READ; 4799 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 4800 flags |= NFSD_MAY_WRITE; 4801 return flags; 4802 } 4803 4804 static inline __be32 4805 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 4806 struct nfsd4_open *open) 4807 { 4808 struct iattr iattr = { 4809 .ia_valid = ATTR_SIZE, 4810 .ia_size = 0, 4811 }; 4812 if (!open->op_truncate) 4813 return 0; 4814 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 4815 return nfserr_inval; 4816 return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0); 4817 } 4818 4819 static 
__be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 4820 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 4821 struct nfsd4_open *open) 4822 { 4823 struct nfsd_file *nf = NULL; 4824 __be32 status; 4825 int oflag = nfs4_access_to_omode(open->op_share_access); 4826 int access = nfs4_access_to_access(open->op_share_access); 4827 unsigned char old_access_bmap, old_deny_bmap; 4828 4829 spin_lock(&fp->fi_lock); 4830 4831 /* 4832 * Are we trying to set a deny mode that would conflict with 4833 * current access? 4834 */ 4835 status = nfs4_file_check_deny(fp, open->op_share_deny); 4836 if (status != nfs_ok) { 4837 spin_unlock(&fp->fi_lock); 4838 goto out; 4839 } 4840 4841 /* set access to the file */ 4842 status = nfs4_file_get_access(fp, open->op_share_access); 4843 if (status != nfs_ok) { 4844 spin_unlock(&fp->fi_lock); 4845 goto out; 4846 } 4847 4848 /* Set access bits in stateid */ 4849 old_access_bmap = stp->st_access_bmap; 4850 set_access(open->op_share_access, stp); 4851 4852 /* Set new deny mask */ 4853 old_deny_bmap = stp->st_deny_bmap; 4854 set_deny(open->op_share_deny, stp); 4855 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 4856 4857 if (!fp->fi_fds[oflag]) { 4858 spin_unlock(&fp->fi_lock); 4859 status = nfsd_file_acquire(rqstp, cur_fh, access, &nf); 4860 if (status) 4861 goto out_put_access; 4862 spin_lock(&fp->fi_lock); 4863 if (!fp->fi_fds[oflag]) { 4864 fp->fi_fds[oflag] = nf; 4865 nf = NULL; 4866 } 4867 } 4868 spin_unlock(&fp->fi_lock); 4869 if (nf) 4870 nfsd_file_put(nf); 4871 4872 status = nfsd4_truncate(rqstp, cur_fh, open); 4873 if (status) 4874 goto out_put_access; 4875 out: 4876 return status; 4877 out_put_access: 4878 stp->st_access_bmap = old_access_bmap; 4879 nfs4_file_put_access(fp, open->op_share_access); 4880 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); 4881 goto out; 4882 } 4883 4884 static __be32 4885 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) 4886 { 4887 __be32 status; 4888 unsigned char old_deny_bmap = stp->st_deny_bmap; 4889 4890 if (!test_access(open->op_share_access, stp)) 4891 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open); 4892 4893 /* test and set deny mode */ 4894 spin_lock(&fp->fi_lock); 4895 status = nfs4_file_check_deny(fp, open->op_share_deny); 4896 if (status == nfs_ok) { 4897 set_deny(open->op_share_deny, stp); 4898 fp->fi_share_deny |= 4899 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 4900 } 4901 spin_unlock(&fp->fi_lock); 4902 4903 if (status != nfs_ok) 4904 return status; 4905 4906 status = nfsd4_truncate(rqstp, cur_fh, open); 4907 if (status != nfs_ok) 4908 reset_union_bmap_deny(old_deny_bmap, stp); 4909 return status; 4910 } 4911 4912 /* Should we give out recallable state?: */ 4913 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 4914 { 4915 if (clp->cl_cb_state == NFSD4_CB_UP) 4916 return true; 4917 /* 4918 * In the sessions case, since we don't have to establish a 4919 * separate connection for callbacks, we assume it's OK 4920 * until we hear otherwise: 4921 */ 4922 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 4923 } 4924 4925 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, 4926 int flag) 4927 { 4928 struct file_lock *fl; 4929 4930 fl = locks_alloc_lock(); 4931 if (!fl) 4932 return NULL; 4933 fl->fl_lmops = &nfsd_lease_mng_ops; 4934 fl->fl_flags = FL_DELEG; 4935 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK; 4936 fl->fl_end = OFFSET_MAX; 4937 fl->fl_owner = (fl_owner_t)dp; 4938 fl->fl_pid = current->tgid; 4939 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; 4940 return fl; 4941 } 4942 4943 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, 4944 struct nfs4_file *fp) 4945 { 4946 struct nfs4_clnt_odstate *co; 4947 struct file *f = fp->fi_deleg_file->nf_file; 4948 struct inode *ino = locks_inode(f); 4949 int writes = atomic_read(&ino->i_writecount); 4950 4951 if (fp->fi_fds[O_WRONLY]) 4952 writes--; 4953 if (fp->fi_fds[O_RDWR]) 4954 writes--; 4955 if (writes > 0) 4956 return -EAGAIN; 4957 spin_lock(&fp->fi_lock); 4958 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { 4959 if (co->co_client != clp) { 4960 spin_unlock(&fp->fi_lock); 4961 return -EAGAIN; 4962 } 4963 } 4964 spin_unlock(&fp->fi_lock); 4965 return 0; 4966 } 4967 4968 static struct nfs4_delegation * 4969 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, 4970 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) 4971 { 4972 int status = 0; 4973 struct nfs4_delegation *dp; 4974 struct nfsd_file *nf; 4975 struct file_lock *fl; 4976 4977 /* 4978 * The fi_had_conflict and nfs_get_existing_delegation checks 4979 * here are just optimizations; we'll need to recheck them at 4980 * the end: 4981 */ 4982 if (fp->fi_had_conflict) 4983 return ERR_PTR(-EAGAIN); 4984 4985 nf = find_readable_file(fp); 4986 if (!nf) { 4987 /* 4988 * We probably could attempt another open and get a read 4989 * delegation, but for now, don't bother until the 4990 * client actually sends us one. 4991 */ 4992 return ERR_PTR(-EAGAIN); 4993 } 4994 spin_lock(&state_lock); 4995 spin_lock(&fp->fi_lock); 4996 if (nfs4_delegation_exists(clp, fp)) 4997 status = -EAGAIN; 4998 else if (!fp->fi_deleg_file) { 4999 fp->fi_deleg_file = nf; 5000 /* increment early to prevent fi_deleg_file from being 5001 * cleared */ 5002 fp->fi_delegees = 1; 5003 nf = NULL; 5004 } else 5005 fp->fi_delegees++; 5006 spin_unlock(&fp->fi_lock); 5007 spin_unlock(&state_lock); 5008 if (nf) 5009 nfsd_file_put(nf); 5010 if (status) 5011 return ERR_PTR(status); 5012 5013 status = -ENOMEM; 5014 dp = alloc_init_deleg(clp, fp, fh, odstate); 5015 if (!dp) 5016 goto out_delegees; 5017 5018 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ); 5019 if (!fl) 5020 goto out_clnt_odstate; 5021 5022 status = nfsd4_check_conflicting_opens(clp, fp); 5023 if (status) { 5024 locks_free_lock(fl); 5025 goto out_clnt_odstate; 5026 } 5027 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); 5028 if (fl) 5029 locks_free_lock(fl); 5030 if (status) 5031 goto out_clnt_odstate; 5032 status = nfsd4_check_conflicting_opens(clp, fp); 5033 if (status) 5034 goto out_clnt_odstate; 5035 5036 spin_lock(&state_lock); 5037 spin_lock(&fp->fi_lock); 5038 if (fp->fi_had_conflict) 5039 status = -EAGAIN; 5040 else 5041 status = hash_delegation_locked(dp, fp); 5042 spin_unlock(&fp->fi_lock); 5043 spin_unlock(&state_lock); 5044 5045 if (status) 5046 goto out_unlock; 5047 5048 return dp; 5049 out_unlock: 5050 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); 5051 out_clnt_odstate: 5052 put_clnt_odstate(dp->dl_clnt_odstate); 5053 nfs4_put_stid(&dp->dl_stid); 5054 out_delegees: 5055 put_deleg_file(fp); 5056 return ERR_PTR(status); 5057 } 5058 5059 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 5060 { 5061 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5062 if (status == -EAGAIN) 5063 open->op_why_no_deleg 
= WND4_CONTENTION; 5064 else { 5065 open->op_why_no_deleg = WND4_RESOURCE; 5066 switch (open->op_deleg_want) { 5067 case NFS4_SHARE_WANT_READ_DELEG: 5068 case NFS4_SHARE_WANT_WRITE_DELEG: 5069 case NFS4_SHARE_WANT_ANY_DELEG: 5070 break; 5071 case NFS4_SHARE_WANT_CANCEL: 5072 open->op_why_no_deleg = WND4_CANCELLED; 5073 break; 5074 case NFS4_SHARE_WANT_NO_DELEG: 5075 WARN_ON_ONCE(1); 5076 } 5077 } 5078 } 5079 5080 /* 5081 * Attempt to hand out a delegation. 5082 * 5083 * Note we don't support write delegations, and won't until the vfs has 5084 * proper support for them. 5085 */ 5086 static void 5087 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, 5088 struct nfs4_ol_stateid *stp) 5089 { 5090 struct nfs4_delegation *dp; 5091 struct nfs4_openowner *oo = openowner(stp->st_stateowner); 5092 struct nfs4_client *clp = stp->st_stid.sc_client; 5093 int cb_up; 5094 int status = 0; 5095 5096 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 5097 open->op_recall = 0; 5098 switch (open->op_claim_type) { 5099 case NFS4_OPEN_CLAIM_PREVIOUS: 5100 if (!cb_up) 5101 open->op_recall = 1; 5102 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ) 5103 goto out_no_deleg; 5104 break; 5105 case NFS4_OPEN_CLAIM_NULL: 5106 case NFS4_OPEN_CLAIM_FH: 5107 /* 5108 * Let's not give out any delegations till everyone's 5109 * had the chance to reclaim theirs, *and* until 5110 * NLM locks have all been reclaimed: 5111 */ 5112 if (locks_in_grace(clp->net)) 5113 goto out_no_deleg; 5114 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 5115 goto out_no_deleg; 5116 break; 5117 default: 5118 goto out_no_deleg; 5119 } 5120 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate); 5121 if (IS_ERR(dp)) 5122 goto out_no_deleg; 5123 5124 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 5125 5126 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); 5127 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 5128 nfs4_put_stid(&dp->dl_stid); 5129 return; 5130 out_no_deleg: 5131 /* check the client's requested type before clobbering it: */ 5132 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 5133 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { 5134 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 5135 open->op_recall = 1; 5136 } 5137 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; 5138 /* 4.1 client asking for a delegation? */ 5139 if (open->op_deleg_want) 5140 nfsd4_open_deleg_none_ext(open, status); 5141 return; 5142 } 5143 5144 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 5145 struct nfs4_delegation *dp) 5146 { 5147 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 5148 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 5149 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5150 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 5151 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 5152 dp->dl_type == NFS4_OPEN_DELEGATE_READ) { 5153 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5154 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 5155 } 5156 /* Otherwise the client must be confused, wanting a delegation 5157 * it already has; therefore we don't return 5158 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5159 */ 5160 } 5161 5162 __be32 5163 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 5164 { 5165 struct nfsd4_compoundres *resp = rqstp->rq_resp; 5166 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 5167 struct nfs4_file *fp = NULL; 5168 struct nfs4_ol_stateid *stp = NULL; 5169 struct nfs4_delegation *dp = NULL; 5170 __be32 status; 5171 bool new_stp = false; 5172 5173 /* 5174 * Lookup file; if found, lookup stateid and check open request, 5175 * and check for delegations in the process of being recalled. 5176 * If not found, create the nfs4_file struct 5177 */ 5178 fp = find_or_add_file(open->op_file, ¤t_fh->fh_handle); 5179 if (fp != open->op_file) { 5180 status = nfs4_check_deleg(cl, open, &dp); 5181 if (status) 5182 goto out; 5183 stp = nfsd4_find_and_lock_existing_open(fp, open); 5184 } else { 5185 open->op_file = NULL; 5186 status = nfserr_bad_stateid; 5187 if (nfsd4_is_deleg_cur(open)) 5188 goto out; 5189 } 5190 5191 if (!stp) { 5192 stp = init_open_stateid(fp, open); 5193 if (!open->op_stp) 5194 new_stp = true; 5195 } 5196 5197 /* 5198 * OPEN the file, or upgrade an existing OPEN. 5199 * If truncate fails, the OPEN fails. 5200 * 5201 * stp is already locked. 5202 */ 5203 if (!new_stp) { 5204 /* Stateid was found, this is an OPEN upgrade */ 5205 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 5206 if (status) { 5207 mutex_unlock(&stp->st_mutex); 5208 goto out; 5209 } 5210 } else { 5211 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); 5212 if (status) { 5213 stp->st_stid.sc_type = NFS4_CLOSED_STID; 5214 release_open_stateid(stp); 5215 mutex_unlock(&stp->st_mutex); 5216 goto out; 5217 } 5218 5219 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, 5220 open->op_odstate); 5221 if (stp->st_clnt_odstate == open->op_odstate) 5222 open->op_odstate = NULL; 5223 } 5224 5225 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 5226 mutex_unlock(&stp->st_mutex); 5227 5228 if (nfsd4_has_session(&resp->cstate)) { 5229 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 5230 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5231 open->op_why_no_deleg = WND4_NOT_WANTED; 5232 goto nodeleg; 5233 } 5234 } 5235 5236 /* 5237 * Attempt to hand out a delegation. No error return, because the 5238 * OPEN succeeds even if we fail. 5239 */ 5240 nfs4_open_delegation(current_fh, open, stp); 5241 nodeleg: 5242 status = nfs_ok; 5243 trace_nfsd_open(&stp->st_stid.sc_stateid); 5244 out: 5245 /* 4.1 client trying to upgrade/downgrade delegation? */ 5246 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 5247 open->op_deleg_want) 5248 nfsd4_deleg_xgrade_none_ext(open, dp); 5249 5250 if (fp) 5251 put_nfs4_file(fp); 5252 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 5253 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 5254 /* 5255 * To finish the open response, we just need to set the rflags. 
5256 */ 5257 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 5258 if (nfsd4_has_session(&resp->cstate)) 5259 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; 5260 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) 5261 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 5262 5263 if (dp) 5264 nfs4_put_stid(&dp->dl_stid); 5265 if (stp) 5266 nfs4_put_stid(&stp->st_stid); 5267 5268 return status; 5269 } 5270 5271 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, 5272 struct nfsd4_open *open) 5273 { 5274 if (open->op_openowner) { 5275 struct nfs4_stateowner *so = &open->op_openowner->oo_owner; 5276 5277 nfsd4_cstate_assign_replay(cstate, so); 5278 nfs4_put_stateowner(so); 5279 } 5280 if (open->op_file) 5281 kmem_cache_free(file_slab, open->op_file); 5282 if (open->op_stp) 5283 nfs4_put_stid(&open->op_stp->st_stid); 5284 if (open->op_odstate) 5285 kmem_cache_free(odstate_slab, open->op_odstate); 5286 } 5287 5288 __be32 5289 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5290 union nfsd4_op_u *u) 5291 { 5292 clientid_t *clid = &u->renew; 5293 struct nfs4_client *clp; 5294 __be32 status; 5295 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5296 5297 trace_nfsd_clid_renew(clid); 5298 status = set_client(clid, cstate, nn); 5299 if (status) 5300 return status; 5301 clp = cstate->clp; 5302 if (!list_empty(&clp->cl_delegations) 5303 && clp->cl_cb_state != NFSD4_CB_UP) 5304 return nfserr_cb_path_down; 5305 return nfs_ok; 5306 } 5307 5308 void 5309 nfsd4_end_grace(struct nfsd_net *nn) 5310 { 5311 /* do nothing if grace period already ended */ 5312 if (nn->grace_ended) 5313 return; 5314 5315 trace_nfsd_grace_complete(nn); 5316 nn->grace_ended = true; 5317 /* 5318 * If the server goes down again right now, an NFSv4 5319 * client will still be allowed to reclaim after it comes back up, 5320 * even if it hasn't yet had a chance to reclaim state this time. 5321 * 5322 */ 5323 nfsd4_record_grace_done(nn); 5324 /* 5325 * At this point, NFSv4 clients can still reclaim. But if the 5326 * server crashes, any that have not yet reclaimed will be out 5327 * of luck on the next boot. 5328 * 5329 * (NFSv4.1+ clients are considered to have reclaimed once they 5330 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to 5331 * have reclaimed after their first OPEN.) 5332 */ 5333 locks_end_grace(&nn->nfsd4_manager); 5334 /* 5335 * At this point, and once lockd and/or any other containers 5336 * exit their grace period, further reclaims will fail and 5337 * regular locking can resume. 5338 */ 5339 } 5340 5341 /* 5342 * If we've waited a lease period but there are still clients trying to 5343 * reclaim, wait a little longer to give them a chance to finish. 
5344 */ 5345 static bool clients_still_reclaiming(struct nfsd_net *nn) 5346 { 5347 time64_t double_grace_period_end = nn->boot_time + 5348 2 * nn->nfsd4_lease; 5349 5350 if (nn->track_reclaim_completes && 5351 atomic_read(&nn->nr_reclaim_complete) == 5352 nn->reclaim_str_hashtbl_size) 5353 return false; 5354 if (!nn->somebody_reclaimed) 5355 return false; 5356 nn->somebody_reclaimed = false; 5357 /* 5358 * If we've given them *two* lease times to reclaim, and they're 5359 * still not done, give up: 5360 */ 5361 if (ktime_get_boottime_seconds() > double_grace_period_end) 5362 return false; 5363 return true; 5364 } 5365 5366 static time64_t 5367 nfs4_laundromat(struct nfsd_net *nn) 5368 { 5369 struct nfs4_client *clp; 5370 struct nfs4_openowner *oo; 5371 struct nfs4_delegation *dp; 5372 struct nfs4_ol_stateid *stp; 5373 struct nfsd4_blocked_lock *nbl; 5374 struct list_head *pos, *next, reaplist; 5375 time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease; 5376 time64_t t, new_timeo = nn->nfsd4_lease; 5377 struct nfs4_cpntf_state *cps; 5378 copy_stateid_t *cps_t; 5379 int i; 5380 5381 if (clients_still_reclaiming(nn)) { 5382 new_timeo = 0; 5383 goto out; 5384 } 5385 nfsd4_end_grace(nn); 5386 INIT_LIST_HEAD(&reaplist); 5387 5388 spin_lock(&nn->s2s_cp_lock); 5389 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { 5390 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); 5391 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID && 5392 cps->cpntf_time > cutoff) 5393 _free_cpntf_state_locked(nn, cps); 5394 } 5395 spin_unlock(&nn->s2s_cp_lock); 5396 5397 spin_lock(&nn->client_lock); 5398 list_for_each_safe(pos, next, &nn->client_lru) { 5399 clp = list_entry(pos, struct nfs4_client, cl_lru); 5400 if (clp->cl_time > cutoff) { 5401 t = clp->cl_time - cutoff; 5402 new_timeo = min(new_timeo, t); 5403 break; 5404 } 5405 if (mark_client_expired_locked(clp)) { 5406 trace_nfsd_clid_expired(&clp->cl_clientid); 5407 continue; 5408 } 5409 list_add(&clp->cl_lru, &reaplist); 5410 } 5411 spin_unlock(&nn->client_lock); 5412 list_for_each_safe(pos, next, &reaplist) { 5413 clp = list_entry(pos, struct nfs4_client, cl_lru); 5414 trace_nfsd_clid_purged(&clp->cl_clientid); 5415 list_del_init(&clp->cl_lru); 5416 expire_client(clp); 5417 } 5418 spin_lock(&state_lock); 5419 list_for_each_safe(pos, next, &nn->del_recall_lru) { 5420 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 5421 if (dp->dl_time > cutoff) { 5422 t = dp->dl_time - cutoff; 5423 new_timeo = min(new_timeo, t); 5424 break; 5425 } 5426 WARN_ON(!unhash_delegation_locked(dp)); 5427 list_add(&dp->dl_recall_lru, &reaplist); 5428 } 5429 spin_unlock(&state_lock); 5430 while (!list_empty(&reaplist)) { 5431 dp = list_first_entry(&reaplist, struct nfs4_delegation, 5432 dl_recall_lru); 5433 list_del_init(&dp->dl_recall_lru); 5434 revoke_delegation(dp); 5435 } 5436 5437 spin_lock(&nn->client_lock); 5438 while (!list_empty(&nn->close_lru)) { 5439 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, 5440 oo_close_lru); 5441 if (oo->oo_time > cutoff) { 5442 t = oo->oo_time - cutoff; 5443 new_timeo = min(new_timeo, t); 5444 break; 5445 } 5446 list_del_init(&oo->oo_close_lru); 5447 stp = oo->oo_last_closed_stid; 5448 oo->oo_last_closed_stid = NULL; 5449 spin_unlock(&nn->client_lock); 5450 nfs4_put_stid(&stp->st_stid); 5451 spin_lock(&nn->client_lock); 5452 } 5453 spin_unlock(&nn->client_lock); 5454 5455 /* 5456 * It's possible for a client to try and acquire an already held lock 5457 * that is being held for a long time, and then 
lose interest in it. 5458 * So, we clean out any un-revisited request after a lease period 5459 * under the assumption that the client is no longer interested. 5460 * 5461 * RFC5661, sec. 9.6 states that the client must not rely on getting 5462 * notifications and must continue to poll for locks, even when the 5463 * server supports them. Thus this shouldn't lead to clients blocking 5464 * indefinitely once the lock does become free. 5465 */ 5466 BUG_ON(!list_empty(&reaplist)); 5467 spin_lock(&nn->blocked_locks_lock); 5468 while (!list_empty(&nn->blocked_locks_lru)) { 5469 nbl = list_first_entry(&nn->blocked_locks_lru, 5470 struct nfsd4_blocked_lock, nbl_lru); 5471 if (nbl->nbl_time > cutoff) { 5472 t = nbl->nbl_time - cutoff; 5473 new_timeo = min(new_timeo, t); 5474 break; 5475 } 5476 list_move(&nbl->nbl_lru, &reaplist); 5477 list_del_init(&nbl->nbl_list); 5478 } 5479 spin_unlock(&nn->blocked_locks_lock); 5480 5481 while (!list_empty(&reaplist)) { 5482 nbl = list_first_entry(&reaplist, 5483 struct nfsd4_blocked_lock, nbl_lru); 5484 list_del_init(&nbl->nbl_lru); 5485 free_blocked_lock(nbl); 5486 } 5487 out: 5488 new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 5489 return new_timeo; 5490 } 5491 5492 static struct workqueue_struct *laundry_wq; 5493 static void laundromat_main(struct work_struct *); 5494 5495 static void 5496 laundromat_main(struct work_struct *laundry) 5497 { 5498 time64_t t; 5499 struct delayed_work *dwork = to_delayed_work(laundry); 5500 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 5501 laundromat_work); 5502 5503 t = nfs4_laundromat(nn); 5504 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 5505 } 5506 5507 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) 5508 { 5509 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) 5510 return nfserr_bad_stateid; 5511 return nfs_ok; 5512 } 5513 5514 static inline int 5515 access_permit_read(struct nfs4_ol_stateid *stp) 5516 { 5517 return test_access(NFS4_SHARE_ACCESS_READ, stp) || 5518 test_access(NFS4_SHARE_ACCESS_BOTH, stp) || 5519 test_access(NFS4_SHARE_ACCESS_WRITE, stp); 5520 } 5521 5522 static inline int 5523 access_permit_write(struct nfs4_ol_stateid *stp) 5524 { 5525 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || 5526 test_access(NFS4_SHARE_ACCESS_BOTH, stp); 5527 } 5528 5529 static 5530 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 5531 { 5532 __be32 status = nfserr_openmode; 5533 5534 /* For lock stateid's, we test the parent open, not the lock: */ 5535 if (stp->st_openstp) 5536 stp = stp->st_openstp; 5537 if ((flags & WR_STATE) && !access_permit_write(stp)) 5538 goto out; 5539 if ((flags & RD_STATE) && !access_permit_read(stp)) 5540 goto out; 5541 status = nfs_ok; 5542 out: 5543 return status; 5544 } 5545 5546 static inline __be32 5547 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 5548 { 5549 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 5550 return nfs_ok; 5551 else if (opens_in_grace(net)) { 5552 /* Answer in remaining cases depends on existence of 5553 * conflicting state; so we must wait out the grace period. 
*/ 5554 return nfserr_grace; 5555 } else if (flags & WR_STATE) 5556 return nfs4_share_conflict(current_fh, 5557 NFS4_SHARE_DENY_WRITE); 5558 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 5559 return nfs4_share_conflict(current_fh, 5560 NFS4_SHARE_DENY_READ); 5561 } 5562 5563 /* 5564 * Allow READ/WRITE during grace period on recovered state only for files 5565 * that are not able to provide mandatory locking. 5566 */ 5567 static inline int 5568 grace_disallows_io(struct net *net, struct inode *inode) 5569 { 5570 return opens_in_grace(net) && mandatory_lock(inode); 5571 } 5572 5573 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 5574 { 5575 /* 5576 * When sessions are used the stateid generation number is ignored 5577 * when it is zero. 5578 */ 5579 if (has_session && in->si_generation == 0) 5580 return nfs_ok; 5581 5582 if (in->si_generation == ref->si_generation) 5583 return nfs_ok; 5584 5585 /* If the client sends us a stateid from the future, it's buggy: */ 5586 if (nfsd4_stateid_generation_after(in, ref)) 5587 return nfserr_bad_stateid; 5588 /* 5589 * However, we could see a stateid from the past, even from a 5590 * non-buggy client. For example, if the client sends a lock 5591 * while some IO is outstanding, the lock may bump si_generation 5592 * while the IO is still in flight. The client could avoid that 5593 * situation by waiting for responses on all the IO requests, 5594 * but better performance may result in retrying IO that 5595 * receives an old_stateid error if requests are rarely 5596 * reordered in flight: 5597 */ 5598 return nfserr_old_stateid; 5599 } 5600 5601 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session) 5602 { 5603 __be32 ret; 5604 5605 spin_lock(&s->sc_lock); 5606 ret = nfsd4_verify_open_stid(s); 5607 if (ret == nfs_ok) 5608 ret = check_stateid_generation(in, &s->sc_stateid, has_session); 5609 spin_unlock(&s->sc_lock); 5610 return ret; 5611 } 5612 5613 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) 5614 { 5615 if (ols->st_stateowner->so_is_open_owner && 5616 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 5617 return nfserr_bad_stateid; 5618 return nfs_ok; 5619 } 5620 5621 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 5622 { 5623 struct nfs4_stid *s; 5624 __be32 status = nfserr_bad_stateid; 5625 5626 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 5627 CLOSE_STATEID(stateid)) 5628 return status; 5629 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) 5630 return status; 5631 spin_lock(&cl->cl_lock); 5632 s = find_stateid_locked(cl, stateid); 5633 if (!s) 5634 goto out_unlock; 5635 status = nfsd4_stid_check_stateid_generation(stateid, s, 1); 5636 if (status) 5637 goto out_unlock; 5638 switch (s->sc_type) { 5639 case NFS4_DELEG_STID: 5640 status = nfs_ok; 5641 break; 5642 case NFS4_REVOKED_DELEG_STID: 5643 status = nfserr_deleg_revoked; 5644 break; 5645 case NFS4_OPEN_STID: 5646 case NFS4_LOCK_STID: 5647 status = nfsd4_check_openowner_confirmed(openlockstateid(s)); 5648 break; 5649 default: 5650 printk("unknown stateid type %x\n", s->sc_type); 5651 fallthrough; 5652 case NFS4_CLOSED_STID: 5653 case NFS4_CLOSED_DELEG_STID: 5654 status = nfserr_bad_stateid; 5655 } 5656 out_unlock: 5657 spin_unlock(&cl->cl_lock); 5658 return status; 5659 } 5660 5661 __be32 5662 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 5663 stateid_t *stateid, unsigned char typemask, 5664 
struct nfs4_stid **s, struct nfsd_net *nn) 5665 { 5666 __be32 status; 5667 bool return_revoked = false; 5668 5669 /* 5670 * only return revoked delegations if explicitly asked. 5671 * otherwise we report revoked or bad_stateid status. 5672 */ 5673 if (typemask & NFS4_REVOKED_DELEG_STID) 5674 return_revoked = true; 5675 else if (typemask & NFS4_DELEG_STID) 5676 typemask |= NFS4_REVOKED_DELEG_STID; 5677 5678 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 5679 CLOSE_STATEID(stateid)) 5680 return nfserr_bad_stateid; 5681 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); 5682 if (status == nfserr_stale_clientid) { 5683 if (cstate->session) 5684 return nfserr_bad_stateid; 5685 return nfserr_stale_stateid; 5686 } 5687 if (status) 5688 return status; 5689 *s = find_stateid_by_type(cstate->clp, stateid, typemask); 5690 if (!*s) 5691 return nfserr_bad_stateid; 5692 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { 5693 nfs4_put_stid(*s); 5694 if (cstate->minorversion) 5695 return nfserr_deleg_revoked; 5696 return nfserr_bad_stateid; 5697 } 5698 return nfs_ok; 5699 } 5700 5701 static struct nfsd_file * 5702 nfs4_find_file(struct nfs4_stid *s, int flags) 5703 { 5704 if (!s) 5705 return NULL; 5706 5707 switch (s->sc_type) { 5708 case NFS4_DELEG_STID: 5709 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file)) 5710 return NULL; 5711 return nfsd_file_get(s->sc_file->fi_deleg_file); 5712 case NFS4_OPEN_STID: 5713 case NFS4_LOCK_STID: 5714 if (flags & RD_STATE) 5715 return find_readable_file(s->sc_file); 5716 else 5717 return find_writeable_file(s->sc_file); 5718 } 5719 5720 return NULL; 5721 } 5722 5723 static __be32 5724 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags) 5725 { 5726 __be32 status; 5727 5728 status = nfsd4_check_openowner_confirmed(ols); 5729 if (status) 5730 return status; 5731 return nfs4_check_openmode(ols, flags); 5732 } 5733 5734 static __be32 5735 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, 5736 struct nfsd_file **nfp, int flags) 5737 { 5738 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE; 5739 struct nfsd_file *nf; 5740 __be32 status; 5741 5742 nf = nfs4_find_file(s, flags); 5743 if (nf) { 5744 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, 5745 acc | NFSD_MAY_OWNER_OVERRIDE); 5746 if (status) { 5747 nfsd_file_put(nf); 5748 goto out; 5749 } 5750 } else { 5751 status = nfsd_file_acquire(rqstp, fhp, acc, &nf); 5752 if (status) 5753 return status; 5754 } 5755 *nfp = nf; 5756 out: 5757 return status; 5758 } 5759 static void 5760 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 5761 { 5762 WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID); 5763 if (!refcount_dec_and_test(&cps->cp_stateid.sc_count)) 5764 return; 5765 list_del(&cps->cp_list); 5766 idr_remove(&nn->s2s_cp_stateids, 5767 cps->cp_stateid.stid.si_opaque.so_id); 5768 kfree(cps); 5769 } 5770 /* 5771 * A READ from an inter server to server COPY will have a 5772 * copy stateid. Look up the copy notify stateid from the 5773 * idr structure and take a reference on it. 
5774 */ 5775 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, 5776 struct nfs4_client *clp, 5777 struct nfs4_cpntf_state **cps) 5778 { 5779 copy_stateid_t *cps_t; 5780 struct nfs4_cpntf_state *state = NULL; 5781 5782 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) 5783 return nfserr_bad_stateid; 5784 spin_lock(&nn->s2s_cp_lock); 5785 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); 5786 if (cps_t) { 5787 state = container_of(cps_t, struct nfs4_cpntf_state, 5788 cp_stateid); 5789 if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) { 5790 state = NULL; 5791 goto unlock; 5792 } 5793 if (!clp) 5794 refcount_inc(&state->cp_stateid.sc_count); 5795 else 5796 _free_cpntf_state_locked(nn, state); 5797 } 5798 unlock: 5799 spin_unlock(&nn->s2s_cp_lock); 5800 if (!state) 5801 return nfserr_bad_stateid; 5802 if (!clp && state) 5803 *cps = state; 5804 return 0; 5805 } 5806 5807 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, 5808 struct nfs4_stid **stid) 5809 { 5810 __be32 status; 5811 struct nfs4_cpntf_state *cps = NULL; 5812 struct nfs4_client *found; 5813 5814 status = manage_cpntf_state(nn, st, NULL, &cps); 5815 if (status) 5816 return status; 5817 5818 cps->cpntf_time = ktime_get_boottime_seconds(); 5819 5820 status = nfserr_expired; 5821 found = lookup_clientid(&cps->cp_p_clid, true, nn); 5822 if (!found) 5823 goto out; 5824 5825 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, 5826 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID); 5827 if (*stid) 5828 status = nfs_ok; 5829 else 5830 status = nfserr_bad_stateid; 5831 5832 put_client_renew(found); 5833 out: 5834 nfs4_put_cpntf_state(nn, cps); 5835 return status; 5836 } 5837 5838 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 5839 { 5840 spin_lock(&nn->s2s_cp_lock); 5841 _free_cpntf_state_locked(nn, cps); 5842 spin_unlock(&nn->s2s_cp_lock); 5843 } 5844 5845 /* 5846 * Checks for stateid operations 5847 */ 5848 __be32 5849 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 5850 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 5851 stateid_t *stateid, int flags, struct nfsd_file **nfp, 5852 struct nfs4_stid **cstid) 5853 { 5854 struct inode *ino = d_inode(fhp->fh_dentry); 5855 struct net *net = SVC_NET(rqstp); 5856 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 5857 struct nfs4_stid *s = NULL; 5858 __be32 status; 5859 5860 if (nfp) 5861 *nfp = NULL; 5862 5863 if (grace_disallows_io(net, ino)) 5864 return nfserr_grace; 5865 5866 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 5867 status = check_special_stateids(net, fhp, stateid, flags); 5868 goto done; 5869 } 5870 5871 status = nfsd4_lookup_stateid(cstate, stateid, 5872 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 5873 &s, nn); 5874 if (status == nfserr_bad_stateid) 5875 status = find_cpntf_state(nn, stateid, &s); 5876 if (status) 5877 return status; 5878 status = nfsd4_stid_check_stateid_generation(stateid, s, 5879 nfsd4_has_session(cstate)); 5880 if (status) 5881 goto out; 5882 5883 switch (s->sc_type) { 5884 case NFS4_DELEG_STID: 5885 status = nfs4_check_delegmode(delegstateid(s), flags); 5886 break; 5887 case NFS4_OPEN_STID: 5888 case NFS4_LOCK_STID: 5889 status = nfs4_check_olstateid(openlockstateid(s), flags); 5890 break; 5891 default: 5892 status = nfserr_bad_stateid; 5893 break; 5894 } 5895 if (status) 5896 goto out; 5897 status = nfs4_check_fh(fhp, s); 5898 5899 done: 5900 if (status == nfs_ok && nfp) 5901 status = nfs4_check_file(rqstp, fhp, s, nfp, flags); 5902 out: 5903 if (s) { 5904 
if (!status && cstid) 5905 *cstid = s; 5906 else 5907 nfs4_put_stid(s); 5908 } 5909 return status; 5910 } 5911 5912 /* 5913 * Test if the stateid is valid 5914 */ 5915 __be32 5916 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5917 union nfsd4_op_u *u) 5918 { 5919 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; 5920 struct nfsd4_test_stateid_id *stateid; 5921 struct nfs4_client *cl = cstate->clp; 5922 5923 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 5924 stateid->ts_id_status = 5925 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 5926 5927 return nfs_ok; 5928 } 5929 5930 static __be32 5931 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) 5932 { 5933 struct nfs4_ol_stateid *stp = openlockstateid(s); 5934 __be32 ret; 5935 5936 ret = nfsd4_lock_ol_stateid(stp); 5937 if (ret) 5938 goto out_put_stid; 5939 5940 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 5941 if (ret) 5942 goto out; 5943 5944 ret = nfserr_locks_held; 5945 if (check_for_locks(stp->st_stid.sc_file, 5946 lockowner(stp->st_stateowner))) 5947 goto out; 5948 5949 release_lock_stateid(stp); 5950 ret = nfs_ok; 5951 5952 out: 5953 mutex_unlock(&stp->st_mutex); 5954 out_put_stid: 5955 nfs4_put_stid(s); 5956 return ret; 5957 } 5958 5959 __be32 5960 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5961 union nfsd4_op_u *u) 5962 { 5963 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; 5964 stateid_t *stateid = &free_stateid->fr_stateid; 5965 struct nfs4_stid *s; 5966 struct nfs4_delegation *dp; 5967 struct nfs4_client *cl = cstate->clp; 5968 __be32 ret = nfserr_bad_stateid; 5969 5970 spin_lock(&cl->cl_lock); 5971 s = find_stateid_locked(cl, stateid); 5972 if (!s) 5973 goto out_unlock; 5974 spin_lock(&s->sc_lock); 5975 switch (s->sc_type) { 5976 case NFS4_DELEG_STID: 5977 ret = nfserr_locks_held; 5978 break; 5979 case NFS4_OPEN_STID: 5980 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 5981 if (ret) 5982 break; 5983 ret = nfserr_locks_held; 5984 break; 5985 case NFS4_LOCK_STID: 5986 spin_unlock(&s->sc_lock); 5987 refcount_inc(&s->sc_count); 5988 spin_unlock(&cl->cl_lock); 5989 ret = nfsd4_free_lock_stateid(stateid, s); 5990 goto out; 5991 case NFS4_REVOKED_DELEG_STID: 5992 spin_unlock(&s->sc_lock); 5993 dp = delegstateid(s); 5994 list_del_init(&dp->dl_recall_lru); 5995 spin_unlock(&cl->cl_lock); 5996 nfs4_put_stid(s); 5997 ret = nfs_ok; 5998 goto out; 5999 /* Default falls through and returns nfserr_bad_stateid */ 6000 } 6001 spin_unlock(&s->sc_lock); 6002 out_unlock: 6003 spin_unlock(&cl->cl_lock); 6004 out: 6005 return ret; 6006 } 6007 6008 static inline int 6009 setlkflg (int type) 6010 { 6011 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 
6012 RD_STATE : WR_STATE; 6013 } 6014 6015 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) 6016 { 6017 struct svc_fh *current_fh = &cstate->current_fh; 6018 struct nfs4_stateowner *sop = stp->st_stateowner; 6019 __be32 status; 6020 6021 status = nfsd4_check_seqid(cstate, sop, seqid); 6022 if (status) 6023 return status; 6024 status = nfsd4_lock_ol_stateid(stp); 6025 if (status != nfs_ok) 6026 return status; 6027 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 6028 if (status == nfs_ok) 6029 status = nfs4_check_fh(current_fh, &stp->st_stid); 6030 if (status != nfs_ok) 6031 mutex_unlock(&stp->st_mutex); 6032 return status; 6033 } 6034 6035 /* 6036 * Checks for sequence id mutating operations. 6037 */ 6038 static __be32 6039 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 6040 stateid_t *stateid, char typemask, 6041 struct nfs4_ol_stateid **stpp, 6042 struct nfsd_net *nn) 6043 { 6044 __be32 status; 6045 struct nfs4_stid *s; 6046 struct nfs4_ol_stateid *stp = NULL; 6047 6048 trace_nfsd_preprocess(seqid, stateid); 6049 6050 *stpp = NULL; 6051 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); 6052 if (status) 6053 return status; 6054 stp = openlockstateid(s); 6055 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); 6056 6057 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); 6058 if (!status) 6059 *stpp = stp; 6060 else 6061 nfs4_put_stid(&stp->st_stid); 6062 return status; 6063 } 6064 6065 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 6066 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 6067 { 6068 __be32 status; 6069 struct nfs4_openowner *oo; 6070 struct nfs4_ol_stateid *stp; 6071 6072 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 6073 NFS4_OPEN_STID, &stp, nn); 6074 if (status) 6075 return status; 6076 oo = openowner(stp->st_stateowner); 6077 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 6078 mutex_unlock(&stp->st_mutex); 6079 nfs4_put_stid(&stp->st_stid); 6080 return nfserr_bad_stateid; 6081 } 6082 *stpp = stp; 6083 return nfs_ok; 6084 } 6085 6086 __be32 6087 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6088 union nfsd4_op_u *u) 6089 { 6090 struct nfsd4_open_confirm *oc = &u->open_confirm; 6091 __be32 status; 6092 struct nfs4_openowner *oo; 6093 struct nfs4_ol_stateid *stp; 6094 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6095 6096 dprintk("NFSD: nfsd4_open_confirm on file %pd\n", 6097 cstate->current_fh.fh_dentry); 6098 6099 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 6100 if (status) 6101 return status; 6102 6103 status = nfs4_preprocess_seqid_op(cstate, 6104 oc->oc_seqid, &oc->oc_req_stateid, 6105 NFS4_OPEN_STID, &stp, nn); 6106 if (status) 6107 goto out; 6108 oo = openowner(stp->st_stateowner); 6109 status = nfserr_bad_stateid; 6110 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 6111 mutex_unlock(&stp->st_mutex); 6112 goto put_stateid; 6113 } 6114 oo->oo_flags |= NFS4_OO_CONFIRMED; 6115 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 6116 mutex_unlock(&stp->st_mutex); 6117 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); 6118 nfsd4_client_record_create(oo->oo_owner.so_client); 6119 status = nfs_ok; 6120 put_stateid: 6121 nfs4_put_stid(&stp->st_stid); 6122 out: 6123 nfsd4_bump_seqid(cstate, status); 6124 return status; 6125 } 
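/*
 * The seqid-mutating operations (OPEN_CONFIRM above; OPEN_DOWNGRADE,
 * CLOSE, LOCK and LOCKU below) all share roughly the same shape.  A
 * minimal sketch of the calling convention, with per-operation details
 * elided (resp_stateid stands for the operation's reply stateid field):
 *
 *	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
 *					  typemask, &stp, nn);
 *	if (status)
 *		goto out;
 *	... mutate the state; stp->st_mutex is held throughout ...
 *	nfs4_inc_and_copy_stateid(&resp_stateid, &stp->st_stid);
 *	mutex_unlock(&stp->st_mutex);
 *	nfs4_put_stid(&stp->st_stid);
 * out:
 *	nfsd4_bump_seqid(cstate, status);
 *
 * Individual operations differ in where they unlock and in their error
 * handling, so treat this as a sketch rather than a template.
 */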
6126 6127 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 6128 { 6129 if (!test_access(access, stp)) 6130 return; 6131 nfs4_file_put_access(stp->st_stid.sc_file, access); 6132 clear_access(access, stp); 6133 } 6134 6135 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 6136 { 6137 switch (to_access) { 6138 case NFS4_SHARE_ACCESS_READ: 6139 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 6140 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 6141 break; 6142 case NFS4_SHARE_ACCESS_WRITE: 6143 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 6144 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 6145 break; 6146 case NFS4_SHARE_ACCESS_BOTH: 6147 break; 6148 default: 6149 WARN_ON_ONCE(1); 6150 } 6151 } 6152 6153 __be32 6154 nfsd4_open_downgrade(struct svc_rqst *rqstp, 6155 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 6156 { 6157 struct nfsd4_open_downgrade *od = &u->open_downgrade; 6158 __be32 status; 6159 struct nfs4_ol_stateid *stp; 6160 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6161 6162 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 6163 cstate->current_fh.fh_dentry); 6164 6165 /* We don't yet support WANT bits: */ 6166 if (od->od_deleg_want) 6167 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 6168 od->od_deleg_want); 6169 6170 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 6171 &od->od_stateid, &stp, nn); 6172 if (status) 6173 goto out; 6174 status = nfserr_inval; 6175 if (!test_access(od->od_share_access, stp)) { 6176 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", 6177 stp->st_access_bmap, od->od_share_access); 6178 goto put_stateid; 6179 } 6180 if (!test_deny(od->od_share_deny, stp)) { 6181 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", 6182 stp->st_deny_bmap, od->od_share_deny); 6183 goto put_stateid; 6184 } 6185 nfs4_stateid_downgrade(stp, od->od_share_access); 6186 reset_union_bmap_deny(od->od_share_deny, stp); 6187 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 6188 status = nfs_ok; 6189 put_stateid: 6190 mutex_unlock(&stp->st_mutex); 6191 nfs4_put_stid(&stp->st_stid); 6192 out: 6193 nfsd4_bump_seqid(cstate, status); 6194 return status; 6195 } 6196 6197 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 6198 { 6199 struct nfs4_client *clp = s->st_stid.sc_client; 6200 bool unhashed; 6201 LIST_HEAD(reaplist); 6202 6203 spin_lock(&clp->cl_lock); 6204 unhashed = unhash_open_stateid(s, &reaplist); 6205 6206 if (clp->cl_minorversion) { 6207 if (unhashed) 6208 put_ol_stateid_locked(s, &reaplist); 6209 spin_unlock(&clp->cl_lock); 6210 free_ol_stateid_reaplist(&reaplist); 6211 } else { 6212 spin_unlock(&clp->cl_lock); 6213 free_ol_stateid_reaplist(&reaplist); 6214 if (unhashed) 6215 move_to_close_lru(s, clp->net); 6216 } 6217 } 6218 6219 /* 6220 * nfs4_unlock_state() called after encode 6221 */ 6222 __be32 6223 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6224 union nfsd4_op_u *u) 6225 { 6226 struct nfsd4_close *close = &u->close; 6227 __be32 status; 6228 struct nfs4_ol_stateid *stp; 6229 struct net *net = SVC_NET(rqstp); 6230 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6231 6232 dprintk("NFSD: nfsd4_close on file %pd\n", 6233 cstate->current_fh.fh_dentry); 6234 6235 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 6236 &close->cl_stateid, 6237 
NFS4_OPEN_STID|NFS4_CLOSED_STID, 6238 &stp, nn); 6239 nfsd4_bump_seqid(cstate, status); 6240 if (status) 6241 goto out; 6242 6243 stp->st_stid.sc_type = NFS4_CLOSED_STID; 6244 6245 /* 6246 * Technically we don't _really_ have to increment or copy it, since 6247 * it should just be gone after this operation and we clobber the 6248 * copied value below, but we continue to do so here just to ensure 6249 * that racing ops see that there was a state change. 6250 */ 6251 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 6252 6253 nfsd4_close_open_stateid(stp); 6254 mutex_unlock(&stp->st_mutex); 6255 6256 /* v4.1+ suggests that we send a special stateid in here, since the 6257 * clients should just ignore this anyway. Since this is not useful 6258 * for v4.0 clients either, we set it to the special close_stateid 6259 * universally. 6260 * 6261 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5 6262 */ 6263 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); 6264 6265 /* put reference from nfs4_preprocess_seqid_op */ 6266 nfs4_put_stid(&stp->st_stid); 6267 out: 6268 return status; 6269 } 6270 6271 __be32 6272 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6273 union nfsd4_op_u *u) 6274 { 6275 struct nfsd4_delegreturn *dr = &u->delegreturn; 6276 struct nfs4_delegation *dp; 6277 stateid_t *stateid = &dr->dr_stateid; 6278 struct nfs4_stid *s; 6279 __be32 status; 6280 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6281 6282 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 6283 return status; 6284 6285 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); 6286 if (status) 6287 goto out; 6288 dp = delegstateid(s); 6289 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); 6290 if (status) 6291 goto put_stateid; 6292 6293 destroy_delegation(dp); 6294 put_stateid: 6295 nfs4_put_stid(&dp->dl_stid); 6296 out: 6297 return status; 6298 } 6299 6300 static inline u64 6301 end_offset(u64 start, u64 len) 6302 { 6303 u64 end; 6304 6305 end = start + len; 6306 return end >= start ? end: NFS4_MAX_UINT64; 6307 } 6308 6309 /* last octet in a range */ 6310 static inline u64 6311 last_byte_offset(u64 start, u64 len) 6312 { 6313 u64 end; 6314 6315 WARN_ON_ONCE(!len); 6316 end = start + len; 6317 return end > start ? end - 1: NFS4_MAX_UINT64; 6318 } 6319 6320 /* 6321 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 6322 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 6323 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 6324 * locking, this prevents us from being completely protocol-compliant. The 6325 * real solution to this problem is to start using unsigned file offsets in 6326 * the VFS, but this is a very deep change! 
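 *
 * Concretely: a LOCK at offset 2^63 arrives as a u64 that becomes
 * negative once stored in the signed loff_t fl_start, and
 * nfs4_transform_lock_offset() below then clamps it to OFFSET_MAX, so
 * the server ends up locking a slightly different range than the one
 * the client asked for.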
6327 */ 6328 static inline void 6329 nfs4_transform_lock_offset(struct file_lock *lock) 6330 { 6331 if (lock->fl_start < 0) 6332 lock->fl_start = OFFSET_MAX; 6333 if (lock->fl_end < 0) 6334 lock->fl_end = OFFSET_MAX; 6335 } 6336 6337 static fl_owner_t 6338 nfsd4_fl_get_owner(fl_owner_t owner) 6339 { 6340 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 6341 6342 nfs4_get_stateowner(&lo->lo_owner); 6343 return owner; 6344 } 6345 6346 static void 6347 nfsd4_fl_put_owner(fl_owner_t owner) 6348 { 6349 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 6350 6351 if (lo) 6352 nfs4_put_stateowner(&lo->lo_owner); 6353 } 6354 6355 static void 6356 nfsd4_lm_notify(struct file_lock *fl) 6357 { 6358 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; 6359 struct net *net = lo->lo_owner.so_client->net; 6360 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6361 struct nfsd4_blocked_lock *nbl = container_of(fl, 6362 struct nfsd4_blocked_lock, nbl_lock); 6363 bool queue = false; 6364 6365 /* An empty list means that something else is going to be using it */ 6366 spin_lock(&nn->blocked_locks_lock); 6367 if (!list_empty(&nbl->nbl_list)) { 6368 list_del_init(&nbl->nbl_list); 6369 list_del_init(&nbl->nbl_lru); 6370 queue = true; 6371 } 6372 spin_unlock(&nn->blocked_locks_lock); 6373 6374 if (queue) 6375 nfsd4_run_cb(&nbl->nbl_cb); 6376 } 6377 6378 static const struct lock_manager_operations nfsd_posix_mng_ops = { 6379 .lm_notify = nfsd4_lm_notify, 6380 .lm_get_owner = nfsd4_fl_get_owner, 6381 .lm_put_owner = nfsd4_fl_put_owner, 6382 }; 6383 6384 static inline void 6385 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 6386 { 6387 struct nfs4_lockowner *lo; 6388 6389 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 6390 lo = (struct nfs4_lockowner *) fl->fl_owner; 6391 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, 6392 GFP_KERNEL); 6393 if (!deny->ld_owner.data) 6394 /* We just don't care that much */ 6395 goto nevermind; 6396 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 6397 } else { 6398 nevermind: 6399 deny->ld_owner.len = 0; 6400 deny->ld_owner.data = NULL; 6401 deny->ld_clientid.cl_boot = 0; 6402 deny->ld_clientid.cl_id = 0; 6403 } 6404 deny->ld_start = fl->fl_start; 6405 deny->ld_length = NFS4_MAX_UINT64; 6406 if (fl->fl_end != NFS4_MAX_UINT64) 6407 deny->ld_length = fl->fl_end - fl->fl_start + 1; 6408 deny->ld_type = NFS4_READ_LT; 6409 if (fl->fl_type != F_RDLCK) 6410 deny->ld_type = NFS4_WRITE_LT; 6411 } 6412 6413 static struct nfs4_lockowner * 6414 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner) 6415 { 6416 unsigned int strhashval = ownerstr_hashval(owner); 6417 struct nfs4_stateowner *so; 6418 6419 lockdep_assert_held(&clp->cl_lock); 6420 6421 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], 6422 so_strhash) { 6423 if (so->so_is_open_owner) 6424 continue; 6425 if (same_owner_str(so, owner)) 6426 return lockowner(nfs4_get_stateowner(so)); 6427 } 6428 return NULL; 6429 } 6430 6431 static struct nfs4_lockowner * 6432 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) 6433 { 6434 struct nfs4_lockowner *lo; 6435 6436 spin_lock(&clp->cl_lock); 6437 lo = find_lockowner_str_locked(clp, owner); 6438 spin_unlock(&clp->cl_lock); 6439 return lo; 6440 } 6441 6442 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) 6443 { 6444 unhash_lockowner_locked(lockowner(sop)); 6445 } 6446 6447 static void nfs4_free_lockowner(struct nfs4_stateowner *sop) 6448 { 6449 struct 
nfs4_lockowner *lo = lockowner(sop); 6450 6451 kmem_cache_free(lockowner_slab, lo); 6452 } 6453 6454 static const struct nfs4_stateowner_operations lockowner_ops = { 6455 .so_unhash = nfs4_unhash_lockowner, 6456 .so_free = nfs4_free_lockowner, 6457 }; 6458 6459 /* 6460 * Alloc a lock owner structure. 6461 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 6462 * occurred. 6463 * 6464 * strhashval = ownerstr_hashval 6465 */ 6466 static struct nfs4_lockowner * 6467 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, 6468 struct nfs4_ol_stateid *open_stp, 6469 struct nfsd4_lock *lock) 6470 { 6471 struct nfs4_lockowner *lo, *ret; 6472 6473 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 6474 if (!lo) 6475 return NULL; 6476 INIT_LIST_HEAD(&lo->lo_blocked); 6477 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 6478 lo->lo_owner.so_is_open_owner = 0; 6479 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; 6480 lo->lo_owner.so_ops = &lockowner_ops; 6481 spin_lock(&clp->cl_lock); 6482 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); 6483 if (ret == NULL) { 6484 list_add(&lo->lo_owner.so_strhash, 6485 &clp->cl_ownerstr_hashtbl[strhashval]); 6486 ret = lo; 6487 } else 6488 nfs4_free_stateowner(&lo->lo_owner); 6489 6490 spin_unlock(&clp->cl_lock); 6491 return ret; 6492 } 6493 6494 static struct nfs4_ol_stateid * 6495 find_lock_stateid(const struct nfs4_lockowner *lo, 6496 const struct nfs4_ol_stateid *ost) 6497 { 6498 struct nfs4_ol_stateid *lst; 6499 6500 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); 6501 6502 /* If ost is not hashed, ost->st_locks will not be valid */ 6503 if (!nfs4_ol_stateid_unhashed(ost)) 6504 list_for_each_entry(lst, &ost->st_locks, st_locks) { 6505 if (lst->st_stateowner == &lo->lo_owner) { 6506 refcount_inc(&lst->st_stid.sc_count); 6507 return lst; 6508 } 6509 } 6510 return NULL; 6511 } 6512 6513 static struct nfs4_ol_stateid * 6514 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, 6515 struct nfs4_file *fp, struct inode *inode, 6516 struct nfs4_ol_stateid *open_stp) 6517 { 6518 struct nfs4_client *clp = lo->lo_owner.so_client; 6519 struct nfs4_ol_stateid *retstp; 6520 6521 mutex_init(&stp->st_mutex); 6522 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 6523 retry: 6524 spin_lock(&clp->cl_lock); 6525 if (nfs4_ol_stateid_unhashed(open_stp)) 6526 goto out_close; 6527 retstp = find_lock_stateid(lo, open_stp); 6528 if (retstp) 6529 goto out_found; 6530 refcount_inc(&stp->st_stid.sc_count); 6531 stp->st_stid.sc_type = NFS4_LOCK_STID; 6532 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); 6533 get_nfs4_file(fp); 6534 stp->st_stid.sc_file = fp; 6535 stp->st_access_bmap = 0; 6536 stp->st_deny_bmap = open_stp->st_deny_bmap; 6537 stp->st_openstp = open_stp; 6538 spin_lock(&fp->fi_lock); 6539 list_add(&stp->st_locks, &open_stp->st_locks); 6540 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 6541 list_add(&stp->st_perfile, &fp->fi_stateids); 6542 spin_unlock(&fp->fi_lock); 6543 spin_unlock(&clp->cl_lock); 6544 return stp; 6545 out_found: 6546 spin_unlock(&clp->cl_lock); 6547 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 6548 nfs4_put_stid(&retstp->st_stid); 6549 goto retry; 6550 } 6551 /* To keep mutex tracking happy */ 6552 mutex_unlock(&stp->st_mutex); 6553 return retstp; 6554 out_close: 6555 spin_unlock(&clp->cl_lock); 6556 mutex_unlock(&stp->st_mutex); 6557 return NULL; 6558 } 6559 6560 static struct nfs4_ol_stateid * 6561 find_or_create_lock_stateid(struct 
nfs4_lockowner *lo, struct nfs4_file *fi, 6562 struct inode *inode, struct nfs4_ol_stateid *ost, 6563 bool *new) 6564 { 6565 struct nfs4_stid *ns = NULL; 6566 struct nfs4_ol_stateid *lst; 6567 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 6568 struct nfs4_client *clp = oo->oo_owner.so_client; 6569 6570 *new = false; 6571 spin_lock(&clp->cl_lock); 6572 lst = find_lock_stateid(lo, ost); 6573 spin_unlock(&clp->cl_lock); 6574 if (lst != NULL) { 6575 if (nfsd4_lock_ol_stateid(lst) == nfs_ok) 6576 goto out; 6577 nfs4_put_stid(&lst->st_stid); 6578 } 6579 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); 6580 if (ns == NULL) 6581 return NULL; 6582 6583 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost); 6584 if (lst == openlockstateid(ns)) 6585 *new = true; 6586 else 6587 nfs4_put_stid(ns); 6588 out: 6589 return lst; 6590 } 6591 6592 static int 6593 check_lock_length(u64 offset, u64 length) 6594 { 6595 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 6596 (length > ~offset))); 6597 } 6598 6599 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 6600 { 6601 struct nfs4_file *fp = lock_stp->st_stid.sc_file; 6602 6603 lockdep_assert_held(&fp->fi_lock); 6604 6605 if (test_access(access, lock_stp)) 6606 return; 6607 __nfs4_file_get_access(fp, access); 6608 set_access(access, lock_stp); 6609 } 6610 6611 static __be32 6612 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 6613 struct nfs4_ol_stateid *ost, 6614 struct nfsd4_lock *lock, 6615 struct nfs4_ol_stateid **plst, bool *new) 6616 { 6617 __be32 status; 6618 struct nfs4_file *fi = ost->st_stid.sc_file; 6619 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 6620 struct nfs4_client *cl = oo->oo_owner.so_client; 6621 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 6622 struct nfs4_lockowner *lo; 6623 struct nfs4_ol_stateid *lst; 6624 unsigned int strhashval; 6625 6626 lo = find_lockowner_str(cl, &lock->lk_new_owner); 6627 if (!lo) { 6628 strhashval = ownerstr_hashval(&lock->lk_new_owner); 6629 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 6630 if (lo == NULL) 6631 return nfserr_jukebox; 6632 } else { 6633 /* with an existing lockowner, seqids must be the same */ 6634 status = nfserr_bad_seqid; 6635 if (!cstate->minorversion && 6636 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) 6637 goto out; 6638 } 6639 6640 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 6641 if (lst == NULL) { 6642 status = nfserr_jukebox; 6643 goto out; 6644 } 6645 6646 status = nfs_ok; 6647 *plst = lst; 6648 out: 6649 nfs4_put_stateowner(&lo->lo_owner); 6650 return status; 6651 } 6652 6653 /* 6654 * LOCK operation 6655 */ 6656 __be32 6657 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6658 union nfsd4_op_u *u) 6659 { 6660 struct nfsd4_lock *lock = &u->lock; 6661 struct nfs4_openowner *open_sop = NULL; 6662 struct nfs4_lockowner *lock_sop = NULL; 6663 struct nfs4_ol_stateid *lock_stp = NULL; 6664 struct nfs4_ol_stateid *open_stp = NULL; 6665 struct nfs4_file *fp; 6666 struct nfsd_file *nf = NULL; 6667 struct nfsd4_blocked_lock *nbl = NULL; 6668 struct file_lock *file_lock = NULL; 6669 struct file_lock *conflock = NULL; 6670 __be32 status = 0; 6671 int lkflg; 6672 int err; 6673 bool new = false; 6674 unsigned char fl_type; 6675 unsigned int fl_flags = FL_POSIX; 6676 struct net *net = SVC_NET(rqstp); 6677 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6678 6679 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 6680 
(long long) lock->lk_offset, 6681 (long long) lock->lk_length); 6682 6683 if (check_lock_length(lock->lk_offset, lock->lk_length)) 6684 return nfserr_inval; 6685 6686 if ((status = fh_verify(rqstp, &cstate->current_fh, 6687 S_IFREG, NFSD_MAY_LOCK))) { 6688 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 6689 return status; 6690 } 6691 6692 if (lock->lk_is_new) { 6693 if (nfsd4_has_session(cstate)) 6694 /* See rfc 5661 18.10.3: given clientid is ignored: */ 6695 memcpy(&lock->lk_new_clientid, 6696 &cstate->clp->cl_clientid, 6697 sizeof(clientid_t)); 6698 6699 /* validate and update open stateid and open seqid */ 6700 status = nfs4_preprocess_confirmed_seqid_op(cstate, 6701 lock->lk_new_open_seqid, 6702 &lock->lk_new_open_stateid, 6703 &open_stp, nn); 6704 if (status) 6705 goto out; 6706 mutex_unlock(&open_stp->st_mutex); 6707 open_sop = openowner(open_stp->st_stateowner); 6708 status = nfserr_bad_stateid; 6709 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 6710 &lock->lk_new_clientid)) 6711 goto out; 6712 status = lookup_or_create_lock_state(cstate, open_stp, lock, 6713 &lock_stp, &new); 6714 } else { 6715 status = nfs4_preprocess_seqid_op(cstate, 6716 lock->lk_old_lock_seqid, 6717 &lock->lk_old_lock_stateid, 6718 NFS4_LOCK_STID, &lock_stp, nn); 6719 } 6720 if (status) 6721 goto out; 6722 lock_sop = lockowner(lock_stp->st_stateowner); 6723 6724 lkflg = setlkflg(lock->lk_type); 6725 status = nfs4_check_openmode(lock_stp, lkflg); 6726 if (status) 6727 goto out; 6728 6729 status = nfserr_grace; 6730 if (locks_in_grace(net) && !lock->lk_reclaim) 6731 goto out; 6732 status = nfserr_no_grace; 6733 if (!locks_in_grace(net) && lock->lk_reclaim) 6734 goto out; 6735 6736 fp = lock_stp->st_stid.sc_file; 6737 switch (lock->lk_type) { 6738 case NFS4_READW_LT: 6739 if (nfsd4_has_session(cstate)) 6740 fl_flags |= FL_SLEEP; 6741 fallthrough; 6742 case NFS4_READ_LT: 6743 spin_lock(&fp->fi_lock); 6744 nf = find_readable_file_locked(fp); 6745 if (nf) 6746 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 6747 spin_unlock(&fp->fi_lock); 6748 fl_type = F_RDLCK; 6749 break; 6750 case NFS4_WRITEW_LT: 6751 if (nfsd4_has_session(cstate)) 6752 fl_flags |= FL_SLEEP; 6753 fallthrough; 6754 case NFS4_WRITE_LT: 6755 spin_lock(&fp->fi_lock); 6756 nf = find_writeable_file_locked(fp); 6757 if (nf) 6758 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 6759 spin_unlock(&fp->fi_lock); 6760 fl_type = F_WRLCK; 6761 break; 6762 default: 6763 status = nfserr_inval; 6764 goto out; 6765 } 6766 6767 if (!nf) { 6768 status = nfserr_openmode; 6769 goto out; 6770 } 6771 6772 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); 6773 if (!nbl) { 6774 dprintk("NFSD: %s: unable to allocate block!\n", __func__); 6775 status = nfserr_jukebox; 6776 goto out; 6777 } 6778 6779 file_lock = &nbl->nbl_lock; 6780 file_lock->fl_type = fl_type; 6781 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); 6782 file_lock->fl_pid = current->tgid; 6783 file_lock->fl_file = nf->nf_file; 6784 file_lock->fl_flags = fl_flags; 6785 file_lock->fl_lmops = &nfsd_posix_mng_ops; 6786 file_lock->fl_start = lock->lk_offset; 6787 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 6788 nfs4_transform_lock_offset(file_lock); 6789 6790 conflock = locks_alloc_lock(); 6791 if (!conflock) { 6792 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 6793 status = nfserr_jukebox; 6794 goto out; 6795 } 6796 6797 if (fl_flags & FL_SLEEP) { 6798 nbl->nbl_time = ktime_get_boottime_seconds(); 6799 
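/*
 * Queue the blocked lock before calling vfs_lock_file(): if the
 * request is deferred, nfsd4_lm_notify() can fire as soon as the
 * conflicting lock goes away, and it takes ownership of the entry
 * only while the entry is still on these lists; the LRU listing
 * also lets the laundromat time out blocked locks that are never
 * granted.
 */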
spin_lock(&nn->blocked_locks_lock); 6800 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 6801 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 6802 spin_unlock(&nn->blocked_locks_lock); 6803 } 6804 6805 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); 6806 switch (err) { 6807 case 0: /* success! */ 6808 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); 6809 status = 0; 6810 if (lock->lk_reclaim) 6811 nn->somebody_reclaimed = true; 6812 break; 6813 case FILE_LOCK_DEFERRED: 6814 nbl = NULL; 6815 fallthrough; 6816 case -EAGAIN: /* conflock holds conflicting lock */ 6817 status = nfserr_denied; 6818 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 6819 nfs4_set_lock_denied(conflock, &lock->lk_denied); 6820 break; 6821 case -EDEADLK: 6822 status = nfserr_deadlock; 6823 break; 6824 default: 6825 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 6826 status = nfserrno(err); 6827 break; 6828 } 6829 out: 6830 if (nbl) { 6831 /* dequeue it if we queued it before */ 6832 if (fl_flags & FL_SLEEP) { 6833 spin_lock(&nn->blocked_locks_lock); 6834 list_del_init(&nbl->nbl_list); 6835 list_del_init(&nbl->nbl_lru); 6836 spin_unlock(&nn->blocked_locks_lock); 6837 } 6838 free_blocked_lock(nbl); 6839 } 6840 if (nf) 6841 nfsd_file_put(nf); 6842 if (lock_stp) { 6843 /* Bump seqid manually if the 4.0 replay owner is openowner */ 6844 if (cstate->replay_owner && 6845 cstate->replay_owner != &lock_sop->lo_owner && 6846 seqid_mutating_err(ntohl(status))) 6847 lock_sop->lo_owner.so_seqid++; 6848 6849 /* 6850 * If this is a new, never-before-used stateid, and we are 6851 * returning an error, then just go ahead and release it. 6852 */ 6853 if (status && new) 6854 release_lock_stateid(lock_stp); 6855 6856 mutex_unlock(&lock_stp->st_mutex); 6857 6858 nfs4_put_stid(&lock_stp->st_stid); 6859 } 6860 if (open_stp) 6861 nfs4_put_stid(&open_stp->st_stid); 6862 nfsd4_bump_seqid(cstate, status); 6863 if (conflock) 6864 locks_free_lock(conflock); 6865 return status; 6866 } 6867 6868 /* 6869 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 6870 * so we do a temporary open here just to get an open file to pass to 6871 * vfs_test_lock. (Arguably perhaps test_lock should be done with an 6872 * inode operation.) 
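 * (Note that the temporary open below is always for read
 * (NFSD_MAY_READ): vfs_test_lock() only queries for conflicts, so the
 * open mode does not need to match the type of lock being tested.)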
6873 */ 6874 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 6875 { 6876 struct nfsd_file *nf; 6877 __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); 6878 if (!err) { 6879 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); 6880 nfsd_file_put(nf); 6881 } 6882 return err; 6883 } 6884 6885 /* 6886 * LOCKT operation 6887 */ 6888 __be32 6889 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6890 union nfsd4_op_u *u) 6891 { 6892 struct nfsd4_lockt *lockt = &u->lockt; 6893 struct file_lock *file_lock = NULL; 6894 struct nfs4_lockowner *lo = NULL; 6895 __be32 status; 6896 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6897 6898 if (locks_in_grace(SVC_NET(rqstp))) 6899 return nfserr_grace; 6900 6901 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 6902 return nfserr_inval; 6903 6904 if (!nfsd4_has_session(cstate)) { 6905 status = set_client(&lockt->lt_clientid, cstate, nn); 6906 if (status) 6907 goto out; 6908 } 6909 6910 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 6911 goto out; 6912 6913 file_lock = locks_alloc_lock(); 6914 if (!file_lock) { 6915 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 6916 status = nfserr_jukebox; 6917 goto out; 6918 } 6919 6920 switch (lockt->lt_type) { 6921 case NFS4_READ_LT: 6922 case NFS4_READW_LT: 6923 file_lock->fl_type = F_RDLCK; 6924 break; 6925 case NFS4_WRITE_LT: 6926 case NFS4_WRITEW_LT: 6927 file_lock->fl_type = F_WRLCK; 6928 break; 6929 default: 6930 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 6931 status = nfserr_inval; 6932 goto out; 6933 } 6934 6935 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); 6936 if (lo) 6937 file_lock->fl_owner = (fl_owner_t)lo; 6938 file_lock->fl_pid = current->tgid; 6939 file_lock->fl_flags = FL_POSIX; 6940 6941 file_lock->fl_start = lockt->lt_offset; 6942 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 6943 6944 nfs4_transform_lock_offset(file_lock); 6945 6946 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 6947 if (status) 6948 goto out; 6949 6950 if (file_lock->fl_type != F_UNLCK) { 6951 status = nfserr_denied; 6952 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 6953 } 6954 out: 6955 if (lo) 6956 nfs4_put_stateowner(&lo->lo_owner); 6957 if (file_lock) 6958 locks_free_lock(file_lock); 6959 return status; 6960 } 6961 6962 __be32 6963 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6964 union nfsd4_op_u *u) 6965 { 6966 struct nfsd4_locku *locku = &u->locku; 6967 struct nfs4_ol_stateid *stp; 6968 struct nfsd_file *nf = NULL; 6969 struct file_lock *file_lock = NULL; 6970 __be32 status; 6971 int err; 6972 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6973 6974 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 6975 (long long) locku->lu_offset, 6976 (long long) locku->lu_length); 6977 6978 if (check_lock_length(locku->lu_offset, locku->lu_length)) 6979 return nfserr_inval; 6980 6981 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 6982 &locku->lu_stateid, NFS4_LOCK_STID, 6983 &stp, nn); 6984 if (status) 6985 goto out; 6986 nf = find_any_file(stp->st_stid.sc_file); 6987 if (!nf) { 6988 status = nfserr_lock_range; 6989 goto put_stateid; 6990 } 6991 file_lock = locks_alloc_lock(); 6992 if (!file_lock) { 6993 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 6994 status = nfserr_jukebox; 6995 goto put_file; 6996 } 6997 6998 file_lock->fl_type = F_UNLCK; 6999 
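/*
 * fl_owner takes a reference on the stateowner here; it is dropped
 * via the lm_put_owner callback when this file_lock is freed.
 */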
file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); 7000 file_lock->fl_pid = current->tgid; 7001 file_lock->fl_file = nf->nf_file; 7002 file_lock->fl_flags = FL_POSIX; 7003 file_lock->fl_lmops = &nfsd_posix_mng_ops; 7004 file_lock->fl_start = locku->lu_offset; 7005 7006 file_lock->fl_end = last_byte_offset(locku->lu_offset, 7007 locku->lu_length); 7008 nfs4_transform_lock_offset(file_lock); 7009 7010 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); 7011 if (err) { 7012 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 7013 goto out_nfserr; 7014 } 7015 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); 7016 put_file: 7017 nfsd_file_put(nf); 7018 put_stateid: 7019 mutex_unlock(&stp->st_mutex); 7020 nfs4_put_stid(&stp->st_stid); 7021 out: 7022 nfsd4_bump_seqid(cstate, status); 7023 if (file_lock) 7024 locks_free_lock(file_lock); 7025 return status; 7026 7027 out_nfserr: 7028 status = nfserrno(err); 7029 goto put_file; 7030 } 7031 7032 /* 7033 * returns 7034 * true: locks held by lockowner 7035 * false: no locks held by lockowner 7036 */ 7037 static bool 7038 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 7039 { 7040 struct file_lock *fl; 7041 int status = false; 7042 struct nfsd_file *nf = find_any_file(fp); 7043 struct inode *inode; 7044 struct file_lock_context *flctx; 7045 7046 if (!nf) { 7047 /* Any valid lock stateid should have some sort of access */ 7048 WARN_ON_ONCE(1); 7049 return status; 7050 } 7051 7052 inode = locks_inode(nf->nf_file); 7053 flctx = inode->i_flctx; 7054 7055 if (flctx && !list_empty_careful(&flctx->flc_posix)) { 7056 spin_lock(&flctx->flc_lock); 7057 list_for_each_entry(fl, &flctx->flc_posix, fl_list) { 7058 if (fl->fl_owner == (fl_owner_t)lowner) { 7059 status = true; 7060 break; 7061 } 7062 } 7063 spin_unlock(&flctx->flc_lock); 7064 } 7065 nfsd_file_put(nf); 7066 return status; 7067 } 7068 7069 __be32 7070 nfsd4_release_lockowner(struct svc_rqst *rqstp, 7071 struct nfsd4_compound_state *cstate, 7072 union nfsd4_op_u *u) 7073 { 7074 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; 7075 clientid_t *clid = &rlockowner->rl_clientid; 7076 struct nfs4_stateowner *sop; 7077 struct nfs4_lockowner *lo = NULL; 7078 struct nfs4_ol_stateid *stp; 7079 struct xdr_netobj *owner = &rlockowner->rl_owner; 7080 unsigned int hashval = ownerstr_hashval(owner); 7081 __be32 status; 7082 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7083 struct nfs4_client *clp; 7084 LIST_HEAD (reaplist); 7085 7086 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 7087 clid->cl_boot, clid->cl_id); 7088 7089 status = set_client(clid, cstate, nn); 7090 if (status) 7091 return status; 7092 7093 clp = cstate->clp; 7094 /* Find the matching lock stateowner */ 7095 spin_lock(&clp->cl_lock); 7096 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval], 7097 so_strhash) { 7098 7099 if (sop->so_is_open_owner || !same_owner_str(sop, owner)) 7100 continue; 7101 7102 /* see if there are still any locks associated with it */ 7103 lo = lockowner(sop); 7104 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { 7105 if (check_for_locks(stp->st_stid.sc_file, lo)) { 7106 status = nfserr_locks_held; 7107 spin_unlock(&clp->cl_lock); 7108 return status; 7109 } 7110 } 7111 7112 nfs4_get_stateowner(sop); 7113 break; 7114 } 7115 if (!lo) { 7116 spin_unlock(&clp->cl_lock); 7117 return status; 7118 } 7119 7120 unhash_lockowner_locked(lo); 7121 while 
(!list_empty(&lo->lo_owner.so_stateids)) { 7122 stp = list_first_entry(&lo->lo_owner.so_stateids, 7123 struct nfs4_ol_stateid, 7124 st_perstateowner); 7125 WARN_ON(!unhash_lock_stateid(stp)); 7126 put_ol_stateid_locked(stp, &reaplist); 7127 } 7128 spin_unlock(&clp->cl_lock); 7129 free_ol_stateid_reaplist(&reaplist); 7130 remove_blocked_locks(lo); 7131 nfs4_put_stateowner(&lo->lo_owner); 7132 7133 return status; 7134 } 7135 7136 static inline struct nfs4_client_reclaim * 7137 alloc_reclaim(void) 7138 { 7139 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 7140 } 7141 7142 bool 7143 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn) 7144 { 7145 struct nfs4_client_reclaim *crp; 7146 7147 crp = nfsd4_find_reclaim_client(name, nn); 7148 return (crp && crp->cr_clp); 7149 } 7150 7151 /* 7152 * failure => all reset bets are off, nfserr_no_grace... 7153 * 7154 * The caller is responsible for freeing name.data if NULL is returned (it 7155 * will be freed in nfs4_remove_reclaim_record in the normal case). 7156 */ 7157 struct nfs4_client_reclaim * 7158 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, 7159 struct nfsd_net *nn) 7160 { 7161 unsigned int strhashval; 7162 struct nfs4_client_reclaim *crp; 7163 7164 trace_nfsd_clid_reclaim(nn, name.len, name.data); 7165 crp = alloc_reclaim(); 7166 if (crp) { 7167 strhashval = clientstr_hashval(name); 7168 INIT_LIST_HEAD(&crp->cr_strhash); 7169 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 7170 crp->cr_name.data = name.data; 7171 crp->cr_name.len = name.len; 7172 crp->cr_princhash.data = princhash.data; 7173 crp->cr_princhash.len = princhash.len; 7174 crp->cr_clp = NULL; 7175 nn->reclaim_str_hashtbl_size++; 7176 } 7177 return crp; 7178 } 7179 7180 void 7181 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 7182 { 7183 list_del(&crp->cr_strhash); 7184 kfree(crp->cr_name.data); 7185 kfree(crp->cr_princhash.data); 7186 kfree(crp); 7187 nn->reclaim_str_hashtbl_size--; 7188 } 7189 7190 void 7191 nfs4_release_reclaim(struct nfsd_net *nn) 7192 { 7193 struct nfs4_client_reclaim *crp = NULL; 7194 int i; 7195 7196 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7197 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 7198 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 7199 struct nfs4_client_reclaim, cr_strhash); 7200 nfs4_remove_reclaim_record(crp, nn); 7201 } 7202 } 7203 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 7204 } 7205 7206 /* 7207 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 7208 struct nfs4_client_reclaim * 7209 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn) 7210 { 7211 unsigned int strhashval; 7212 struct nfs4_client_reclaim *crp = NULL; 7213 7214 trace_nfsd_clid_find(nn, name.len, name.data); 7215 7216 strhashval = clientstr_hashval(name); 7217 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { 7218 if (compare_blob(&crp->cr_name, &name) == 0) { 7219 return crp; 7220 } 7221 } 7222 return NULL; 7223 } 7224 7225 __be32 7226 nfs4_check_open_reclaim(struct nfs4_client *clp) 7227 { 7228 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 7229 return nfserr_no_grace; 7230 7231 if (nfsd4_client_record_check(clp)) 7232 return nfserr_reclaim_bad; 7233 7234 return nfs_ok; 7235 } 7236 7237 /* 7238 * Since the lifetime of a delegation isn't limited to that of an open, a 7239 * client may quite reasonably hang on to a delegation as long as it has 7240 * the inode cached. 
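 * (For example, a client that has walked a large tree may quite
 * legitimately be caching millions of inodes, and so holding millions
 * of delegations.)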
This becomes an obvious problem the first time a 7241 * client's inode cache approaches the size of the server's total memory. 7242 * 7243 * For now we avoid this problem by imposing a hard limit on the number 7244 * of delegations, which varies according to the server's memory size. 7245 */ 7246 static void 7247 set_max_delegations(void) 7248 { 7249 /* 7250 * Allow at most 4 delegations per megabyte of RAM. Quick 7251 * estimates suggest that in the worst case (where every delegation 7252 * is for a different inode), a delegation could take about 1.5K, 7253 * giving a worst case usage of about 0.6% of memory. 7254 */ 7255 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 7256 } 7257 7258 static int nfs4_state_create_net(struct net *net) 7259 { 7260 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7261 int i; 7262 7263 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 7264 sizeof(struct list_head), 7265 GFP_KERNEL); 7266 if (!nn->conf_id_hashtbl) 7267 goto err; 7268 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 7269 sizeof(struct list_head), 7270 GFP_KERNEL); 7271 if (!nn->unconf_id_hashtbl) 7272 goto err_unconf_id; 7273 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, 7274 sizeof(struct list_head), 7275 GFP_KERNEL); 7276 if (!nn->sessionid_hashtbl) 7277 goto err_sessionid; 7278 7279 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7280 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 7281 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 7282 } 7283 for (i = 0; i < SESSION_HASH_SIZE; i++) 7284 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 7285 nn->conf_name_tree = RB_ROOT; 7286 nn->unconf_name_tree = RB_ROOT; 7287 nn->boot_time = ktime_get_real_seconds(); 7288 nn->grace_ended = false; 7289 nn->nfsd4_manager.block_opens = true; 7290 INIT_LIST_HEAD(&nn->nfsd4_manager.list); 7291 INIT_LIST_HEAD(&nn->client_lru); 7292 INIT_LIST_HEAD(&nn->close_lru); 7293 INIT_LIST_HEAD(&nn->del_recall_lru); 7294 spin_lock_init(&nn->client_lock); 7295 spin_lock_init(&nn->s2s_cp_lock); 7296 idr_init(&nn->s2s_cp_stateids); 7297 7298 spin_lock_init(&nn->blocked_locks_lock); 7299 INIT_LIST_HEAD(&nn->blocked_locks_lru); 7300 7301 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 7302 get_net(net); 7303 7304 return 0; 7305 7306 err_sessionid: 7307 kfree(nn->unconf_id_hashtbl); 7308 err_unconf_id: 7309 kfree(nn->conf_id_hashtbl); 7310 err: 7311 return -ENOMEM; 7312 } 7313 7314 static void 7315 nfs4_state_destroy_net(struct net *net) 7316 { 7317 int i; 7318 struct nfs4_client *clp = NULL; 7319 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7320 7321 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7322 while (!list_empty(&nn->conf_id_hashtbl[i])) { 7323 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 7324 destroy_client(clp); 7325 } 7326 } 7327 7328 WARN_ON(!list_empty(&nn->blocked_locks_lru)); 7329 7330 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7331 while (!list_empty(&nn->unconf_id_hashtbl[i])) { 7332 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 7333 destroy_client(clp); 7334 } 7335 } 7336 7337 kfree(nn->sessionid_hashtbl); 7338 kfree(nn->unconf_id_hashtbl); 7339 kfree(nn->conf_id_hashtbl); 7340 put_net(net); 7341 } 7342 7343 int 7344 nfs4_state_start_net(struct net *net) 7345 { 7346 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7347 int ret; 7348 7349 ret = get_nfsdfs(net); 7350 if (ret) 7351 return ret; 7352 ret = nfs4_state_create_net(net); 7353 if (ret) { 7354 mntput(nn->nfsd_mnt); 7355 return ret; 7356 } 7357
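/*
 * All per-net state exists now; start the v4 grace period and
 * schedule the laundromat. If client tracking shows that no clients
 * have reclaimable state, the grace period is cut short via
 * skip_grace below.
 */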
locks_start_grace(net, &nn->nfsd4_manager); 7358 nfsd4_client_tracking_init(net); 7359 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) 7360 goto skip_grace; 7361 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", 7362 nn->nfsd4_grace, net->ns.inum); 7363 trace_nfsd_grace_start(nn); 7364 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 7365 return 0; 7366 7367 skip_grace: 7368 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n", 7369 net->ns.inum); 7370 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); 7371 nfsd4_end_grace(nn); 7372 return 0; 7373 } 7374 7375 /* initialization to perform when the nfsd service is started: */ 7376 7377 int 7378 nfs4_state_start(void) 7379 { 7380 int ret; 7381 7382 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); 7383 if (laundry_wq == NULL) { 7384 ret = -ENOMEM; 7385 goto out; 7386 } 7387 ret = nfsd4_create_callback_queue(); 7388 if (ret) 7389 goto out_free_laundry; 7390 7391 set_max_delegations(); 7392 return 0; 7393 7394 out_free_laundry: 7395 destroy_workqueue(laundry_wq); 7396 out: 7397 return ret; 7398 } 7399 7400 void 7401 nfs4_state_shutdown_net(struct net *net) 7402 { 7403 struct nfs4_delegation *dp = NULL; 7404 struct list_head *pos, *next, reaplist; 7405 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7406 7407 cancel_delayed_work_sync(&nn->laundromat_work); 7408 locks_end_grace(&nn->nfsd4_manager); 7409 7410 INIT_LIST_HEAD(&reaplist); 7411 spin_lock(&state_lock); 7412 list_for_each_safe(pos, next, &nn->del_recall_lru) { 7413 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 7414 WARN_ON(!unhash_delegation_locked(dp)); 7415 list_add(&dp->dl_recall_lru, &reaplist); 7416 } 7417 spin_unlock(&state_lock); 7418 list_for_each_safe(pos, next, &reaplist) { 7419 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 7420 list_del_init(&dp->dl_recall_lru); 7421 destroy_unhashed_deleg(dp); 7422 } 7423 7424 nfsd4_client_tracking_exit(net); 7425 nfs4_state_destroy_net(net); 7426 mntput(nn->nfsd_mnt); 7427 } 7428 7429 void 7430 nfs4_state_shutdown(void) 7431 { 7432 destroy_workqueue(laundry_wq); 7433 nfsd4_destroy_callback_queue(); 7434 } 7435 7436 static void 7437 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 7438 { 7439 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) && 7440 CURRENT_STATEID(stateid)) 7441 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 7442 } 7443 7444 static void 7445 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 7446 { 7447 if (cstate->minorversion) { 7448 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 7449 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 7450 } 7451 } 7452 7453 void 7454 clear_current_stateid(struct nfsd4_compound_state *cstate) 7455 { 7456 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 7457 } 7458 7459 /* 7460 * functions to set current state id 7461 */ 7462 void 7463 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, 7464 union nfsd4_op_u *u) 7465 { 7466 put_stateid(cstate, &u->open_downgrade.od_stateid); 7467 } 7468 7469 void 7470 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, 7471 union nfsd4_op_u *u) 7472 { 7473 put_stateid(cstate, &u->open.op_stateid); 7474 } 7475 7476 void 7477 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, 7478 union nfsd4_op_u *u) 7479 { 7480 put_stateid(cstate, &u->close.cl_stateid); 7481 } 7482 7483 void 7484 
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, 7485 union nfsd4_op_u *u) 7486 { 7487 put_stateid(cstate, &u->lock.lk_resp_stateid); 7488 } 7489 7490 /* 7491 * functions to consume current state id 7492 */ 7493 7494 void 7495 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, 7496 union nfsd4_op_u *u) 7497 { 7498 get_stateid(cstate, &u->open_downgrade.od_stateid); 7499 } 7500 7501 void 7502 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, 7503 union nfsd4_op_u *u) 7504 { 7505 get_stateid(cstate, &u->delegreturn.dr_stateid); 7506 } 7507 7508 void 7509 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, 7510 union nfsd4_op_u *u) 7511 { 7512 get_stateid(cstate, &u->free_stateid.fr_stateid); 7513 } 7514 7515 void 7516 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, 7517 union nfsd4_op_u *u) 7518 { 7519 get_stateid(cstate, &u->setattr.sa_stateid); 7520 } 7521 7522 void 7523 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, 7524 union nfsd4_op_u *u) 7525 { 7526 get_stateid(cstate, &u->close.cl_stateid); 7527 } 7528 7529 void 7530 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, 7531 union nfsd4_op_u *u) 7532 { 7533 get_stateid(cstate, &u->locku.lu_stateid); 7534 } 7535 7536 void 7537 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, 7538 union nfsd4_op_u *u) 7539 { 7540 get_stateid(cstate, &u->read.rd_stateid); 7541 } 7542 7543 void 7544 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, 7545 union nfsd4_op_u *u) 7546 { 7547 get_stateid(cstate, &u->write.wr_stateid); 7548 } 7549
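/*
 * Illustration of the current-stateid plumbing above (minorversion 1
 * and up only; see put_stateid()): in a compound such as
 *
 *	PUTFH; OPEN; WRITE(stateid = current-stateid marker); CLOSE
 *
 * nfsd4_set_openstateid() saves OPEN's reply stateid in the compound
 * state, and nfsd4_get_writestateid() substitutes that saved value
 * when WRITE carries the reserved current-stateid value, so the
 * client can chain the operations without first parsing the OPEN
 * reply.
 */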