/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static struct workqueue_struct *laundry_wq;

int nfsd4_create_laundry_wq(void)
{
	int rc = 0;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL)
		rc = -ENOMEM;
	return rc;
}

void nfsd4_destroy_laundry_wq(void)
{
	destroy_workqueue(laundry_wq);
}

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
					struct nfs4_client *clp)
{
	if (clp->cl_state != NFSD4_ACTIVE)
		atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}
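
/*
 * Note (added commentary): put_client_renew() below is the unlocked
 * counterpart of put_client_renew_locked(); atomic_dec_and_lock()
 * takes nn->client_lock only when cl_rpc_users actually drops to zero.
 */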

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			kref_init(&nbl->nbl_kref);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_nbl(struct kref *kref)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
	kfree(nbl);
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}
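
/*
 * Note (added commentary): remove_blocked_locks() below tears down in
 * two phases: entries are moved to a private reaplist while
 * blocked_locks_lock is held, then freed once the spinlock has been
 * dropped, so that free_blocked_lock() never runs under it.
 */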

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	trace_nfsd_cb_notify_lock_done(&zero_stateid, task);

	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation in
 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 * the server return an error if the client attempt to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * This enforcement is arguably incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 *
 * But you could also argue that our current code is already overkill,
 * since it only exists to return NFS4ERR_INVAL on incorrect client
 * behavior.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
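
/*
 * Worked example (added commentary, not from the original source): a
 * bmap with bits 1 (READ) and 2 (WRITE) set collapses to 1 | 2 ==
 * NFS4_SHARE_ACCESS_BOTH, and a bmap with only bit 3 (BOTH) set also
 * yields 3; bmap_to_share_mode() reports the union of every share mode
 * the stateid has ever used.
 */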

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	if (refcount_dec_and_test(&fi->fi_ref)) {
		nfsd4_file_hash_remove(fi);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	if (!ret) {
		ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
		if (!ret)
			ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
	lockdep_assert_held(&f->fi_lock);

	if (f->fi_fds[O_RDWR])
		return f->fi_fds[O_RDWR];
	if (f->fi_fds[O_WRONLY])
		return f->fi_fds[O_WRONLY];
	if (f->fi_fds[O_RDONLY])
		return f->fi_fds[O_RDONLY];
	return NULL;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;
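
/*
 * Note (added commentary): the file table below is keyed on the inode
 * pointer (fi_inode), so all nfs4_file entries that refer to the same
 * inode hash to the same rhltable chain.
 */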

static const struct rhashtable_params nfs4_file_rhash_params = {
	.key_len		= sizeof_field(struct nfs4_file, fi_inode),
	.key_offset		= offsetof(struct nfs4_file, fi_inode),
	.head_offset		= offsetof(struct nfs4_file, fi_rlist),

	/*
	 * Start with a single page hash table to reduce resizing churn
	 * on light workloads.
	 */
	.min_size		= 256,
	.automatic_shrinking	= true,
};

/*
 * Check if courtesy clients have conflicting access and resolve it if possible
 *
 * access:	is op_share_access if share_access is true.
 *		Check if access mode, op_share_access, would conflict with
 *		the current deny mode of the file 'fp'.
 * access:	is op_share_deny if share_access is false.
 *		Check if the deny mode, op_share_deny, would conflict with
 *		current access of the file 'fp'.
 * stp:		skip checking this entry.
 * new_stp:	normal open, not open upgrade.
 *
 * Function returns:
 *	false - access/deny mode conflict with normal client.
 *	true  - no conflict or conflict with courtesy client(s) is resolved.
 */
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
	struct nfs4_ol_stateid *st;
	bool resolvable = true;
	unsigned char bmap;
	struct nfsd_net *nn;
	struct nfs4_client *clp;

	lockdep_assert_held(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		/* ignore lock stateid */
		if (st->st_openstp)
			continue;
		if (st == stp && new_stp)
			continue;
		/* check file access against deny mode or vice versa */
		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
		if (!(access & bmap_to_share_mode(bmap)))
			continue;
		clp = st->st_stid.sc_client;
		if (try_to_expire_client(clp))
			continue;
		resolvable = false;
		break;
	}
	if (resolvable) {
		clp = stp->st_stid.sc_client;
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}
	return resolvable;
}

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
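
/*
 * Note (added commentary): nfs4_alloc_stid() below uses the
 * idr_preload()/idr_alloc_cyclic() pattern: node memory is
 * preallocated with GFP_KERNEL outside the lock so the actual ID
 * assignment can run with GFP_NOWAIT under cl_lock.  The cyclic
 * allocator also delays reuse of stateid values, which the comment at
 * the end of that function relies on.
 */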

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
					 void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{
	int new_id;

	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->cs_stid.si_opaque.so_id = new_id;
	stid->cs_stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	stid->cs_type = cs_type;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.cs_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
		return;
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.cs_stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	struct nfs4_delegation *dp = delegstateid(stid);

	WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
	WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
	WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
	WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32 bits and use
 * the low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_raw, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
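
/*
 * Worked example for the bloom filters above (added commentary): with
 * hash = jhash(...) == 0x00C0FFEE, block_delegations() sets bits 0xEE,
 * 0xFF and 0xC0 in the "new" 256-bit filter, and delegation_blocked()
 * tests the same three bits in each filter.  A delegation is blocked
 * only when all three bits are set in one filter, so false positives
 * are possible but false negatives are not.
 */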

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&fp->fi_fhandle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	dp->dl_recalled = false;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	trace_nfsd_stid_revoke(&dp->dl_stid);

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}
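
/*
 * Worked example for reset_union_bmap_deny() above (added commentary):
 * if deny == NFS4_SHARE_DENY_READ (1), bits 2 (WRITE) and 3 (BOTH) are
 * cleared from st_deny_bmap, since (2 & 1) != 2 and (3 & 1) != 3,
 * while bit 1 is kept; the file-wide union of deny modes is then
 * recalculated.
 */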

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	WARN_ON(!list_empty(&stid->sc_cp_list));
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}
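
/*
 * Note (added commentary): oo_last_closed_stid caches the stateid from
 * the openowner's most recent NFSv4.0 CLOSE so the reply can be
 * replayed; dropping it in release_last_closed_stateid() below also
 * removes the openowner from the close LRU.
 */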

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
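
/*
 * Worked example (added commentary): a client advertising
 * maxresp_cached = 2128 needs 2128 - (24 + 12 + 44) = 2048 bytes of
 * reply cache per slot, so slot_bytes() returns
 * 2048 + sizeof(struct nfsd4_slot).
 */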

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		/* We have handed out more space than we chose in
		 * set_max_drc() to allow.  That isn't really a
		 * problem as long as that doesn't make us think we
		 * have lots more due to integer overflow.
		 */
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a fraction of the remaining memory,
	 * unless it's the only way to give this client a slot.
	 * The chosen fraction is either 1/8 or 1/number of threads,
	 * whichever is smaller.  This ensures there are adequate
	 * slots to support multiple clients per thread.
	 * Give the client one slot even if that would require
	 * over-allocation--it is better than failure.
	 */
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int i;

	BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
		     > PAGE_SIZE);

	new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	trace_nfsd_cb_lost(clp);

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
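
/*
 * Note (added commentary): in nfsd4_init_conn() below, a failure from
 * register_xpt_user() means the transport is already being torn down,
 * so nfsd4_conn_lost() is invoked by hand to undo the hashing that was
 * done just before.
 */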

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
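
/*
 * Note (added commentary): a new session starts with se_ref == 0;
 * references are taken via nfsd4_get_session_locked(), and the session
 * is freed only once it is both marked dead and unreferenced (see
 * nfsd4_put_session_locked() above).
 */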
Use the peer address 1984 * as a reasonable default for now, but consider fixing 1985 * the rpc client not to require an address in the 1986 * future: 1987 */ 1988 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 1989 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 1990 } 1991 } 1992 1993 /* caller must hold client_lock */ 1994 static struct nfsd4_session * 1995 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 1996 { 1997 struct nfsd4_session *elem; 1998 int idx; 1999 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2000 2001 lockdep_assert_held(&nn->client_lock); 2002 2003 dump_sessionid(__func__, sessionid); 2004 idx = hash_sessionid(sessionid); 2005 /* Search in the appropriate list */ 2006 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 2007 if (!memcmp(elem->se_sessionid.data, sessionid->data, 2008 NFS4_MAX_SESSIONID_LEN)) { 2009 return elem; 2010 } 2011 } 2012 2013 dprintk("%s: session not found\n", __func__); 2014 return NULL; 2015 } 2016 2017 static struct nfsd4_session * 2018 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, 2019 __be32 *ret) 2020 { 2021 struct nfsd4_session *session; 2022 __be32 status = nfserr_badsession; 2023 2024 session = __find_in_sessionid_hashtbl(sessionid, net); 2025 if (!session) 2026 goto out; 2027 status = nfsd4_get_session_locked(session); 2028 if (status) 2029 session = NULL; 2030 out: 2031 *ret = status; 2032 return session; 2033 } 2034 2035 /* caller must hold client_lock */ 2036 static void 2037 unhash_session(struct nfsd4_session *ses) 2038 { 2039 struct nfs4_client *clp = ses->se_client; 2040 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2041 2042 lockdep_assert_held(&nn->client_lock); 2043 2044 list_del(&ses->se_hash); 2045 spin_lock(&ses->se_client->cl_lock); 2046 list_del(&ses->se_perclnt); 2047 spin_unlock(&ses->se_client->cl_lock); 2048 } 2049 2050 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 2051 static int 2052 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 2053 { 2054 /* 2055 * We're assuming the clid was not given out from a boot 2056 * precisely 2^32 (about 136 years) before this one. That seems 2057 * a safe assumption: 2058 */ 2059 if (clid->cl_boot == (u32)nn->boot_time) 2060 return 0; 2061 trace_nfsd_clid_stale(clid); 2062 return 1; 2063 } 2064 2065 /* 2066 * XXX Should we use a slab cache ? 2067 * This type of memory management is somewhat inefficient, but we use it 2068 * anyway since SETCLIENTID is not a common operation. 
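 * (The struct itself already comes from client_slab below; it is the
 * variable-length pieces, the client name duplicated via
 * xdr_netobj_dup() and the owner-string hash table, that fall back to
 * kmalloc(), and those do not map naturally onto a fixed-size slab.)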
2069 */ 2070 static struct nfs4_client *alloc_client(struct xdr_netobj name, 2071 struct nfsd_net *nn) 2072 { 2073 struct nfs4_client *clp; 2074 int i; 2075 2076 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) { 2077 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 2078 return NULL; 2079 } 2080 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL); 2081 if (clp == NULL) 2082 return NULL; 2083 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); 2084 if (clp->cl_name.data == NULL) 2085 goto err_no_name; 2086 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, 2087 sizeof(struct list_head), 2088 GFP_KERNEL); 2089 if (!clp->cl_ownerstr_hashtbl) 2090 goto err_no_hashtbl; 2091 for (i = 0; i < OWNER_HASH_SIZE; i++) 2092 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); 2093 INIT_LIST_HEAD(&clp->cl_sessions); 2094 idr_init(&clp->cl_stateids); 2095 atomic_set(&clp->cl_rpc_users, 0); 2096 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 2097 clp->cl_state = NFSD4_ACTIVE; 2098 atomic_inc(&nn->nfs4_client_count); 2099 atomic_set(&clp->cl_delegs_in_recall, 0); 2100 INIT_LIST_HEAD(&clp->cl_idhash); 2101 INIT_LIST_HEAD(&clp->cl_openowners); 2102 INIT_LIST_HEAD(&clp->cl_delegations); 2103 INIT_LIST_HEAD(&clp->cl_lru); 2104 INIT_LIST_HEAD(&clp->cl_revoked); 2105 #ifdef CONFIG_NFSD_PNFS 2106 INIT_LIST_HEAD(&clp->cl_lo_states); 2107 #endif 2108 INIT_LIST_HEAD(&clp->async_copies); 2109 spin_lock_init(&clp->async_lock); 2110 spin_lock_init(&clp->cl_lock); 2111 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 2112 return clp; 2113 err_no_hashtbl: 2114 kfree(clp->cl_name.data); 2115 err_no_name: 2116 kmem_cache_free(client_slab, clp); 2117 return NULL; 2118 } 2119 2120 static void __free_client(struct kref *k) 2121 { 2122 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); 2123 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); 2124 2125 free_svc_cred(&clp->cl_cred); 2126 kfree(clp->cl_ownerstr_hashtbl); 2127 kfree(clp->cl_name.data); 2128 kfree(clp->cl_nii_domain.data); 2129 kfree(clp->cl_nii_name.data); 2130 idr_destroy(&clp->cl_stateids); 2131 kfree(clp->cl_ra); 2132 kmem_cache_free(client_slab, clp); 2133 } 2134 2135 static void drop_client(struct nfs4_client *clp) 2136 { 2137 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); 2138 } 2139 2140 static void 2141 free_client(struct nfs4_client *clp) 2142 { 2143 while (!list_empty(&clp->cl_sessions)) { 2144 struct nfsd4_session *ses; 2145 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 2146 se_perclnt); 2147 list_del(&ses->se_perclnt); 2148 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 2149 free_session(ses); 2150 } 2151 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 2152 if (clp->cl_nfsd_dentry) { 2153 nfsd_client_rmdir(clp->cl_nfsd_dentry); 2154 clp->cl_nfsd_dentry = NULL; 2155 wake_up_all(&expiry_wq); 2156 } 2157 drop_client(clp); 2158 } 2159 2160 /* must be called under the client_lock */ 2161 static void 2162 unhash_client_locked(struct nfs4_client *clp) 2163 { 2164 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2165 struct nfsd4_session *ses; 2166 2167 lockdep_assert_held(&nn->client_lock); 2168 2169 /* Mark the client as expired! 
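	 * (cl_time == 0 is this file's convention for an expired
	 * client; force_expire_client() further down stores 0 for the
	 * same effect before waiting for the RPC users to drain.)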
*/ 2170 clp->cl_time = 0; 2171 /* Make it invisible */ 2172 if (!list_empty(&clp->cl_idhash)) { 2173 list_del_init(&clp->cl_idhash); 2174 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2175 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 2176 else 2177 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2178 } 2179 list_del_init(&clp->cl_lru); 2180 spin_lock(&clp->cl_lock); 2181 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 2182 list_del_init(&ses->se_hash); 2183 spin_unlock(&clp->cl_lock); 2184 } 2185 2186 static void 2187 unhash_client(struct nfs4_client *clp) 2188 { 2189 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2190 2191 spin_lock(&nn->client_lock); 2192 unhash_client_locked(clp); 2193 spin_unlock(&nn->client_lock); 2194 } 2195 2196 static __be32 mark_client_expired_locked(struct nfs4_client *clp) 2197 { 2198 if (atomic_read(&clp->cl_rpc_users)) 2199 return nfserr_jukebox; 2200 unhash_client_locked(clp); 2201 return nfs_ok; 2202 } 2203 2204 static void 2205 __destroy_client(struct nfs4_client *clp) 2206 { 2207 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2208 int i; 2209 struct nfs4_openowner *oo; 2210 struct nfs4_delegation *dp; 2211 struct list_head reaplist; 2212 2213 INIT_LIST_HEAD(&reaplist); 2214 spin_lock(&state_lock); 2215 while (!list_empty(&clp->cl_delegations)) { 2216 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 2217 WARN_ON(!unhash_delegation_locked(dp)); 2218 list_add(&dp->dl_recall_lru, &reaplist); 2219 } 2220 spin_unlock(&state_lock); 2221 while (!list_empty(&reaplist)) { 2222 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 2223 list_del_init(&dp->dl_recall_lru); 2224 destroy_unhashed_deleg(dp); 2225 } 2226 while (!list_empty(&clp->cl_revoked)) { 2227 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); 2228 list_del_init(&dp->dl_recall_lru); 2229 nfs4_put_stid(&dp->dl_stid); 2230 } 2231 while (!list_empty(&clp->cl_openowners)) { 2232 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 2233 nfs4_get_stateowner(&oo->oo_owner); 2234 release_openowner(oo); 2235 } 2236 for (i = 0; i < OWNER_HASH_SIZE; i++) { 2237 struct nfs4_stateowner *so, *tmp; 2238 2239 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], 2240 so_strhash) { 2241 /* Should be no openowners at this point */ 2242 WARN_ON_ONCE(so->so_is_open_owner); 2243 remove_blocked_locks(lockowner(so)); 2244 } 2245 } 2246 nfsd4_return_all_client_layouts(clp); 2247 nfsd4_shutdown_copy(clp); 2248 nfsd4_shutdown_callback(clp); 2249 if (clp->cl_cb_conn.cb_xprt) 2250 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 2251 atomic_add_unless(&nn->nfs4_client_count, -1, 0); 2252 nfsd4_dec_courtesy_client_count(nn, clp); 2253 free_client(clp); 2254 wake_up_all(&expiry_wq); 2255 } 2256 2257 static void 2258 destroy_client(struct nfs4_client *clp) 2259 { 2260 unhash_client(clp); 2261 __destroy_client(clp); 2262 } 2263 2264 static void inc_reclaim_complete(struct nfs4_client *clp) 2265 { 2266 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2267 2268 if (!nn->track_reclaim_completes) 2269 return; 2270 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) 2271 return; 2272 if (atomic_inc_return(&nn->nr_reclaim_complete) == 2273 nn->reclaim_str_hashtbl_size) { 2274 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", 2275 clp->net->ns.inum); 2276 nfsd4_end_grace(nn); 2277 } 2278 } 2279 2280 static void expire_client(struct nfs4_client 
*clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used. We use the uid, the gids, and the gss principal string as our
 * best approximation. We also don't want to allow non-gss use of a
 * client established using gss: in theory cr_principal should catch
 * that change, but in practice cr_principal can be null even in the
 * gss case since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ?
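	 * A null-safe check mirroring the cr_principal comparison just
	 * below would be one way to do it; a minimal sketch, not
	 * something the code currently enforces:
	 *
	 *	if (cr1->cr_targ_princ != cr2->cr_targ_princ &&
	 *	    (!cr1->cr_targ_princ || !cr2->cr_targ_princ ||
	 *	     strcmp(cr1->cr_targ_princ, cr2->cr_targ_princ)))
	 *		return false;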
*/ 2380 if (cr1->cr_principal == cr2->cr_principal) 2381 return true; 2382 if (!cr1->cr_principal || !cr2->cr_principal) 2383 return false; 2384 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 2385 } 2386 2387 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) 2388 { 2389 struct svc_cred *cr = &rqstp->rq_cred; 2390 u32 service; 2391 2392 if (!cr->cr_gss_mech) 2393 return false; 2394 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); 2395 return service == RPC_GSS_SVC_INTEGRITY || 2396 service == RPC_GSS_SVC_PRIVACY; 2397 } 2398 2399 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) 2400 { 2401 struct svc_cred *cr = &rqstp->rq_cred; 2402 2403 if (!cl->cl_mach_cred) 2404 return true; 2405 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) 2406 return false; 2407 if (!svc_rqst_integrity_protected(rqstp)) 2408 return false; 2409 if (cl->cl_cred.cr_raw_principal) 2410 return 0 == strcmp(cl->cl_cred.cr_raw_principal, 2411 cr->cr_raw_principal); 2412 if (!cr->cr_principal) 2413 return false; 2414 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 2415 } 2416 2417 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) 2418 { 2419 __be32 verf[2]; 2420 2421 /* 2422 * This is opaque to client, so no need to byte-swap. Use 2423 * __force to keep sparse happy 2424 */ 2425 verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); 2426 verf[1] = (__force __be32)nn->clverifier_counter++; 2427 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 2428 } 2429 2430 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 2431 { 2432 clp->cl_clientid.cl_boot = (u32)nn->boot_time; 2433 clp->cl_clientid.cl_id = nn->clientid_counter++; 2434 gen_confirm(clp, nn); 2435 } 2436 2437 static struct nfs4_stid * 2438 find_stateid_locked(struct nfs4_client *cl, stateid_t *t) 2439 { 2440 struct nfs4_stid *ret; 2441 2442 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); 2443 if (!ret || !ret->sc_type) 2444 return NULL; 2445 return ret; 2446 } 2447 2448 static struct nfs4_stid * 2449 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 2450 { 2451 struct nfs4_stid *s; 2452 2453 spin_lock(&cl->cl_lock); 2454 s = find_stateid_locked(cl, t); 2455 if (s != NULL) { 2456 if (typemask & s->sc_type) 2457 refcount_inc(&s->sc_count); 2458 else 2459 s = NULL; 2460 } 2461 spin_unlock(&cl->cl_lock); 2462 return s; 2463 } 2464 2465 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode) 2466 { 2467 struct nfsdfs_client *nc; 2468 nc = get_nfsdfs_client(inode); 2469 if (!nc) 2470 return NULL; 2471 return container_of(nc, struct nfs4_client, cl_nfsdfs); 2472 } 2473 2474 static void seq_quote_mem(struct seq_file *m, char *data, int len) 2475 { 2476 seq_printf(m, "\""); 2477 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\"); 2478 seq_printf(m, "\""); 2479 } 2480 2481 static const char *cb_state2str(int state) 2482 { 2483 switch (state) { 2484 case NFSD4_CB_UP: 2485 return "UP"; 2486 case NFSD4_CB_UNKNOWN: 2487 return "UNKNOWN"; 2488 case NFSD4_CB_DOWN: 2489 return "DOWN"; 2490 case NFSD4_CB_FAULT: 2491 return "FAULT"; 2492 } 2493 return "UNDEFINED"; 2494 } 2495 2496 static int client_info_show(struct seq_file *m, void *v) 2497 { 2498 struct inode *inode = file_inode(m->file); 2499 struct nfs4_client *clp; 2500 u64 clid; 2501 2502 clp = get_nfsdfs_clp(inode); 2503 if (!clp) 2504 return -ENXIO; 2505 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); 2506 seq_printf(m, 
"clientid: 0x%llx\n", clid); 2507 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); 2508 2509 if (clp->cl_state == NFSD4_COURTESY) 2510 seq_puts(m, "status: courtesy\n"); 2511 else if (clp->cl_state == NFSD4_EXPIRABLE) 2512 seq_puts(m, "status: expirable\n"); 2513 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2514 seq_puts(m, "status: confirmed\n"); 2515 else 2516 seq_puts(m, "status: unconfirmed\n"); 2517 seq_printf(m, "seconds from last renew: %lld\n", 2518 ktime_get_boottime_seconds() - clp->cl_time); 2519 seq_printf(m, "name: "); 2520 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); 2521 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); 2522 if (clp->cl_nii_domain.data) { 2523 seq_printf(m, "Implementation domain: "); 2524 seq_quote_mem(m, clp->cl_nii_domain.data, 2525 clp->cl_nii_domain.len); 2526 seq_printf(m, "\nImplementation name: "); 2527 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); 2528 seq_printf(m, "\nImplementation time: [%lld, %ld]\n", 2529 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); 2530 } 2531 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); 2532 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr); 2533 drop_client(clp); 2534 2535 return 0; 2536 } 2537 2538 DEFINE_SHOW_ATTRIBUTE(client_info); 2539 2540 static void *states_start(struct seq_file *s, loff_t *pos) 2541 __acquires(&clp->cl_lock) 2542 { 2543 struct nfs4_client *clp = s->private; 2544 unsigned long id = *pos; 2545 void *ret; 2546 2547 spin_lock(&clp->cl_lock); 2548 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2549 *pos = id; 2550 return ret; 2551 } 2552 2553 static void *states_next(struct seq_file *s, void *v, loff_t *pos) 2554 { 2555 struct nfs4_client *clp = s->private; 2556 unsigned long id = *pos; 2557 void *ret; 2558 2559 id = *pos; 2560 id++; 2561 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2562 *pos = id; 2563 return ret; 2564 } 2565 2566 static void states_stop(struct seq_file *s, void *v) 2567 __releases(&clp->cl_lock) 2568 { 2569 struct nfs4_client *clp = s->private; 2570 2571 spin_unlock(&clp->cl_lock); 2572 } 2573 2574 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) 2575 { 2576 seq_printf(s, "filename: \"%pD2\"", f->nf_file); 2577 } 2578 2579 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) 2580 { 2581 struct inode *inode = file_inode(f->nf_file); 2582 2583 seq_printf(s, "superblock: \"%02x:%02x:%ld\"", 2584 MAJOR(inode->i_sb->s_dev), 2585 MINOR(inode->i_sb->s_dev), 2586 inode->i_ino); 2587 } 2588 2589 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) 2590 { 2591 seq_printf(s, "owner: "); 2592 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); 2593 } 2594 2595 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) 2596 { 2597 seq_printf(s, "0x%.8x", stid->si_generation); 2598 seq_printf(s, "%12phN", &stid->si_opaque); 2599 } 2600 2601 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) 2602 { 2603 struct nfs4_ol_stateid *ols; 2604 struct nfs4_file *nf; 2605 struct nfsd_file *file; 2606 struct nfs4_stateowner *oo; 2607 unsigned int access, deny; 2608 2609 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID) 2610 return 0; /* XXX: or SEQ_SKIP? 
	 */
	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;

	spin_lock(&nf->fi_lock);
	file = find_any_file_locked(nf);
	if (!file)
		goto out;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: open, ");

	access = bmap_to_share_mode(ols->st_access_bmap);
	deny = bmap_to_share_mode(ols->st_deny_bmap);

	seq_printf(s, "access: %s%s, ",
		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
	seq_printf(s, "deny: %s%s, ",
		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
out:
	spin_unlock(&nf->fi_lock);
	return 0;
}

static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;

	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	spin_lock(&nf->fi_lock);
	file = find_any_file_locked(nf);
	if (!file)
		goto out;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: lock, ");

	/*
	 * Note: a lock stateid isn't really the same thing as a lock,
	 * it's the locking state held by one owner on a file, and there
	 * may be multiple (or no) lock ranges associated with it.
	 * (The same is true of open stateids.)
	 */

	nfs4_show_superblock(s, file);
	/* XXX: open stateid? */
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
out:
	spin_unlock(&nf->fi_lock);
	return 0;
}

static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_delegation *ds;
	struct nfs4_file *nf;
	struct nfsd_file *file;

	ds = delegstateid(st);
	nf = st->sc_file;
	spin_lock(&nf->fi_lock);
	file = nf->fi_deleg_file;
	if (!file)
		goto out;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: deleg, ");

	/* Kinda dead code as long as we only support read delegs: */
	seq_printf(s, "access: %s, ",
		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");

	/* XXX: lease time, whether it's being recalled. */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");
out:
	spin_unlock(&nf->fi_lock);
	return 0;
}

static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_layout_stateid *ls;
	struct nfsd_file *file;

	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
	file = ls->ls_file;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: layout, ");

	/* XXX: What else would be useful?
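	 * (Perhaps the layout type; ls_layout_type should be sitting
	 * right on the stateid, assuming it is stable enough to
	 * expose here.)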
*/ 2728 2729 nfs4_show_superblock(s, file); 2730 seq_printf(s, ", "); 2731 nfs4_show_fname(s, file); 2732 seq_printf(s, " }\n"); 2733 2734 return 0; 2735 } 2736 2737 static int states_show(struct seq_file *s, void *v) 2738 { 2739 struct nfs4_stid *st = v; 2740 2741 switch (st->sc_type) { 2742 case NFS4_OPEN_STID: 2743 return nfs4_show_open(s, st); 2744 case NFS4_LOCK_STID: 2745 return nfs4_show_lock(s, st); 2746 case NFS4_DELEG_STID: 2747 return nfs4_show_deleg(s, st); 2748 case NFS4_LAYOUT_STID: 2749 return nfs4_show_layout(s, st); 2750 default: 2751 return 0; /* XXX: or SEQ_SKIP? */ 2752 } 2753 /* XXX: copy stateids? */ 2754 } 2755 2756 static struct seq_operations states_seq_ops = { 2757 .start = states_start, 2758 .next = states_next, 2759 .stop = states_stop, 2760 .show = states_show 2761 }; 2762 2763 static int client_states_open(struct inode *inode, struct file *file) 2764 { 2765 struct seq_file *s; 2766 struct nfs4_client *clp; 2767 int ret; 2768 2769 clp = get_nfsdfs_clp(inode); 2770 if (!clp) 2771 return -ENXIO; 2772 2773 ret = seq_open(file, &states_seq_ops); 2774 if (ret) 2775 return ret; 2776 s = file->private_data; 2777 s->private = clp; 2778 return 0; 2779 } 2780 2781 static int client_opens_release(struct inode *inode, struct file *file) 2782 { 2783 struct seq_file *m = file->private_data; 2784 struct nfs4_client *clp = m->private; 2785 2786 /* XXX: alternatively, we could get/drop in seq start/stop */ 2787 drop_client(clp); 2788 return 0; 2789 } 2790 2791 static const struct file_operations client_states_fops = { 2792 .open = client_states_open, 2793 .read = seq_read, 2794 .llseek = seq_lseek, 2795 .release = client_opens_release, 2796 }; 2797 2798 /* 2799 * Normally we refuse to destroy clients that are in use, but here the 2800 * administrator is telling us to just do it. 
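 * A hedged example of the administrator-side trigger (the path
 * assumes the nfsd filesystem is mounted at /proc/fs/nfsd, and "42"
 * is a made-up client directory; client_ctl_write() below accepts
 * exactly the string "expire\n"):
 *
 *	# echo expire > /proc/fs/nfsd/clients/42/ctl
 *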
We also want to wait 2801 * so the caller has a guarantee that the client's locks are gone by 2802 * the time the write returns: 2803 */ 2804 static void force_expire_client(struct nfs4_client *clp) 2805 { 2806 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2807 bool already_expired; 2808 2809 trace_nfsd_clid_admin_expired(&clp->cl_clientid); 2810 2811 spin_lock(&nn->client_lock); 2812 clp->cl_time = 0; 2813 spin_unlock(&nn->client_lock); 2814 2815 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); 2816 spin_lock(&nn->client_lock); 2817 already_expired = list_empty(&clp->cl_lru); 2818 if (!already_expired) 2819 unhash_client_locked(clp); 2820 spin_unlock(&nn->client_lock); 2821 2822 if (!already_expired) 2823 expire_client(clp); 2824 else 2825 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); 2826 } 2827 2828 static ssize_t client_ctl_write(struct file *file, const char __user *buf, 2829 size_t size, loff_t *pos) 2830 { 2831 char *data; 2832 struct nfs4_client *clp; 2833 2834 data = simple_transaction_get(file, buf, size); 2835 if (IS_ERR(data)) 2836 return PTR_ERR(data); 2837 if (size != 7 || 0 != memcmp(data, "expire\n", 7)) 2838 return -EINVAL; 2839 clp = get_nfsdfs_clp(file_inode(file)); 2840 if (!clp) 2841 return -ENXIO; 2842 force_expire_client(clp); 2843 drop_client(clp); 2844 return 7; 2845 } 2846 2847 static const struct file_operations client_ctl_fops = { 2848 .write = client_ctl_write, 2849 .release = simple_transaction_release, 2850 }; 2851 2852 static const struct tree_descr client_files[] = { 2853 [0] = {"info", &client_info_fops, S_IRUSR}, 2854 [1] = {"states", &client_states_fops, S_IRUSR}, 2855 [2] = {"ctl", &client_ctl_fops, S_IWUSR}, 2856 [3] = {""}, 2857 }; 2858 2859 static int 2860 nfsd4_cb_recall_any_done(struct nfsd4_callback *cb, 2861 struct rpc_task *task) 2862 { 2863 trace_nfsd_cb_recall_any_done(cb, task); 2864 switch (task->tk_status) { 2865 case -NFS4ERR_DELAY: 2866 rpc_delay(task, 2 * HZ); 2867 return 0; 2868 default: 2869 return 1; 2870 } 2871 } 2872 2873 static void 2874 nfsd4_cb_recall_any_release(struct nfsd4_callback *cb) 2875 { 2876 struct nfs4_client *clp = cb->cb_clp; 2877 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2878 2879 spin_lock(&nn->client_lock); 2880 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); 2881 put_client_renew_locked(clp); 2882 spin_unlock(&nn->client_lock); 2883 } 2884 2885 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = { 2886 .done = nfsd4_cb_recall_any_done, 2887 .release = nfsd4_cb_recall_any_release, 2888 }; 2889 2890 static struct nfs4_client *create_client(struct xdr_netobj name, 2891 struct svc_rqst *rqstp, nfs4_verifier *verf) 2892 { 2893 struct nfs4_client *clp; 2894 struct sockaddr *sa = svc_addr(rqstp); 2895 int ret; 2896 struct net *net = SVC_NET(rqstp); 2897 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2898 struct dentry *dentries[ARRAY_SIZE(client_files)]; 2899 2900 clp = alloc_client(name, nn); 2901 if (clp == NULL) 2902 return NULL; 2903 2904 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 2905 if (ret) { 2906 free_client(clp); 2907 return NULL; 2908 } 2909 gen_clid(clp, nn); 2910 kref_init(&clp->cl_nfsdfs.cl_ref); 2911 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 2912 clp->cl_time = ktime_get_boottime_seconds(); 2913 clear_bit(0, &clp->cl_cb_slot_busy); 2914 copy_verf(clp, verf); 2915 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); 2916 clp->cl_cb_session = NULL; 2917 clp->net = net; 2918 clp->cl_nfsd_dentry = 
nfsd_client_mkdir( 2919 nn, &clp->cl_nfsdfs, 2920 clp->cl_clientid.cl_id - nn->clientid_base, 2921 client_files, dentries); 2922 clp->cl_nfsd_info_dentry = dentries[0]; 2923 if (!clp->cl_nfsd_dentry) { 2924 free_client(clp); 2925 return NULL; 2926 } 2927 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); 2928 if (!clp->cl_ra) { 2929 free_client(clp); 2930 return NULL; 2931 } 2932 clp->cl_ra_time = 0; 2933 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, 2934 NFSPROC4_CLNT_CB_RECALL_ANY); 2935 return clp; 2936 } 2937 2938 static void 2939 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 2940 { 2941 struct rb_node **new = &(root->rb_node), *parent = NULL; 2942 struct nfs4_client *clp; 2943 2944 while (*new) { 2945 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 2946 parent = *new; 2947 2948 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 2949 new = &((*new)->rb_left); 2950 else 2951 new = &((*new)->rb_right); 2952 } 2953 2954 rb_link_node(&new_clp->cl_namenode, parent, new); 2955 rb_insert_color(&new_clp->cl_namenode, root); 2956 } 2957 2958 static struct nfs4_client * 2959 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 2960 { 2961 int cmp; 2962 struct rb_node *node = root->rb_node; 2963 struct nfs4_client *clp; 2964 2965 while (node) { 2966 clp = rb_entry(node, struct nfs4_client, cl_namenode); 2967 cmp = compare_blob(&clp->cl_name, name); 2968 if (cmp > 0) 2969 node = node->rb_left; 2970 else if (cmp < 0) 2971 node = node->rb_right; 2972 else 2973 return clp; 2974 } 2975 return NULL; 2976 } 2977 2978 static void 2979 add_to_unconfirmed(struct nfs4_client *clp) 2980 { 2981 unsigned int idhashval; 2982 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2983 2984 lockdep_assert_held(&nn->client_lock); 2985 2986 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 2987 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 2988 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2989 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 2990 renew_client_locked(clp); 2991 } 2992 2993 static void 2994 move_to_confirmed(struct nfs4_client *clp) 2995 { 2996 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 2997 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2998 2999 lockdep_assert_held(&nn->client_lock); 3000 3001 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 3002 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 3003 add_clp_to_name_tree(clp, &nn->conf_name_tree); 3004 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 3005 trace_nfsd_clid_confirmed(&clp->cl_clientid); 3006 renew_client_locked(clp); 3007 } 3008 3009 static struct nfs4_client * 3010 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) 3011 { 3012 struct nfs4_client *clp; 3013 unsigned int idhashval = clientid_hashval(clid->cl_id); 3014 3015 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { 3016 if (same_clid(&clp->cl_clientid, clid)) { 3017 if ((bool)clp->cl_minorversion != sessions) 3018 return NULL; 3019 renew_client_locked(clp); 3020 return clp; 3021 } 3022 } 3023 return NULL; 3024 } 3025 3026 static struct nfs4_client * 3027 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 3028 { 3029 struct list_head *tbl = nn->conf_id_hashtbl; 3030 3031 lockdep_assert_held(&nn->client_lock); 3032 return find_client_in_id_table(tbl, clid, sessions); 3033 } 3034 3035 static struct nfs4_client * 3036 find_unconfirmed_client(clientid_t *clid, bool 
sessions, struct nfsd_net *nn) 3037 { 3038 struct list_head *tbl = nn->unconf_id_hashtbl; 3039 3040 lockdep_assert_held(&nn->client_lock); 3041 return find_client_in_id_table(tbl, clid, sessions); 3042 } 3043 3044 static bool clp_used_exchangeid(struct nfs4_client *clp) 3045 { 3046 return clp->cl_exchange_flags != 0; 3047 } 3048 3049 static struct nfs4_client * 3050 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 3051 { 3052 lockdep_assert_held(&nn->client_lock); 3053 return find_clp_in_name_tree(name, &nn->conf_name_tree); 3054 } 3055 3056 static struct nfs4_client * 3057 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 3058 { 3059 lockdep_assert_held(&nn->client_lock); 3060 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 3061 } 3062 3063 static void 3064 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 3065 { 3066 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 3067 struct sockaddr *sa = svc_addr(rqstp); 3068 u32 scopeid = rpc_get_scope_id(sa); 3069 unsigned short expected_family; 3070 3071 /* Currently, we only support tcp and tcp6 for the callback channel */ 3072 if (se->se_callback_netid_len == 3 && 3073 !memcmp(se->se_callback_netid_val, "tcp", 3)) 3074 expected_family = AF_INET; 3075 else if (se->se_callback_netid_len == 4 && 3076 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 3077 expected_family = AF_INET6; 3078 else 3079 goto out_err; 3080 3081 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, 3082 se->se_callback_addr_len, 3083 (struct sockaddr *)&conn->cb_addr, 3084 sizeof(conn->cb_addr)); 3085 3086 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 3087 goto out_err; 3088 3089 if (conn->cb_addr.ss_family == AF_INET6) 3090 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 3091 3092 conn->cb_prog = se->se_callback_prog; 3093 conn->cb_ident = se->se_callback_ident; 3094 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 3095 trace_nfsd_cb_args(clp, conn); 3096 return; 3097 out_err: 3098 conn->cb_addr.ss_family = AF_UNSPEC; 3099 conn->cb_addrlen = 0; 3100 trace_nfsd_cb_nodelegs(clp); 3101 return; 3102 } 3103 3104 /* 3105 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 3106 */ 3107 static void 3108 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 3109 { 3110 struct xdr_buf *buf = resp->xdr->buf; 3111 struct nfsd4_slot *slot = resp->cstate.slot; 3112 unsigned int base; 3113 3114 dprintk("--> %s slot %p\n", __func__, slot); 3115 3116 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 3117 slot->sl_opcnt = resp->opcnt; 3118 slot->sl_status = resp->cstate.status; 3119 free_svc_cred(&slot->sl_cred); 3120 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); 3121 3122 if (!nfsd4_cache_this(resp)) { 3123 slot->sl_flags &= ~NFSD4_SLOT_CACHED; 3124 return; 3125 } 3126 slot->sl_flags |= NFSD4_SLOT_CACHED; 3127 3128 base = resp->cstate.data_offset; 3129 slot->sl_datalen = buf->len - base; 3130 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) 3131 WARN(1, "%s: sessions DRC could not cache compound\n", 3132 __func__); 3133 return; 3134 } 3135 3136 /* 3137 * Encode the replay sequence operation from the slot values. 3138 * If cachethis is FALSE encode the uncached rep error on the next 3139 * operation which sets resp->p and increments resp->opcnt for 3140 * nfs4svc_encode_compoundres. 
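 * For example (a sketch of the wire-visible behaviour, not extra
 * server logic): replaying a three-op compound against a slot cached
 * with cachethis=false returns the cached SEQUENCE result followed
 * by a single op carrying nfserr_retry_uncached_rep, rather than all
 * three original results.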
3141 * 3142 */ 3143 static __be32 3144 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 3145 struct nfsd4_compoundres *resp) 3146 { 3147 struct nfsd4_op *op; 3148 struct nfsd4_slot *slot = resp->cstate.slot; 3149 3150 /* Encode the replayed sequence operation */ 3151 op = &args->ops[resp->opcnt - 1]; 3152 nfsd4_encode_operation(resp, op); 3153 3154 if (slot->sl_flags & NFSD4_SLOT_CACHED) 3155 return op->status; 3156 if (args->opcnt == 1) { 3157 /* 3158 * The original operation wasn't a solo sequence--we 3159 * always cache those--so this retry must not match the 3160 * original: 3161 */ 3162 op->status = nfserr_seq_false_retry; 3163 } else { 3164 op = &args->ops[resp->opcnt++]; 3165 op->status = nfserr_retry_uncached_rep; 3166 nfsd4_encode_operation(resp, op); 3167 } 3168 return op->status; 3169 } 3170 3171 /* 3172 * The sequence operation is not cached because we can use the slot and 3173 * session values. 3174 */ 3175 static __be32 3176 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 3177 struct nfsd4_sequence *seq) 3178 { 3179 struct nfsd4_slot *slot = resp->cstate.slot; 3180 struct xdr_stream *xdr = resp->xdr; 3181 __be32 *p; 3182 __be32 status; 3183 3184 dprintk("--> %s slot %p\n", __func__, slot); 3185 3186 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 3187 if (status) 3188 return status; 3189 3190 p = xdr_reserve_space(xdr, slot->sl_datalen); 3191 if (!p) { 3192 WARN_ON_ONCE(1); 3193 return nfserr_serverfault; 3194 } 3195 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); 3196 xdr_commit_encode(xdr); 3197 3198 resp->opcnt = slot->sl_opcnt; 3199 return slot->sl_status; 3200 } 3201 3202 /* 3203 * Set the exchange_id flags returned by the server. 3204 */ 3205 static void 3206 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 3207 { 3208 #ifdef CONFIG_NFSD_PNFS 3209 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; 3210 #else 3211 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 3212 #endif 3213 3214 /* Referrals are supported, Migration is not. */ 3215 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 3216 3217 /* set the wire flags to return to client. 
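 * (These same flags are what make clp_used_exchangeid() above return
 * true, which is how the server later recognizes a client that
 * arrived via EXCHANGE_ID.)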
*/ 3218 clid->flags = new->cl_exchange_flags; 3219 } 3220 3221 static bool client_has_openowners(struct nfs4_client *clp) 3222 { 3223 struct nfs4_openowner *oo; 3224 3225 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { 3226 if (!list_empty(&oo->oo_owner.so_stateids)) 3227 return true; 3228 } 3229 return false; 3230 } 3231 3232 static bool client_has_state(struct nfs4_client *clp) 3233 { 3234 return client_has_openowners(clp) 3235 #ifdef CONFIG_NFSD_PNFS 3236 || !list_empty(&clp->cl_lo_states) 3237 #endif 3238 || !list_empty(&clp->cl_delegations) 3239 || !list_empty(&clp->cl_sessions) 3240 || !list_empty(&clp->async_copies); 3241 } 3242 3243 static __be32 copy_impl_id(struct nfs4_client *clp, 3244 struct nfsd4_exchange_id *exid) 3245 { 3246 if (!exid->nii_domain.data) 3247 return 0; 3248 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); 3249 if (!clp->cl_nii_domain.data) 3250 return nfserr_jukebox; 3251 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); 3252 if (!clp->cl_nii_name.data) 3253 return nfserr_jukebox; 3254 clp->cl_nii_time = exid->nii_time; 3255 return 0; 3256 } 3257 3258 __be32 3259 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3260 union nfsd4_op_u *u) 3261 { 3262 struct nfsd4_exchange_id *exid = &u->exchange_id; 3263 struct nfs4_client *conf, *new; 3264 struct nfs4_client *unconf = NULL; 3265 __be32 status; 3266 char addr_str[INET6_ADDRSTRLEN]; 3267 nfs4_verifier verf = exid->verifier; 3268 struct sockaddr *sa = svc_addr(rqstp); 3269 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 3270 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3271 3272 rpc_ntop(sa, addr_str, sizeof(addr_str)); 3273 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 3274 "ip_addr=%s flags %x, spa_how %u\n", 3275 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 3276 addr_str, exid->flags, exid->spa_how); 3277 3278 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 3279 return nfserr_inval; 3280 3281 new = create_client(exid->clname, rqstp, &verf); 3282 if (new == NULL) 3283 return nfserr_jukebox; 3284 status = copy_impl_id(new, exid); 3285 if (status) 3286 goto out_nolock; 3287 3288 switch (exid->spa_how) { 3289 case SP4_MACH_CRED: 3290 exid->spo_must_enforce[0] = 0; 3291 exid->spo_must_enforce[1] = ( 3292 1 << (OP_BIND_CONN_TO_SESSION - 32) | 3293 1 << (OP_EXCHANGE_ID - 32) | 3294 1 << (OP_CREATE_SESSION - 32) | 3295 1 << (OP_DESTROY_SESSION - 32) | 3296 1 << (OP_DESTROY_CLIENTID - 32)); 3297 3298 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 3299 1 << (OP_OPEN_DOWNGRADE) | 3300 1 << (OP_LOCKU) | 3301 1 << (OP_DELEGRETURN)); 3302 3303 exid->spo_must_allow[1] &= ( 3304 1 << (OP_TEST_STATEID - 32) | 3305 1 << (OP_FREE_STATEID - 32)); 3306 if (!svc_rqst_integrity_protected(rqstp)) { 3307 status = nfserr_inval; 3308 goto out_nolock; 3309 } 3310 /* 3311 * Sometimes userspace doesn't give us a principal. 3312 * Which is a bug, really. 
Anyway, we can't enforce 3313 * MACH_CRED in that case, better to give up now: 3314 */ 3315 if (!new->cl_cred.cr_principal && 3316 !new->cl_cred.cr_raw_principal) { 3317 status = nfserr_serverfault; 3318 goto out_nolock; 3319 } 3320 new->cl_mach_cred = true; 3321 break; 3322 case SP4_NONE: 3323 break; 3324 default: /* checked by xdr code */ 3325 WARN_ON_ONCE(1); 3326 fallthrough; 3327 case SP4_SSV: 3328 status = nfserr_encr_alg_unsupp; 3329 goto out_nolock; 3330 } 3331 3332 /* Cases below refer to rfc 5661 section 18.35.4: */ 3333 spin_lock(&nn->client_lock); 3334 conf = find_confirmed_client_by_name(&exid->clname, nn); 3335 if (conf) { 3336 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 3337 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 3338 3339 if (update) { 3340 if (!clp_used_exchangeid(conf)) { /* buggy client */ 3341 status = nfserr_inval; 3342 goto out; 3343 } 3344 if (!nfsd4_mach_creds_match(conf, rqstp)) { 3345 status = nfserr_wrong_cred; 3346 goto out; 3347 } 3348 if (!creds_match) { /* case 9 */ 3349 status = nfserr_perm; 3350 goto out; 3351 } 3352 if (!verfs_match) { /* case 8 */ 3353 status = nfserr_not_same; 3354 goto out; 3355 } 3356 /* case 6 */ 3357 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 3358 trace_nfsd_clid_confirmed_r(conf); 3359 goto out_copy; 3360 } 3361 if (!creds_match) { /* case 3 */ 3362 if (client_has_state(conf)) { 3363 status = nfserr_clid_inuse; 3364 trace_nfsd_clid_cred_mismatch(conf, rqstp); 3365 goto out; 3366 } 3367 goto out_new; 3368 } 3369 if (verfs_match) { /* case 2 */ 3370 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 3371 trace_nfsd_clid_confirmed_r(conf); 3372 goto out_copy; 3373 } 3374 /* case 5, client reboot */ 3375 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf); 3376 conf = NULL; 3377 goto out_new; 3378 } 3379 3380 if (update) { /* case 7 */ 3381 status = nfserr_noent; 3382 goto out; 3383 } 3384 3385 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 3386 if (unconf) /* case 4, possible retry or client restart */ 3387 unhash_client_locked(unconf); 3388 3389 /* case 1, new owner ID */ 3390 trace_nfsd_clid_fresh(new); 3391 3392 out_new: 3393 if (conf) { 3394 status = mark_client_expired_locked(conf); 3395 if (status) 3396 goto out; 3397 trace_nfsd_clid_replaced(&conf->cl_clientid); 3398 } 3399 new->cl_minorversion = cstate->minorversion; 3400 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; 3401 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; 3402 3403 add_to_unconfirmed(new); 3404 swap(new, conf); 3405 out_copy: 3406 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; 3407 exid->clientid.cl_id = conf->cl_clientid.cl_id; 3408 3409 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; 3410 nfsd4_set_ex_flags(conf, exid); 3411 3412 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 3413 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); 3414 status = nfs_ok; 3415 3416 out: 3417 spin_unlock(&nn->client_lock); 3418 out_nolock: 3419 if (new) 3420 expire_client(new); 3421 if (unconf) { 3422 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 3423 expire_client(unconf); 3424 } 3425 return status; 3426 } 3427 3428 static __be32 3429 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 3430 { 3431 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 3432 slot_seqid); 3433 3434 /* The slot is in use, and no response has been sent. 
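	 * A concrete walk-through, with slot_seqid == 7 say: a request
	 * carrying 8 is the expected next call; 7 while the slot is
	 * still in use means the original is executing, so ask the
	 * client to retry (jukebox); 7 on an idle slot is a
	 * retransmission served from the replay cache; anything else
	 * is misordered.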
*/ 3435 if (slot_inuse) { 3436 if (seqid == slot_seqid) 3437 return nfserr_jukebox; 3438 else 3439 return nfserr_seq_misordered; 3440 } 3441 /* Note unsigned 32-bit arithmetic handles wraparound: */ 3442 if (likely(seqid == slot_seqid + 1)) 3443 return nfs_ok; 3444 if (seqid == slot_seqid) 3445 return nfserr_replay_cache; 3446 return nfserr_seq_misordered; 3447 } 3448 3449 /* 3450 * Cache the create session result into the create session single DRC 3451 * slot cache by saving the xdr structure. sl_seqid has been set. 3452 * Do this for solo or embedded create session operations. 3453 */ 3454 static void 3455 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 3456 struct nfsd4_clid_slot *slot, __be32 nfserr) 3457 { 3458 slot->sl_status = nfserr; 3459 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 3460 } 3461 3462 static __be32 3463 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 3464 struct nfsd4_clid_slot *slot) 3465 { 3466 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 3467 return slot->sl_status; 3468 } 3469 3470 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 3471 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 3472 1 + /* MIN tag is length with zero, only length */ \ 3473 3 + /* version, opcount, opcode */ \ 3474 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3475 /* seqid, slotID, slotID, cache */ \ 3476 4 ) * sizeof(__be32)) 3477 3478 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 3479 2 + /* verifier: AUTH_NULL, length 0 */\ 3480 1 + /* status */ \ 3481 1 + /* MIN tag is length with zero, only length */ \ 3482 3 + /* opcount, opcode, opstatus*/ \ 3483 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3484 /* seqid, slotID, slotID, slotID, status */ \ 3485 5 ) * sizeof(__be32)) 3486 3487 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 3488 { 3489 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; 3490 3491 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) 3492 return nfserr_toosmall; 3493 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) 3494 return nfserr_toosmall; 3495 ca->headerpadsz = 0; 3496 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 3497 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 3498 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 3499 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 3500 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 3501 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); 3502 /* 3503 * Note decreasing slot size below client's request may make it 3504 * difficult for client to function correctly, whereas 3505 * decreasing the number of slots will (just?) affect 3506 * performance. When short on memory we therefore prefer to 3507 * decrease number of slots instead of their size. Clients that 3508 * request larger slots than they need will get poor results: 3509 * Note that we always allow at least one slot, because our 3510 * accounting is soft and provides no guarantees either way. 3511 */ 3512 ca->maxreqs = nfsd4_get_drc_mem(ca, nn); 3513 3514 return nfs_ok; 3515 } 3516 3517 /* 3518 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. 3519 * These are based on similar macros in linux/sunrpc/msg_prot.h . 
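 * (Reading the arithmetic: each of the credential and the verifier
 * is budgeted a (flavor, length) pair plus AUTH_UNIX-sized slack,
 * which is where the 2 * (2 + UNX_CALLSLACK) term below appears to
 * come from.)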
3520 */ 3521 #define RPC_MAX_HEADER_WITH_AUTH_SYS \ 3522 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) 3523 3524 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ 3525 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) 3526 3527 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ 3528 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) 3529 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ 3530 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ 3531 sizeof(__be32)) 3532 3533 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) 3534 { 3535 ca->headerpadsz = 0; 3536 3537 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) 3538 return nfserr_toosmall; 3539 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) 3540 return nfserr_toosmall; 3541 ca->maxresp_cached = 0; 3542 if (ca->maxops < 2) 3543 return nfserr_toosmall; 3544 3545 return nfs_ok; 3546 } 3547 3548 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) 3549 { 3550 switch (cbs->flavor) { 3551 case RPC_AUTH_NULL: 3552 case RPC_AUTH_UNIX: 3553 return nfs_ok; 3554 default: 3555 /* 3556 * GSS case: the spec doesn't allow us to return this 3557 * error. But it also doesn't allow us not to support 3558 * GSS. 3559 * I'd rather this fail hard than return some error the 3560 * client might think it can already handle: 3561 */ 3562 return nfserr_encr_alg_unsupp; 3563 } 3564 } 3565 3566 __be32 3567 nfsd4_create_session(struct svc_rqst *rqstp, 3568 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 3569 { 3570 struct nfsd4_create_session *cr_ses = &u->create_session; 3571 struct sockaddr *sa = svc_addr(rqstp); 3572 struct nfs4_client *conf, *unconf; 3573 struct nfs4_client *old = NULL; 3574 struct nfsd4_session *new; 3575 struct nfsd4_conn *conn; 3576 struct nfsd4_clid_slot *cs_slot = NULL; 3577 __be32 status = 0; 3578 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3579 3580 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 3581 return nfserr_inval; 3582 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); 3583 if (status) 3584 return status; 3585 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); 3586 if (status) 3587 return status; 3588 status = check_backchannel_attrs(&cr_ses->back_channel); 3589 if (status) 3590 goto out_release_drc_mem; 3591 status = nfserr_jukebox; 3592 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); 3593 if (!new) 3594 goto out_release_drc_mem; 3595 conn = alloc_conn_from_crses(rqstp, cr_ses); 3596 if (!conn) 3597 goto out_free_session; 3598 3599 spin_lock(&nn->client_lock); 3600 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 3601 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 3602 WARN_ON_ONCE(conf && unconf); 3603 3604 if (conf) { 3605 status = nfserr_wrong_cred; 3606 if (!nfsd4_mach_creds_match(conf, rqstp)) 3607 goto out_free_conn; 3608 cs_slot = &conf->cl_cs_slot; 3609 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3610 if (status) { 3611 if (status == nfserr_replay_cache) 3612 status = nfsd4_replay_create_session(cr_ses, cs_slot); 3613 goto out_free_conn; 3614 } 3615 } else if (unconf) { 3616 status = nfserr_clid_inuse; 3617 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 3618 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 3619 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 3620 goto out_free_conn; 3621 } 3622 status = nfserr_wrong_cred; 3623 if (!nfsd4_mach_creds_match(unconf, rqstp)) 3624 goto out_free_conn; 3625 cs_slot = &unconf->cl_cs_slot; 3626 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3627 if (status) { 3628 /* an 
unconfirmed replay returns misordered */ 3629 status = nfserr_seq_misordered; 3630 goto out_free_conn; 3631 } 3632 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 3633 if (old) { 3634 status = mark_client_expired_locked(old); 3635 if (status) { 3636 old = NULL; 3637 goto out_free_conn; 3638 } 3639 trace_nfsd_clid_replaced(&old->cl_clientid); 3640 } 3641 move_to_confirmed(unconf); 3642 conf = unconf; 3643 } else { 3644 status = nfserr_stale_clientid; 3645 goto out_free_conn; 3646 } 3647 status = nfs_ok; 3648 /* Persistent sessions are not supported */ 3649 cr_ses->flags &= ~SESSION4_PERSIST; 3650 /* Upshifting from TCP to RDMA is not supported */ 3651 cr_ses->flags &= ~SESSION4_RDMA; 3652 3653 init_session(rqstp, new, conf, cr_ses); 3654 nfsd4_get_session_locked(new); 3655 3656 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 3657 NFS4_MAX_SESSIONID_LEN); 3658 cs_slot->sl_seqid++; 3659 cr_ses->seqid = cs_slot->sl_seqid; 3660 3661 /* cache solo and embedded create sessions under the client_lock */ 3662 nfsd4_cache_create_session(cr_ses, cs_slot, status); 3663 spin_unlock(&nn->client_lock); 3664 if (conf == unconf) 3665 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 3666 /* init connection and backchannel */ 3667 nfsd4_init_conn(rqstp, conn, new); 3668 nfsd4_put_session(new); 3669 if (old) 3670 expire_client(old); 3671 return status; 3672 out_free_conn: 3673 spin_unlock(&nn->client_lock); 3674 free_conn(conn); 3675 if (old) 3676 expire_client(old); 3677 out_free_session: 3678 __free_session(new); 3679 out_release_drc_mem: 3680 nfsd4_put_drc_mem(&cr_ses->fore_channel); 3681 return status; 3682 } 3683 3684 static __be32 nfsd4_map_bcts_dir(u32 *dir) 3685 { 3686 switch (*dir) { 3687 case NFS4_CDFC4_FORE: 3688 case NFS4_CDFC4_BACK: 3689 return nfs_ok; 3690 case NFS4_CDFC4_FORE_OR_BOTH: 3691 case NFS4_CDFC4_BACK_OR_BOTH: 3692 *dir = NFS4_CDFC4_BOTH; 3693 return nfs_ok; 3694 } 3695 return nfserr_inval; 3696 } 3697 3698 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, 3699 struct nfsd4_compound_state *cstate, 3700 union nfsd4_op_u *u) 3701 { 3702 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; 3703 struct nfsd4_session *session = cstate->session; 3704 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3705 __be32 status; 3706 3707 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); 3708 if (status) 3709 return status; 3710 spin_lock(&nn->client_lock); 3711 session->se_cb_prog = bc->bc_cb_program; 3712 session->se_cb_sec = bc->bc_cb_sec; 3713 spin_unlock(&nn->client_lock); 3714 3715 nfsd4_probe_callback(session->se_client); 3716 3717 return nfs_ok; 3718 } 3719 3720 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 3721 { 3722 struct nfsd4_conn *c; 3723 3724 list_for_each_entry(c, &s->se_conns, cn_persession) { 3725 if (c->cn_xprt == xpt) { 3726 return c; 3727 } 3728 } 3729 return NULL; 3730 } 3731 3732 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, 3733 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn) 3734 { 3735 struct nfs4_client *clp = session->se_client; 3736 struct svc_xprt *xpt = rqst->rq_xprt; 3737 struct nfsd4_conn *c; 3738 __be32 status; 3739 3740 /* Following the last paragraph of RFC 5661 Section 18.34.3: */ 3741 spin_lock(&clp->cl_lock); 3742 c = __nfsd4_find_conn(xpt, session); 3743 if (!c) 3744 status = nfserr_noent; 3745 else if (req == c->cn_flags) 3746 status = nfs_ok; 3747 else if (req == NFS4_CDFC4_FORE_OR_BOTH && 3748 c->cn_flags != NFS4_CDFC4_BACK) 3749 status 
= nfs_ok; 3750 else if (req == NFS4_CDFC4_BACK_OR_BOTH && 3751 c->cn_flags != NFS4_CDFC4_FORE) 3752 status = nfs_ok; 3753 else 3754 status = nfserr_inval; 3755 spin_unlock(&clp->cl_lock); 3756 if (status == nfs_ok && conn) 3757 *conn = c; 3758 return status; 3759 } 3760 3761 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 3762 struct nfsd4_compound_state *cstate, 3763 union nfsd4_op_u *u) 3764 { 3765 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; 3766 __be32 status; 3767 struct nfsd4_conn *conn; 3768 struct nfsd4_session *session; 3769 struct net *net = SVC_NET(rqstp); 3770 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3771 3772 if (!nfsd4_last_compound_op(rqstp)) 3773 return nfserr_not_only_op; 3774 spin_lock(&nn->client_lock); 3775 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); 3776 spin_unlock(&nn->client_lock); 3777 if (!session) 3778 goto out_no_session; 3779 status = nfserr_wrong_cred; 3780 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) 3781 goto out; 3782 status = nfsd4_match_existing_connection(rqstp, session, 3783 bcts->dir, &conn); 3784 if (status == nfs_ok) { 3785 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || 3786 bcts->dir == NFS4_CDFC4_BACK) 3787 conn->cn_flags |= NFS4_CDFC4_BACK; 3788 nfsd4_probe_callback(session->se_client); 3789 goto out; 3790 } 3791 if (status == nfserr_inval) 3792 goto out; 3793 status = nfsd4_map_bcts_dir(&bcts->dir); 3794 if (status) 3795 goto out; 3796 conn = alloc_conn(rqstp, bcts->dir); 3797 status = nfserr_jukebox; 3798 if (!conn) 3799 goto out; 3800 nfsd4_init_conn(rqstp, conn, session); 3801 status = nfs_ok; 3802 out: 3803 nfsd4_put_session(session); 3804 out_no_session: 3805 return status; 3806 } 3807 3808 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid) 3809 { 3810 if (!cstate->session) 3811 return false; 3812 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); 3813 } 3814 3815 __be32 3816 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, 3817 union nfsd4_op_u *u) 3818 { 3819 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; 3820 struct nfsd4_session *ses; 3821 __be32 status; 3822 int ref_held_by_me = 0; 3823 struct net *net = SVC_NET(r); 3824 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3825 3826 status = nfserr_not_only_op; 3827 if (nfsd4_compound_in_session(cstate, sessionid)) { 3828 if (!nfsd4_last_compound_op(r)) 3829 goto out; 3830 ref_held_by_me++; 3831 } 3832 dump_sessionid(__func__, sessionid); 3833 spin_lock(&nn->client_lock); 3834 ses = find_in_sessionid_hashtbl(sessionid, net, &status); 3835 if (!ses) 3836 goto out_client_lock; 3837 status = nfserr_wrong_cred; 3838 if (!nfsd4_mach_creds_match(ses->se_client, r)) 3839 goto out_put_session; 3840 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 3841 if (status) 3842 goto out_put_session; 3843 unhash_session(ses); 3844 spin_unlock(&nn->client_lock); 3845 3846 nfsd4_probe_callback_sync(ses->se_client); 3847 3848 spin_lock(&nn->client_lock); 3849 status = nfs_ok; 3850 out_put_session: 3851 nfsd4_put_session_locked(ses); 3852 out_client_lock: 3853 spin_unlock(&nn->client_lock); 3854 out: 3855 return status; 3856 } 3857 3858 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 3859 { 3860 struct nfs4_client *clp = ses->se_client; 3861 struct nfsd4_conn *c; 3862 __be32 status = nfs_ok; 3863 int ret; 3864 3865 spin_lock(&clp->cl_lock); 3866 c = 
__nfsd4_find_conn(new->cn_xprt, ses); 3867 if (c) 3868 goto out_free; 3869 status = nfserr_conn_not_bound_to_session; 3870 if (clp->cl_mach_cred) 3871 goto out_free; 3872 __nfsd4_hash_conn(new, ses); 3873 spin_unlock(&clp->cl_lock); 3874 ret = nfsd4_register_conn(new); 3875 if (ret) 3876 /* oops; xprt is already down: */ 3877 nfsd4_conn_lost(&new->cn_xpt_user); 3878 return nfs_ok; 3879 out_free: 3880 spin_unlock(&clp->cl_lock); 3881 free_conn(new); 3882 return status; 3883 } 3884 3885 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 3886 { 3887 struct nfsd4_compoundargs *args = rqstp->rq_argp; 3888 3889 return args->opcnt > session->se_fchannel.maxops; 3890 } 3891 3892 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 3893 struct nfsd4_session *session) 3894 { 3895 struct xdr_buf *xb = &rqstp->rq_arg; 3896 3897 return xb->len > session->se_fchannel.maxreq_sz; 3898 } 3899 3900 static bool replay_matches_cache(struct svc_rqst *rqstp, 3901 struct nfsd4_sequence *seq, struct nfsd4_slot *slot) 3902 { 3903 struct nfsd4_compoundargs *argp = rqstp->rq_argp; 3904 3905 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != 3906 (bool)seq->cachethis) 3907 return false; 3908 /* 3909 * If there's an error then the reply can have fewer ops than 3910 * the call. 3911 */ 3912 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) 3913 return false; 3914 /* 3915 * But if we cached a reply with *more* ops than the call you're 3916 * sending us now, then this new call is clearly not really a 3917 * replay of the old one: 3918 */ 3919 if (slot->sl_opcnt > argp->opcnt) 3920 return false; 3921 /* This is the only check explicitly called by spec: */ 3922 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) 3923 return false; 3924 /* 3925 * There may be more comparisons we could actually do, but the 3926 * spec doesn't require us to catch every case where the calls 3927 * don't match (that would require caching the call as well as 3928 * the reply), so we don't bother. 3929 */ 3930 return true; 3931 } 3932 3933 __be32 3934 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3935 union nfsd4_op_u *u) 3936 { 3937 struct nfsd4_sequence *seq = &u->sequence; 3938 struct nfsd4_compoundres *resp = rqstp->rq_resp; 3939 struct xdr_stream *xdr = resp->xdr; 3940 struct nfsd4_session *session; 3941 struct nfs4_client *clp; 3942 struct nfsd4_slot *slot; 3943 struct nfsd4_conn *conn; 3944 __be32 status; 3945 int buflen; 3946 struct net *net = SVC_NET(rqstp); 3947 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3948 3949 if (resp->opcnt != 1) 3950 return nfserr_sequence_pos; 3951 3952 /* 3953 * Will be either used or freed by nfsd4_sequence_check_conn 3954 * below. 
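 * Allocating it first keeps the allocation outside nn->client_lock's
 * critical section; if this transport turns out to already be bound
 * to the session, nfsd4_sequence_check_conn() simply frees the new
 * connection again.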
3955 */ 3956 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 3957 if (!conn) 3958 return nfserr_jukebox; 3959 3960 spin_lock(&nn->client_lock); 3961 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); 3962 if (!session) 3963 goto out_no_session; 3964 clp = session->se_client; 3965 3966 status = nfserr_too_many_ops; 3967 if (nfsd4_session_too_many_ops(rqstp, session)) 3968 goto out_put_session; 3969 3970 status = nfserr_req_too_big; 3971 if (nfsd4_request_too_big(rqstp, session)) 3972 goto out_put_session; 3973 3974 status = nfserr_badslot; 3975 if (seq->slotid >= session->se_fchannel.maxreqs) 3976 goto out_put_session; 3977 3978 slot = session->se_slots[seq->slotid]; 3979 dprintk("%s: slotid %d\n", __func__, seq->slotid); 3980 3981 /* We do not negotiate the number of slots yet, so set 3982 * maxslots to the session maxreqs, which is then used to encode 3983 * both sr_highest_slotid and sr_target_slotid */ 3984 seq->maxslots = session->se_fchannel.maxreqs; 3985 3986 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 3987 slot->sl_flags & NFSD4_SLOT_INUSE); 3988 if (status == nfserr_replay_cache) { 3989 status = nfserr_seq_misordered; 3990 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 3991 goto out_put_session; 3992 status = nfserr_seq_false_retry; 3993 if (!replay_matches_cache(rqstp, seq, slot)) 3994 goto out_put_session; 3995 cstate->slot = slot; 3996 cstate->session = session; 3997 cstate->clp = clp; 3998 /* Return the cached reply status and set cstate->status 3999 * for nfsd4_proc_compound processing */ 4000 status = nfsd4_replay_cache_entry(resp, seq); 4001 cstate->status = nfserr_replay_cache; 4002 goto out; 4003 } 4004 if (status) 4005 goto out_put_session; 4006 4007 status = nfsd4_sequence_check_conn(conn, session); 4008 conn = NULL; 4009 if (status) 4010 goto out_put_session; 4011 4012 buflen = (seq->cachethis) ? 4013 session->se_fchannel.maxresp_cached : 4014 session->se_fchannel.maxresp_sz; 4015 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : 4016 nfserr_rep_too_big; 4017 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) 4018 goto out_put_session; 4019 svc_reserve(rqstp, buflen); 4020 4021 status = nfs_ok; 4022 /* Success!
bump slot seqid */ 4023 slot->sl_seqid = seq->seqid; 4024 slot->sl_flags |= NFSD4_SLOT_INUSE; 4025 if (seq->cachethis) 4026 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 4027 else 4028 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 4029 4030 cstate->slot = slot; 4031 cstate->session = session; 4032 cstate->clp = clp; 4033 4034 out: 4035 switch (clp->cl_cb_state) { 4036 case NFSD4_CB_DOWN: 4037 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 4038 break; 4039 case NFSD4_CB_FAULT: 4040 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 4041 break; 4042 default: 4043 seq->status_flags = 0; 4044 } 4045 if (!list_empty(&clp->cl_revoked)) 4046 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; 4047 out_no_session: 4048 if (conn) 4049 free_conn(conn); 4050 spin_unlock(&nn->client_lock); 4051 return status; 4052 out_put_session: 4053 nfsd4_put_session_locked(session); 4054 goto out_no_session; 4055 } 4056 4057 void 4058 nfsd4_sequence_done(struct nfsd4_compoundres *resp) 4059 { 4060 struct nfsd4_compound_state *cs = &resp->cstate; 4061 4062 if (nfsd4_has_session(cs)) { 4063 if (cs->status != nfserr_replay_cache) { 4064 nfsd4_store_cache_entry(resp); 4065 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 4066 } 4067 /* Drop session reference that was taken in nfsd4_sequence() */ 4068 nfsd4_put_session(cs->session); 4069 } else if (cs->clp) 4070 put_client_renew(cs->clp); 4071 } 4072 4073 __be32 4074 nfsd4_destroy_clientid(struct svc_rqst *rqstp, 4075 struct nfsd4_compound_state *cstate, 4076 union nfsd4_op_u *u) 4077 { 4078 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; 4079 struct nfs4_client *conf, *unconf; 4080 struct nfs4_client *clp = NULL; 4081 __be32 status = 0; 4082 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4083 4084 spin_lock(&nn->client_lock); 4085 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 4086 conf = find_confirmed_client(&dc->clientid, true, nn); 4087 WARN_ON_ONCE(conf && unconf); 4088 4089 if (conf) { 4090 if (client_has_state(conf)) { 4091 status = nfserr_clientid_busy; 4092 goto out; 4093 } 4094 status = mark_client_expired_locked(conf); 4095 if (status) 4096 goto out; 4097 clp = conf; 4098 } else if (unconf) 4099 clp = unconf; 4100 else { 4101 status = nfserr_stale_clientid; 4102 goto out; 4103 } 4104 if (!nfsd4_mach_creds_match(clp, rqstp)) { 4105 clp = NULL; 4106 status = nfserr_wrong_cred; 4107 goto out; 4108 } 4109 trace_nfsd_clid_destroyed(&clp->cl_clientid); 4110 unhash_client_locked(clp); 4111 out: 4112 spin_unlock(&nn->client_lock); 4113 if (clp) 4114 expire_client(clp); 4115 return status; 4116 } 4117 4118 __be32 4119 nfsd4_reclaim_complete(struct svc_rqst *rqstp, 4120 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 4121 { 4122 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; 4123 struct nfs4_client *clp = cstate->clp; 4124 __be32 status = 0; 4125 4126 if (rc->rca_one_fs) { 4127 if (!cstate->current_fh.fh_dentry) 4128 return nfserr_nofilehandle; 4129 /* 4130 * We don't take advantage of the rca_one_fs case. 4131 * That's OK, it's optional, we can safely ignore it. 4132 */ 4133 return nfs_ok; 4134 } 4135 4136 status = nfserr_complete_already; 4137 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 4138 goto out; 4139 4140 status = nfserr_stale_clientid; 4141 if (is_client_expired(clp)) 4142 /* 4143 * The following error isn't really legal. 4144 * But we only get here if the client just explicitly 4145 * destroyed the client. 
Surely it no longer cares what 4146 * error it gets back on an operation for the dead 4147 * client. 4148 */ 4149 goto out; 4150 4151 status = nfs_ok; 4152 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); 4153 nfsd4_client_record_create(clp); 4154 inc_reclaim_complete(clp); 4155 out: 4156 return status; 4157 } 4158 4159 __be32 4160 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4161 union nfsd4_op_u *u) 4162 { 4163 struct nfsd4_setclientid *setclid = &u->setclientid; 4164 struct xdr_netobj clname = setclid->se_name; 4165 nfs4_verifier clverifier = setclid->se_verf; 4166 struct nfs4_client *conf, *new; 4167 struct nfs4_client *unconf = NULL; 4168 __be32 status; 4169 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4170 4171 new = create_client(clname, rqstp, &clverifier); 4172 if (new == NULL) 4173 return nfserr_jukebox; 4174 spin_lock(&nn->client_lock); 4175 conf = find_confirmed_client_by_name(&clname, nn); 4176 if (conf && client_has_state(conf)) { 4177 status = nfserr_clid_inuse; 4178 if (clp_used_exchangeid(conf)) 4179 goto out; 4180 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4181 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4182 goto out; 4183 } 4184 } 4185 unconf = find_unconfirmed_client_by_name(&clname, nn); 4186 if (unconf) 4187 unhash_client_locked(unconf); 4188 if (conf) { 4189 if (same_verf(&conf->cl_verifier, &clverifier)) { 4190 copy_clid(new, conf); 4191 gen_confirm(new, nn); 4192 } else 4193 trace_nfsd_clid_verf_mismatch(conf, rqstp, 4194 &clverifier); 4195 } else 4196 trace_nfsd_clid_fresh(new); 4197 new->cl_minorversion = 0; 4198 gen_callback(new, setclid, rqstp); 4199 add_to_unconfirmed(new); 4200 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 4201 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 4202 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 4203 new = NULL; 4204 status = nfs_ok; 4205 out: 4206 spin_unlock(&nn->client_lock); 4207 if (new) 4208 free_client(new); 4209 if (unconf) { 4210 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 4211 expire_client(unconf); 4212 } 4213 return status; 4214 } 4215 4216 __be32 4217 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 4218 struct nfsd4_compound_state *cstate, 4219 union nfsd4_op_u *u) 4220 { 4221 struct nfsd4_setclientid_confirm *setclientid_confirm = 4222 &u->setclientid_confirm; 4223 struct nfs4_client *conf, *unconf; 4224 struct nfs4_client *old = NULL; 4225 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 4226 clientid_t * clid = &setclientid_confirm->sc_clientid; 4227 __be32 status; 4228 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4229 4230 if (STALE_CLIENTID(clid, nn)) 4231 return nfserr_stale_clientid; 4232 4233 spin_lock(&nn->client_lock); 4234 conf = find_confirmed_client(clid, false, nn); 4235 unconf = find_unconfirmed_client(clid, false, nn); 4236 /* 4237 * We try hard to give out unique clientid's, so if we get an 4238 * attempt to confirm the same clientid with a different cred, 4239 * the client may be buggy; this should never happen. 
4240 * 4241 * Nevertheless, RFC 7530 recommends INUSE for this case: 4242 */ 4243 status = nfserr_clid_inuse; 4244 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { 4245 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 4246 goto out; 4247 } 4248 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4249 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4250 goto out; 4251 } 4252 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 4253 if (conf && same_verf(&confirm, &conf->cl_confirm)) { 4254 status = nfs_ok; 4255 } else 4256 status = nfserr_stale_clientid; 4257 goto out; 4258 } 4259 status = nfs_ok; 4260 if (conf) { 4261 old = unconf; 4262 unhash_client_locked(old); 4263 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 4264 } else { 4265 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 4266 if (old) { 4267 status = nfserr_clid_inuse; 4268 if (client_has_state(old) 4269 && !same_creds(&unconf->cl_cred, 4270 &old->cl_cred)) { 4271 old = NULL; 4272 goto out; 4273 } 4274 status = mark_client_expired_locked(old); 4275 if (status) { 4276 old = NULL; 4277 goto out; 4278 } 4279 trace_nfsd_clid_replaced(&old->cl_clientid); 4280 } 4281 move_to_confirmed(unconf); 4282 conf = unconf; 4283 } 4284 get_client_locked(conf); 4285 spin_unlock(&nn->client_lock); 4286 if (conf == unconf) 4287 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 4288 nfsd4_probe_callback(conf); 4289 spin_lock(&nn->client_lock); 4290 put_client_renew_locked(conf); 4291 out: 4292 spin_unlock(&nn->client_lock); 4293 if (old) 4294 expire_client(old); 4295 return status; 4296 } 4297 4298 static struct nfs4_file *nfsd4_alloc_file(void) 4299 { 4300 return kmem_cache_alloc(file_slab, GFP_KERNEL); 4301 } 4302 4303 /* OPEN Share state helper functions */ 4304 4305 static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp) 4306 { 4307 refcount_set(&fp->fi_ref, 1); 4308 spin_lock_init(&fp->fi_lock); 4309 INIT_LIST_HEAD(&fp->fi_stateids); 4310 INIT_LIST_HEAD(&fp->fi_delegations); 4311 INIT_LIST_HEAD(&fp->fi_clnt_odstate); 4312 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); 4313 fp->fi_deleg_file = NULL; 4314 fp->fi_had_conflict = false; 4315 fp->fi_share_deny = 0; 4316 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 4317 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 4318 fp->fi_aliased = false; 4319 fp->fi_inode = d_inode(fh->fh_dentry); 4320 #ifdef CONFIG_NFSD_PNFS 4321 INIT_LIST_HEAD(&fp->fi_lo_states); 4322 atomic_set(&fp->fi_lo_recalls, 0); 4323 #endif 4324 } 4325 4326 void 4327 nfsd4_free_slabs(void) 4328 { 4329 kmem_cache_destroy(client_slab); 4330 kmem_cache_destroy(openowner_slab); 4331 kmem_cache_destroy(lockowner_slab); 4332 kmem_cache_destroy(file_slab); 4333 kmem_cache_destroy(stateid_slab); 4334 kmem_cache_destroy(deleg_slab); 4335 kmem_cache_destroy(odstate_slab); 4336 } 4337 4338 int 4339 nfsd4_init_slabs(void) 4340 { 4341 client_slab = kmem_cache_create("nfsd4_clients", 4342 sizeof(struct nfs4_client), 0, 0, NULL); 4343 if (client_slab == NULL) 4344 goto out; 4345 openowner_slab = kmem_cache_create("nfsd4_openowners", 4346 sizeof(struct nfs4_openowner), 0, 0, NULL); 4347 if (openowner_slab == NULL) 4348 goto out_free_client_slab; 4349 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 4350 sizeof(struct nfs4_lockowner), 0, 0, NULL); 4351 if (lockowner_slab == NULL) 4352 goto out_free_openowner_slab; 4353 file_slab = kmem_cache_create("nfsd4_files", 4354 sizeof(struct nfs4_file), 0, 0, NULL); 4355 if (file_slab == NULL) 4356 goto out_free_lockowner_slab; 4357 stateid_slab 
= kmem_cache_create("nfsd4_stateids", 4358 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 4359 if (stateid_slab == NULL) 4360 goto out_free_file_slab; 4361 deleg_slab = kmem_cache_create("nfsd4_delegations", 4362 sizeof(struct nfs4_delegation), 0, 0, NULL); 4363 if (deleg_slab == NULL) 4364 goto out_free_stateid_slab; 4365 odstate_slab = kmem_cache_create("nfsd4_odstate", 4366 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); 4367 if (odstate_slab == NULL) 4368 goto out_free_deleg_slab; 4369 return 0; 4370 4371 out_free_deleg_slab: 4372 kmem_cache_destroy(deleg_slab); 4373 out_free_stateid_slab: 4374 kmem_cache_destroy(stateid_slab); 4375 out_free_file_slab: 4376 kmem_cache_destroy(file_slab); 4377 out_free_lockowner_slab: 4378 kmem_cache_destroy(lockowner_slab); 4379 out_free_openowner_slab: 4380 kmem_cache_destroy(openowner_slab); 4381 out_free_client_slab: 4382 kmem_cache_destroy(client_slab); 4383 out: 4384 return -ENOMEM; 4385 } 4386 4387 static unsigned long 4388 nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) 4389 { 4390 int count; 4391 struct nfsd_net *nn = container_of(shrink, 4392 struct nfsd_net, nfsd_client_shrinker); 4393 4394 count = atomic_read(&nn->nfsd_courtesy_clients); 4395 if (!count) 4396 count = atomic_long_read(&num_delegations); 4397 if (count) 4398 queue_work(laundry_wq, &nn->nfsd_shrinker_work); 4399 return (unsigned long)count; 4400 } 4401 4402 static unsigned long 4403 nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) 4404 { 4405 return SHRINK_STOP; 4406 } 4407 4408 void 4409 nfsd4_init_leases_net(struct nfsd_net *nn) 4410 { 4411 struct sysinfo si; 4412 u64 max_clients; 4413 4414 nn->nfsd4_lease = 90; /* default lease time */ 4415 nn->nfsd4_grace = 90; 4416 nn->somebody_reclaimed = false; 4417 nn->track_reclaim_completes = false; 4418 nn->clverifier_counter = get_random_u32(); 4419 nn->clientid_base = get_random_u32(); 4420 nn->clientid_counter = nn->clientid_base + 1; 4421 nn->s2s_cp_cl_id = nn->clientid_counter++; 4422 4423 atomic_set(&nn->nfs4_client_count, 0); 4424 si_meminfo(&si); 4425 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024); 4426 max_clients *= NFS4_CLIENTS_PER_GB; 4427 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); 4428 4429 atomic_set(&nn->nfsd_courtesy_clients, 0); 4430 } 4431 4432 static void init_nfs4_replay(struct nfs4_replay *rp) 4433 { 4434 rp->rp_status = nfserr_serverfault; 4435 rp->rp_buflen = 0; 4436 rp->rp_buf = rp->rp_ibuf; 4437 mutex_init(&rp->rp_mutex); 4438 } 4439 4440 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, 4441 struct nfs4_stateowner *so) 4442 { 4443 if (!nfsd4_has_session(cstate)) { 4444 mutex_lock(&so->so_replay.rp_mutex); 4445 cstate->replay_owner = nfs4_get_stateowner(so); 4446 } 4447 } 4448 4449 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) 4450 { 4451 struct nfs4_stateowner *so = cstate->replay_owner; 4452 4453 if (so != NULL) { 4454 cstate->replay_owner = NULL; 4455 mutex_unlock(&so->so_replay.rp_mutex); 4456 nfs4_put_stateowner(so); 4457 } 4458 } 4459 4460 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 4461 { 4462 struct nfs4_stateowner *sop; 4463 4464 sop = kmem_cache_alloc(slab, GFP_KERNEL); 4465 if (!sop) 4466 return NULL; 4467 4468 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); 4469 if (!sop->so_owner.data) { 4470 kmem_cache_free(slab, sop); 4471 return NULL; 4472 } 4473 4474 
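	/* Common stateowner initialization, shared by open- and lockowners;
	 * the caller fills in the type-specific fields (so_ops,
	 * so_is_open_owner, so_seqid) after this returns. */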
INIT_LIST_HEAD(&sop->so_stateids); 4475 sop->so_client = clp; 4476 init_nfs4_replay(&sop->so_replay); 4477 atomic_set(&sop->so_count, 1); 4478 return sop; 4479 } 4480 4481 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 4482 { 4483 lockdep_assert_held(&clp->cl_lock); 4484 4485 list_add(&oo->oo_owner.so_strhash, 4486 &clp->cl_ownerstr_hashtbl[strhashval]); 4487 list_add(&oo->oo_perclient, &clp->cl_openowners); 4488 } 4489 4490 static void nfs4_unhash_openowner(struct nfs4_stateowner *so) 4491 { 4492 unhash_openowner_locked(openowner(so)); 4493 } 4494 4495 static void nfs4_free_openowner(struct nfs4_stateowner *so) 4496 { 4497 struct nfs4_openowner *oo = openowner(so); 4498 4499 kmem_cache_free(openowner_slab, oo); 4500 } 4501 4502 static const struct nfs4_stateowner_operations openowner_ops = { 4503 .so_unhash = nfs4_unhash_openowner, 4504 .so_free = nfs4_free_openowner, 4505 }; 4506 4507 static struct nfs4_ol_stateid * 4508 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4509 { 4510 struct nfs4_ol_stateid *local, *ret = NULL; 4511 struct nfs4_openowner *oo = open->op_openowner; 4512 4513 lockdep_assert_held(&fp->fi_lock); 4514 4515 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 4516 /* ignore lock owners */ 4517 if (local->st_stateowner->so_is_open_owner == 0) 4518 continue; 4519 if (local->st_stateowner != &oo->oo_owner) 4520 continue; 4521 if (local->st_stid.sc_type == NFS4_OPEN_STID) { 4522 ret = local; 4523 refcount_inc(&ret->st_stid.sc_count); 4524 break; 4525 } 4526 } 4527 return ret; 4528 } 4529 4530 static __be32 4531 nfsd4_verify_open_stid(struct nfs4_stid *s) 4532 { 4533 __be32 ret = nfs_ok; 4534 4535 switch (s->sc_type) { 4536 default: 4537 break; 4538 case 0: 4539 case NFS4_CLOSED_STID: 4540 case NFS4_CLOSED_DELEG_STID: 4541 ret = nfserr_bad_stateid; 4542 break; 4543 case NFS4_REVOKED_DELEG_STID: 4544 ret = nfserr_deleg_revoked; 4545 } 4546 return ret; 4547 } 4548 4549 /* Lock the stateid st_mutex, and deal with races with CLOSE */ 4550 static __be32 4551 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) 4552 { 4553 __be32 ret; 4554 4555 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); 4556 ret = nfsd4_verify_open_stid(&stp->st_stid); 4557 if (ret != nfs_ok) 4558 mutex_unlock(&stp->st_mutex); 4559 return ret; 4560 } 4561 4562 static struct nfs4_ol_stateid * 4563 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4564 { 4565 struct nfs4_ol_stateid *stp; 4566 for (;;) { 4567 spin_lock(&fp->fi_lock); 4568 stp = nfsd4_find_existing_open(fp, open); 4569 spin_unlock(&fp->fi_lock); 4570 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) 4571 break; 4572 nfs4_put_stid(&stp->st_stid); 4573 } 4574 return stp; 4575 } 4576 4577 static struct nfs4_openowner * 4578 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, 4579 struct nfsd4_compound_state *cstate) 4580 { 4581 struct nfs4_client *clp = cstate->clp; 4582 struct nfs4_openowner *oo, *ret; 4583 4584 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 4585 if (!oo) 4586 return NULL; 4587 oo->oo_owner.so_ops = &openowner_ops; 4588 oo->oo_owner.so_is_open_owner = 1; 4589 oo->oo_owner.so_seqid = open->op_seqid; 4590 oo->oo_flags = 0; 4591 if (nfsd4_has_session(cstate)) 4592 oo->oo_flags |= NFS4_OO_CONFIRMED; 4593 oo->oo_time = 0; 4594 oo->oo_last_closed_stid = NULL; 4595 INIT_LIST_HEAD(&oo->oo_close_lru); 4596 spin_lock(&clp->cl_lock); 4597 ret = 
find_openstateowner_str_locked(strhashval, open, clp); 4598 if (ret == NULL) { 4599 hash_openowner(oo, clp, strhashval); 4600 ret = oo; 4601 } else 4602 nfs4_free_stateowner(&oo->oo_owner); 4603 4604 spin_unlock(&clp->cl_lock); 4605 return ret; 4606 } 4607 4608 static struct nfs4_ol_stateid * 4609 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) 4610 { 4611 4612 struct nfs4_openowner *oo = open->op_openowner; 4613 struct nfs4_ol_stateid *retstp = NULL; 4614 struct nfs4_ol_stateid *stp; 4615 4616 stp = open->op_stp; 4617 /* Initialize and take st_mutex before the spinlocks below; a mutex must not be acquired while a spinlock is held */ 4618 mutex_init(&stp->st_mutex); 4619 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 4620 4621 retry: 4622 spin_lock(&oo->oo_owner.so_client->cl_lock); 4623 spin_lock(&fp->fi_lock); 4624 4625 retstp = nfsd4_find_existing_open(fp, open); 4626 if (retstp) 4627 goto out_unlock; 4628 4629 open->op_stp = NULL; 4630 refcount_inc(&stp->st_stid.sc_count); 4631 stp->st_stid.sc_type = NFS4_OPEN_STID; 4632 INIT_LIST_HEAD(&stp->st_locks); 4633 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); 4634 get_nfs4_file(fp); 4635 stp->st_stid.sc_file = fp; 4636 stp->st_access_bmap = 0; 4637 stp->st_deny_bmap = 0; 4638 stp->st_openstp = NULL; 4639 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 4640 list_add(&stp->st_perfile, &fp->fi_stateids); 4641 4642 out_unlock: 4643 spin_unlock(&fp->fi_lock); 4644 spin_unlock(&oo->oo_owner.so_client->cl_lock); 4645 if (retstp) { 4646 /* Handle races with CLOSE */ 4647 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 4648 nfs4_put_stid(&retstp->st_stid); 4649 goto retry; 4650 } 4651 /* To keep mutex tracking happy */ 4652 mutex_unlock(&stp->st_mutex); 4653 stp = retstp; 4654 } 4655 return stp; 4656 } 4657 4658 /* 4659 * In the 4.0 case we need to keep the owners around a little while to handle 4660 * CLOSE replay. We still need to release any file access held by them 4661 * before returning, however. 4662 */ 4663 static void 4664 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) 4665 { 4666 struct nfs4_ol_stateid *last; 4667 struct nfs4_openowner *oo = openowner(s->st_stateowner); 4668 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, 4669 nfsd_net_id); 4670 4671 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 4672 4673 /* 4674 * We know that we hold one reference via nfsd4_close, and another 4675 * "persistent" reference for the client. If the refcount is higher 4676 * than 2, then there are still calls in progress that are using this 4677 * stateid. We can't put the sc_file reference until they are finished. 4678 * Wait for the refcount to drop to 2. Since it has been unhashed, 4679 * there should be no danger of the refcount going back up again at 4680 * this point.
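 * (Concretely: one of the two remaining references belongs to the
 * in-progress nfsd4_close, and the other is the stateid's own
 * long-lived reference, which below is handed over to
 * oo_last_closed_stid until the owner ages off the close_lru.)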
4681 */ 4682 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); 4683 4684 release_all_access(s); 4685 if (s->st_stid.sc_file) { 4686 put_nfs4_file(s->st_stid.sc_file); 4687 s->st_stid.sc_file = NULL; 4688 } 4689 4690 spin_lock(&nn->client_lock); 4691 last = oo->oo_last_closed_stid; 4692 oo->oo_last_closed_stid = s; 4693 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 4694 oo->oo_time = ktime_get_boottime_seconds(); 4695 spin_unlock(&nn->client_lock); 4696 if (last) 4697 nfs4_put_stid(&last->st_stid); 4698 } 4699 4700 static noinline_for_stack struct nfs4_file * 4701 nfsd4_file_hash_lookup(const struct svc_fh *fhp) 4702 { 4703 struct inode *inode = d_inode(fhp->fh_dentry); 4704 struct rhlist_head *tmp, *list; 4705 struct nfs4_file *fi; 4706 4707 rcu_read_lock(); 4708 list = rhltable_lookup(&nfs4_file_rhltable, &inode, 4709 nfs4_file_rhash_params); 4710 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { 4711 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { 4712 if (refcount_inc_not_zero(&fi->fi_ref)) { 4713 rcu_read_unlock(); 4714 return fi; 4715 } 4716 } 4717 } 4718 rcu_read_unlock(); 4719 return NULL; 4720 } 4721 4722 /* 4723 * On hash insertion, identify entries with the same inode but 4724 * distinct filehandles. They will all be on the list returned 4725 * by rhltable_lookup(). 4726 * 4727 * inode->i_lock prevents racing insertions from adding an entry 4728 * for the same inode/fhp pair twice. 4729 */ 4730 static noinline_for_stack struct nfs4_file * 4731 nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp) 4732 { 4733 struct inode *inode = d_inode(fhp->fh_dentry); 4734 struct rhlist_head *tmp, *list; 4735 struct nfs4_file *ret = NULL; 4736 bool alias_found = false; 4737 struct nfs4_file *fi; 4738 int err; 4739 4740 rcu_read_lock(); 4741 spin_lock(&inode->i_lock); 4742 4743 list = rhltable_lookup(&nfs4_file_rhltable, &inode, 4744 nfs4_file_rhash_params); 4745 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { 4746 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { 4747 if (refcount_inc_not_zero(&fi->fi_ref)) 4748 ret = fi; 4749 } else 4750 fi->fi_aliased = alias_found = true; 4751 } 4752 if (ret) 4753 goto out_unlock; 4754 4755 nfsd4_file_init(fhp, new); 4756 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist, 4757 nfs4_file_rhash_params); 4758 if (err) 4759 goto out_unlock; 4760 4761 new->fi_aliased = alias_found; 4762 ret = new; 4763 4764 out_unlock: 4765 spin_unlock(&inode->i_lock); 4766 rcu_read_unlock(); 4767 return ret; 4768 } 4769 4770 static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi) 4771 { 4772 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist, 4773 nfs4_file_rhash_params); 4774 } 4775 4776 /* 4777 * Called to check deny when READ with all zero stateid or 4778 * WRITE with all zero or all one stateid 4779 */ 4780 static __be32 4781 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 4782 { 4783 struct nfs4_file *fp; 4784 __be32 ret = nfs_ok; 4785 4786 fp = nfsd4_file_hash_lookup(current_fh); 4787 if (!fp) 4788 return ret; 4789 4790 /* Check for conflicting share reservations */ 4791 spin_lock(&fp->fi_lock); 4792 if (fp->fi_share_deny & deny_type) 4793 ret = nfserr_locked; 4794 spin_unlock(&fp->fi_lock); 4795 put_nfs4_file(fp); 4796 return ret; 4797 } 4798 4799 static bool nfsd4_deleg_present(const struct inode *inode) 4800 { 4801 struct file_lock_context *ctx = locks_inode_context(inode); 4802 4803 return ctx && !list_empty_careful(&ctx->flc_lease); 4804 } 4805 4806 /** 4807 * 
nfsd_wait_for_delegreturn - wait for delegations to be returned 4808 * @rqstp: the RPC transaction being executed 4809 * @inode: in-core inode of the file being waited for 4810 * 4811 * The timeout prevents deadlock if all nfsd threads happen to be 4812 * tied up waiting for returning delegations. 4813 * 4814 * Return values: 4815 * %true: delegation was returned 4816 * %false: timed out waiting for delegreturn 4817 */ 4818 bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode) 4819 { 4820 long __maybe_unused timeo; 4821 4822 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode), 4823 NFSD_DELEGRETURN_TIMEOUT); 4824 trace_nfsd_delegret_wakeup(rqstp, inode, timeo); 4825 return timeo > 0; 4826 } 4827 4828 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) 4829 { 4830 struct nfs4_delegation *dp = cb_to_delegation(cb); 4831 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, 4832 nfsd_net_id); 4833 4834 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); 4835 4836 /* 4837 * We can't do this in nfsd_break_deleg_cb because it is 4838 * already holding inode->i_lock. 4839 * 4840 * If the dl_time != 0, then we know that it has already been 4841 * queued for a lease break. Don't queue it again. 4842 */ 4843 spin_lock(&state_lock); 4844 if (delegation_hashed(dp) && dp->dl_time == 0) { 4845 dp->dl_time = ktime_get_boottime_seconds(); 4846 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 4847 } 4848 spin_unlock(&state_lock); 4849 } 4850 4851 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, 4852 struct rpc_task *task) 4853 { 4854 struct nfs4_delegation *dp = cb_to_delegation(cb); 4855 4856 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task); 4857 4858 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID || 4859 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) 4860 return 1; 4861 4862 switch (task->tk_status) { 4863 case 0: 4864 return 1; 4865 case -NFS4ERR_DELAY: 4866 rpc_delay(task, 2 * HZ); 4867 return 0; 4868 case -EBADHANDLE: 4869 case -NFS4ERR_BAD_STATEID: 4870 /* 4871 * Race: client probably got cb_recall before open reply 4872 * granting delegation. 4873 */ 4874 if (dp->dl_retries--) { 4875 rpc_delay(task, 2 * HZ); 4876 return 0; 4877 } 4878 fallthrough; 4879 default: 4880 return 1; 4881 } 4882 } 4883 4884 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) 4885 { 4886 struct nfs4_delegation *dp = cb_to_delegation(cb); 4887 4888 nfs4_put_stid(&dp->dl_stid); 4889 } 4890 4891 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { 4892 .prepare = nfsd4_cb_recall_prepare, 4893 .done = nfsd4_cb_recall_done, 4894 .release = nfsd4_cb_recall_release, 4895 }; 4896 4897 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 4898 { 4899 /* 4900 * We're assuming the state code never drops its reference 4901 * without first removing the lease. Since we're in this lease 4902 * callback (and since the lease code is serialized by the 4903 * flc_lock) we know the server hasn't removed the lease yet, and 4904 * we know it's safe to take a reference. 4905 */ 4906 refcount_inc(&dp->dl_stid.sc_count); 4907 WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall)); 4908 } 4909 4910 /* Called from break_lease() with flc_lock held. 
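 * Nothing here may sleep, since flc_lock is a spinlock: the CB_RECALL
 * itself is queued to a workqueue by nfsd_break_one_deleg(), and
 * returning false leaves the lease in place until the delegation is
 * returned or the laundromat removes it.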
*/ 4911 static bool 4912 nfsd_break_deleg_cb(struct file_lock *fl) 4913 { 4914 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; 4915 struct nfs4_file *fp = dp->dl_stid.sc_file; 4916 struct nfs4_client *clp = dp->dl_stid.sc_client; 4917 struct nfsd_net *nn; 4918 4919 trace_nfsd_cb_recall(&dp->dl_stid); 4920 4921 dp->dl_recalled = true; 4922 atomic_inc(&clp->cl_delegs_in_recall); 4923 if (try_to_expire_client(clp)) { 4924 nn = net_generic(clp->net, nfsd_net_id); 4925 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 4926 } 4927 4928 /* 4929 * We don't want the locks code to time out the lease for us; 4930 * we'll remove it ourselves if a delegation isn't returned 4931 * in time: 4932 */ 4933 fl->fl_break_time = 0; 4934 4935 spin_lock(&fp->fi_lock); 4936 fp->fi_had_conflict = true; 4937 nfsd_break_one_deleg(dp); 4938 spin_unlock(&fp->fi_lock); 4939 return false; 4940 } 4941 4942 /** 4943 * nfsd_breaker_owns_lease - Check if lease conflict was resolved 4944 * @fl: Lock state to check 4945 * 4946 * Return values: 4947 * %true: Lease conflict was resolved 4948 * %false: Lease conflict was not resolved. 4949 */ 4950 static bool nfsd_breaker_owns_lease(struct file_lock *fl) 4951 { 4952 struct nfs4_delegation *dl = fl->fl_owner; 4953 struct svc_rqst *rqst; 4954 struct nfs4_client *clp; 4955 4956 if (!i_am_nfsd()) 4957 return false; 4958 rqst = kthread_data(current); 4959 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */ 4960 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) 4961 return false; 4962 clp = *(rqst->rq_lease_breaker); 4963 return dl->dl_stid.sc_client == clp; 4964 } 4965 4966 static int 4967 nfsd_change_deleg_cb(struct file_lock *onlist, int arg, 4968 struct list_head *dispose) 4969 { 4970 struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner; 4971 struct nfs4_client *clp = dp->dl_stid.sc_client; 4972 4973 if (arg & F_UNLCK) { 4974 if (dp->dl_recalled) 4975 atomic_dec(&clp->cl_delegs_in_recall); 4976 return lease_modify(onlist, arg, dispose); 4977 } else 4978 return -EAGAIN; 4979 } 4980 4981 static const struct lock_manager_operations nfsd_lease_mng_ops = { 4982 .lm_breaker_owns_lease = nfsd_breaker_owns_lease, 4983 .lm_break = nfsd_break_deleg_cb, 4984 .lm_change = nfsd_change_deleg_cb, 4985 }; 4986 4987 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 4988 { 4989 if (nfsd4_has_session(cstate)) 4990 return nfs_ok; 4991 if (seqid == so->so_seqid - 1) 4992 return nfserr_replay_me; 4993 if (seqid == so->so_seqid) 4994 return nfs_ok; 4995 return nfserr_bad_seqid; 4996 } 4997 4998 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions, 4999 struct nfsd_net *nn) 5000 { 5001 struct nfs4_client *found; 5002 5003 spin_lock(&nn->client_lock); 5004 found = find_confirmed_client(clid, sessions, nn); 5005 if (found) 5006 atomic_inc(&found->cl_rpc_users); 5007 spin_unlock(&nn->client_lock); 5008 return found; 5009 } 5010 5011 static __be32 set_client(clientid_t *clid, 5012 struct nfsd4_compound_state *cstate, 5013 struct nfsd_net *nn) 5014 { 5015 if (cstate->clp) { 5016 if (!same_clid(&cstate->clp->cl_clientid, clid)) 5017 return nfserr_stale_clientid; 5018 return nfs_ok; 5019 } 5020 if (STALE_CLIENTID(clid, nn)) 5021 return nfserr_stale_clientid; 5022 /* 5023 * We're in the 4.0 case (otherwise the SEQUENCE op would have 5024 * set cstate->clp), so session = false: 5025 */ 5026 cstate->clp = lookup_clientid(clid, false, nn); 5027 if (!cstate->clp) 5028 return
nfserr_expired; 5029 return nfs_ok; 5030 } 5031 5032 __be32 5033 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 5034 struct nfsd4_open *open, struct nfsd_net *nn) 5035 { 5036 clientid_t *clientid = &open->op_clientid; 5037 struct nfs4_client *clp = NULL; 5038 unsigned int strhashval; 5039 struct nfs4_openowner *oo = NULL; 5040 __be32 status; 5041 5042 /* 5043 * In case we need it later, after we've already created the 5044 * file and don't want to risk a further failure: 5045 */ 5046 open->op_file = nfsd4_alloc_file(); 5047 if (open->op_file == NULL) 5048 return nfserr_jukebox; 5049 5050 status = set_client(clientid, cstate, nn); 5051 if (status) 5052 return status; 5053 clp = cstate->clp; 5054 5055 strhashval = ownerstr_hashval(&open->op_owner); 5056 oo = find_openstateowner_str(strhashval, open, clp); 5057 open->op_openowner = oo; 5058 if (!oo) { 5059 goto new_owner; 5060 } 5061 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 5062 /* Replace unconfirmed owners without checking for replay. */ 5063 release_openowner(oo); 5064 open->op_openowner = NULL; 5065 goto new_owner; 5066 } 5067 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 5068 if (status) 5069 return status; 5070 goto alloc_stateid; 5071 new_owner: 5072 oo = alloc_init_open_stateowner(strhashval, open, cstate); 5073 if (oo == NULL) 5074 return nfserr_jukebox; 5075 open->op_openowner = oo; 5076 alloc_stateid: 5077 open->op_stp = nfs4_alloc_open_stateid(clp); 5078 if (!open->op_stp) 5079 return nfserr_jukebox; 5080 5081 if (nfsd4_has_session(cstate) && 5082 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { 5083 open->op_odstate = alloc_clnt_odstate(clp); 5084 if (!open->op_odstate) 5085 return nfserr_jukebox; 5086 } 5087 5088 return nfs_ok; 5089 } 5090 5091 static inline __be32 5092 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 5093 { 5094 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 5095 return nfserr_openmode; 5096 else 5097 return nfs_ok; 5098 } 5099 5100 static int share_access_to_flags(u32 share_access) 5101 { 5102 return share_access == NFS4_SHARE_ACCESS_READ ? 
RD_STATE : WR_STATE; 5103 } 5104 5105 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 5106 { 5107 struct nfs4_stid *ret; 5108 5109 ret = find_stateid_by_type(cl, s, 5110 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); 5111 if (!ret) 5112 return NULL; 5113 return delegstateid(ret); 5114 } 5115 5116 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 5117 { 5118 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 5119 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 5120 } 5121 5122 static __be32 5123 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 5124 struct nfs4_delegation **dp) 5125 { 5126 int flags; 5127 __be32 status = nfserr_bad_stateid; 5128 struct nfs4_delegation *deleg; 5129 5130 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); 5131 if (deleg == NULL) 5132 goto out; 5133 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { 5134 nfs4_put_stid(&deleg->dl_stid); 5135 if (cl->cl_minorversion) 5136 status = nfserr_deleg_revoked; 5137 goto out; 5138 } 5139 flags = share_access_to_flags(open->op_share_access); 5140 status = nfs4_check_delegmode(deleg, flags); 5141 if (status) { 5142 nfs4_put_stid(&deleg->dl_stid); 5143 goto out; 5144 } 5145 *dp = deleg; 5146 out: 5147 if (!nfsd4_is_deleg_cur(open)) 5148 return nfs_ok; 5149 if (status) 5150 return status; 5151 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 5152 return nfs_ok; 5153 } 5154 5155 static inline int nfs4_access_to_access(u32 nfs4_access) 5156 { 5157 int flags = 0; 5158 5159 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 5160 flags |= NFSD_MAY_READ; 5161 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 5162 flags |= NFSD_MAY_WRITE; 5163 return flags; 5164 } 5165 5166 static inline __be32 5167 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 5168 struct nfsd4_open *open) 5169 { 5170 struct iattr iattr = { 5171 .ia_valid = ATTR_SIZE, 5172 .ia_size = 0, 5173 }; 5174 struct nfsd_attrs attrs = { 5175 .na_iattr = &iattr, 5176 }; 5177 if (!open->op_truncate) 5178 return 0; 5179 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 5180 return nfserr_inval; 5181 return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0); 5182 } 5183 5184 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 5185 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 5186 struct nfsd4_open *open, bool new_stp) 5187 { 5188 struct nfsd_file *nf = NULL; 5189 __be32 status; 5190 int oflag = nfs4_access_to_omode(open->op_share_access); 5191 int access = nfs4_access_to_access(open->op_share_access); 5192 unsigned char old_access_bmap, old_deny_bmap; 5193 5194 spin_lock(&fp->fi_lock); 5195 5196 /* 5197 * Are we trying to set a deny mode that would conflict with 5198 * current access? 
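 * (For example, an OPEN requesting NFS4_SHARE_DENY_READ must fail
 * with nfserr_share_denied while another stateid still holds
 * NFS4_SHARE_ACCESS_READ on this file, and vice versa.)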
5199 */ 5200 status = nfs4_file_check_deny(fp, open->op_share_deny); 5201 if (status != nfs_ok) { 5202 if (status != nfserr_share_denied) { 5203 spin_unlock(&fp->fi_lock); 5204 goto out; 5205 } 5206 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, 5207 stp, open->op_share_deny, false)) 5208 status = nfserr_jukebox; 5209 spin_unlock(&fp->fi_lock); 5210 goto out; 5211 } 5212 5213 /* set access to the file */ 5214 status = nfs4_file_get_access(fp, open->op_share_access); 5215 if (status != nfs_ok) { 5216 if (status != nfserr_share_denied) { 5217 spin_unlock(&fp->fi_lock); 5218 goto out; 5219 } 5220 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, 5221 stp, open->op_share_access, true)) 5222 status = nfserr_jukebox; 5223 spin_unlock(&fp->fi_lock); 5224 goto out; 5225 } 5226 5227 /* Set access bits in stateid */ 5228 old_access_bmap = stp->st_access_bmap; 5229 set_access(open->op_share_access, stp); 5230 5231 /* Set new deny mask */ 5232 old_deny_bmap = stp->st_deny_bmap; 5233 set_deny(open->op_share_deny, stp); 5234 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 5235 5236 if (!fp->fi_fds[oflag]) { 5237 spin_unlock(&fp->fi_lock); 5238 5239 status = nfsd_file_acquire_opened(rqstp, cur_fh, access, 5240 open->op_filp, &nf); 5241 if (status != nfs_ok) 5242 goto out_put_access; 5243 5244 spin_lock(&fp->fi_lock); 5245 if (!fp->fi_fds[oflag]) { 5246 fp->fi_fds[oflag] = nf; 5247 nf = NULL; 5248 } 5249 } 5250 spin_unlock(&fp->fi_lock); 5251 if (nf) 5252 nfsd_file_put(nf); 5253 5254 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, 5255 access)); 5256 if (status) 5257 goto out_put_access; 5258 5259 status = nfsd4_truncate(rqstp, cur_fh, open); 5260 if (status) 5261 goto out_put_access; 5262 out: 5263 return status; 5264 out_put_access: 5265 stp->st_access_bmap = old_access_bmap; 5266 nfs4_file_put_access(fp, open->op_share_access); 5267 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); 5268 goto out; 5269 } 5270 5271 static __be32 5272 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, 5273 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 5274 struct nfsd4_open *open) 5275 { 5276 __be32 status; 5277 unsigned char old_deny_bmap = stp->st_deny_bmap; 5278 5279 if (!test_access(open->op_share_access, stp)) 5280 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false); 5281 5282 /* test and set deny mode */ 5283 spin_lock(&fp->fi_lock); 5284 status = nfs4_file_check_deny(fp, open->op_share_deny); 5285 switch (status) { 5286 case nfs_ok: 5287 set_deny(open->op_share_deny, stp); 5288 fp->fi_share_deny |= 5289 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 5290 break; 5291 case nfserr_share_denied: 5292 if (nfs4_resolve_deny_conflicts_locked(fp, false, 5293 stp, open->op_share_deny, false)) 5294 status = nfserr_jukebox; 5295 break; 5296 } 5297 spin_unlock(&fp->fi_lock); 5298 5299 if (status != nfs_ok) 5300 return status; 5301 5302 status = nfsd4_truncate(rqstp, cur_fh, open); 5303 if (status != nfs_ok) 5304 reset_union_bmap_deny(old_deny_bmap, stp); 5305 return status; 5306 } 5307 5308 /* Should we give out recallable state?: */ 5309 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 5310 { 5311 if (clp->cl_cb_state == NFSD4_CB_UP) 5312 return true; 5313 /* 5314 * In the sessions case, since we don't have to establish a 5315 * separate connection for callbacks, we assume it's OK 5316 * until we hear otherwise: 5317 */ 5318 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 5319 } 5320 5321 static struct 
file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, 5322 int flag) 5323 { 5324 struct file_lock *fl; 5325 5326 fl = locks_alloc_lock(); 5327 if (!fl) 5328 return NULL; 5329 fl->fl_lmops = &nfsd_lease_mng_ops; 5330 fl->fl_flags = FL_DELEG; 5331 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK; 5332 fl->fl_end = OFFSET_MAX; 5333 fl->fl_owner = (fl_owner_t)dp; 5334 fl->fl_pid = current->tgid; 5335 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; 5336 return fl; 5337 } 5338 5339 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, 5340 struct nfs4_file *fp) 5341 { 5342 struct nfs4_ol_stateid *st; 5343 struct file *f = fp->fi_deleg_file->nf_file; 5344 struct inode *ino = file_inode(f); 5345 int writes; 5346 5347 writes = atomic_read(&ino->i_writecount); 5348 if (!writes) 5349 return 0; 5350 /* 5351 * There could be multiple filehandles (hence multiple 5352 * nfs4_files) referencing this file, but that's not too 5353 * common; let's just give up in that case rather than 5354 * trying to go look up all the clients using that other 5355 * nfs4_file as well: 5356 */ 5357 if (fp->fi_aliased) 5358 return -EAGAIN; 5359 /* 5360 * If there's a close in progress, make sure that we see it 5361 * clear any fi_fds[] entries before we see it decrement 5362 * i_writecount: 5363 */ 5364 smp_mb__after_atomic(); 5365 5366 if (fp->fi_fds[O_WRONLY]) 5367 writes--; 5368 if (fp->fi_fds[O_RDWR]) 5369 writes--; 5370 if (writes > 0) 5371 return -EAGAIN; /* There may be non-NFSv4 writers */ 5372 /* 5373 * It's possible there are non-NFSv4 write opens in progress, 5374 * but if they haven't incremented i_writecount yet then they 5375 * also haven't called break lease yet; so, they'll break this 5376 * lease soon enough. So, all that's left to check for is NFSv4 5377 * opens: 5378 */ 5379 spin_lock(&fp->fi_lock); 5380 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { 5381 if (st->st_openstp == NULL /* it's an open */ && 5382 access_permit_write(st) && 5383 st->st_stid.sc_client != clp) { 5384 spin_unlock(&fp->fi_lock); 5385 return -EAGAIN; 5386 } 5387 } 5388 spin_unlock(&fp->fi_lock); 5389 /* 5390 * There's a small chance that we could be racing with another 5391 * NFSv4 open. However, any open that hasn't added itself to 5392 * the fi_stateids list also hasn't called break_lease yet; so, 5393 * they'll break this lease soon enough. 5394 */ 5395 return 0; 5396 } 5397 5398 /* 5399 * It's possible that, between opening the dentry and setting the delegation, 5400 * the file has been renamed or unlinked. Redo the lookup to verify that this 5401 * hasn't happened. 5402 */ 5403 static int 5404 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp, 5405 struct svc_fh *parent) 5406 { 5407 struct svc_export *exp; 5408 struct dentry *child; 5409 __be32 err; 5410 5411 err = nfsd_lookup_dentry(open->op_rqstp, parent, 5412 open->op_fname, open->op_fnamelen, 5413 &exp, &child); 5414 5415 if (err) 5416 return -EAGAIN; 5417 5418 exp_put(exp); 5419 dput(child); 5420 if (child != file_dentry(fp->fi_deleg_file->nf_file)) 5421 return -EAGAIN; 5422 5423 return 0; 5424 } 5425 5426 /* 5427 * We avoid breaking delegations held by a client due to its own activity, but 5428 * clearing setuid/setgid bits on a write is an implicit activity and the client 5429 * may not notice and continue using the old mode. Avoid giving out a delegation 5430 * on setuid/setgid files when the client is requesting an open for write.
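 * (The client's own WRITEs won't recall its delegation, since
 * nfsd_breaker_owns_lease() treats self-conflicts as resolved, so
 * the implicit mode change would go unnoticed; hence the
 * nfsd4_verify_setuid_write() checks below.)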
5431 */ 5432 static int 5433 nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf) 5434 { 5435 struct inode *inode = file_inode(nf->nf_file); 5436 5437 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) && 5438 (inode->i_mode & (S_ISUID|S_ISGID))) 5439 return -EAGAIN; 5440 return 0; 5441 } 5442 5443 static struct nfs4_delegation * 5444 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, 5445 struct svc_fh *parent) 5446 { 5447 int status = 0; 5448 struct nfs4_client *clp = stp->st_stid.sc_client; 5449 struct nfs4_file *fp = stp->st_stid.sc_file; 5450 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; 5451 struct nfs4_delegation *dp; 5452 struct nfsd_file *nf; 5453 struct file_lock *fl; 5454 5455 /* 5456 * The fi_had_conflict and nfs4_delegation_exists checks 5457 * here are just optimizations; we'll need to recheck them at 5458 * the end: 5459 */ 5460 if (fp->fi_had_conflict) 5461 return ERR_PTR(-EAGAIN); 5462 5463 nf = find_readable_file(fp); 5464 if (!nf) { 5465 /* 5466 * We probably could attempt another open and get a read 5467 * delegation, but for now, don't bother until the 5468 * client actually sends us one. 5469 */ 5470 return ERR_PTR(-EAGAIN); 5471 } 5472 spin_lock(&state_lock); 5473 spin_lock(&fp->fi_lock); 5474 if (nfs4_delegation_exists(clp, fp)) 5475 status = -EAGAIN; 5476 else if (nfsd4_verify_setuid_write(open, nf)) 5477 status = -EAGAIN; 5478 else if (!fp->fi_deleg_file) { 5479 fp->fi_deleg_file = nf; 5480 /* increment early to prevent fi_deleg_file from being 5481 * cleared */ 5482 fp->fi_delegees = 1; 5483 nf = NULL; 5484 } else 5485 fp->fi_delegees++; 5486 spin_unlock(&fp->fi_lock); 5487 spin_unlock(&state_lock); 5488 if (nf) 5489 nfsd_file_put(nf); 5490 if (status) 5491 return ERR_PTR(status); 5492 5493 status = -ENOMEM; 5494 dp = alloc_init_deleg(clp, fp, odstate); 5495 if (!dp) 5496 goto out_delegees; 5497 5498 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ); 5499 if (!fl) 5500 goto out_clnt_odstate; 5501 5502 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); 5503 if (fl) 5504 locks_free_lock(fl); 5505 if (status) 5506 goto out_clnt_odstate; 5507 5508 if (parent) { 5509 status = nfsd4_verify_deleg_dentry(open, fp, parent); 5510 if (status) 5511 goto out_unlock; 5512 } 5513 5514 status = nfsd4_check_conflicting_opens(clp, fp); 5515 if (status) 5516 goto out_unlock; 5517 5518 /* 5519 * Now that the deleg is set, check again to ensure that nothing 5520 * raced in and changed the mode while we weren't looking.
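 * A chmod adding S_ISUID/S_ISGID between the first
 * nfsd4_verify_setuid_write() check and the vfs_setlease() call
 * above would otherwise slip through.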
5521 */ 5522 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file); 5523 if (status) 5524 goto out_unlock; 5525 5526 spin_lock(&state_lock); 5527 spin_lock(&fp->fi_lock); 5528 if (fp->fi_had_conflict) 5529 status = -EAGAIN; 5530 else 5531 status = hash_delegation_locked(dp, fp); 5532 spin_unlock(&fp->fi_lock); 5533 spin_unlock(&state_lock); 5534 5535 if (status) 5536 goto out_unlock; 5537 5538 return dp; 5539 out_unlock: 5540 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); 5541 out_clnt_odstate: 5542 put_clnt_odstate(dp->dl_clnt_odstate); 5543 nfs4_put_stid(&dp->dl_stid); 5544 out_delegees: 5545 put_deleg_file(fp); 5546 return ERR_PTR(status); 5547 } 5548 5549 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 5550 { 5551 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5552 if (status == -EAGAIN) 5553 open->op_why_no_deleg = WND4_CONTENTION; 5554 else { 5555 open->op_why_no_deleg = WND4_RESOURCE; 5556 switch (open->op_deleg_want) { 5557 case NFS4_SHARE_WANT_READ_DELEG: 5558 case NFS4_SHARE_WANT_WRITE_DELEG: 5559 case NFS4_SHARE_WANT_ANY_DELEG: 5560 break; 5561 case NFS4_SHARE_WANT_CANCEL: 5562 open->op_why_no_deleg = WND4_CANCELLED; 5563 break; 5564 case NFS4_SHARE_WANT_NO_DELEG: 5565 WARN_ON_ONCE(1); 5566 } 5567 } 5568 } 5569 5570 /* 5571 * Attempt to hand out a delegation. 5572 * 5573 * Note we don't support write delegations, and won't until the vfs has 5574 * proper support for them. 5575 */ 5576 static void 5577 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, 5578 struct svc_fh *currentfh) 5579 { 5580 struct nfs4_delegation *dp; 5581 struct nfs4_openowner *oo = openowner(stp->st_stateowner); 5582 struct nfs4_client *clp = stp->st_stid.sc_client; 5583 struct svc_fh *parent = NULL; 5584 int cb_up; 5585 int status = 0; 5586 5587 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 5588 open->op_recall = 0; 5589 switch (open->op_claim_type) { 5590 case NFS4_OPEN_CLAIM_PREVIOUS: 5591 if (!cb_up) 5592 open->op_recall = 1; 5593 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ) 5594 goto out_no_deleg; 5595 break; 5596 case NFS4_OPEN_CLAIM_NULL: 5597 parent = currentfh; 5598 fallthrough; 5599 case NFS4_OPEN_CLAIM_FH: 5600 /* 5601 * Let's not give out any delegations till everyone's 5602 * had the chance to reclaim theirs, *and* until 5603 * NLM locks have all been reclaimed: 5604 */ 5605 if (locks_in_grace(clp->net)) 5606 goto out_no_deleg; 5607 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 5608 goto out_no_deleg; 5609 break; 5610 default: 5611 goto out_no_deleg; 5612 } 5613 dp = nfs4_set_delegation(open, stp, parent); 5614 if (IS_ERR(dp)) 5615 goto out_no_deleg; 5616 5617 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 5618 5619 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); 5620 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 5621 nfs4_put_stid(&dp->dl_stid); 5622 return; 5623 out_no_deleg: 5624 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; 5625 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 5626 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { 5627 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 5628 open->op_recall = 1; 5629 } 5630 5631 /* 4.1 client asking for a delegation? 
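 * If so, tell it why it didn't get one: op_why_no_deleg is returned
 * whenever op_delegate_type is NFS4_OPEN_DELEGATE_NONE_EXT (see
 * nfsd4_open_deleg_none_ext() above).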
*/ 5632 if (open->op_deleg_want) 5633 nfsd4_open_deleg_none_ext(open, status); 5634 return; 5635 } 5636 5637 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 5638 struct nfs4_delegation *dp) 5639 { 5640 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 5641 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 5642 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5643 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 5644 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 5645 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 5646 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5647 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 5648 } 5649 /* Otherwise the client must be confused, wanting a delegation 5650 * it already has; in that case we don't return 5651 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason. 5652 */ 5653 } 5654 5655 /** 5656 * nfsd4_process_open2 - finish open processing 5657 * @rqstp: the RPC transaction being executed 5658 * @current_fh: NFSv4 COMPOUND's current filehandle 5659 * @open: OPEN arguments 5660 * 5661 * If successful, (1) truncate the file if open->op_truncate was 5662 * set, (2) set open->op_stateid, (3) set open->op_delegation. 5663 * 5664 * Returns %nfs_ok on success; otherwise an nfs4stat value in 5665 * network byte order is returned. 5666 */ 5667 __be32 5668 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 5669 { 5670 struct nfsd4_compoundres *resp = rqstp->rq_resp; 5671 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 5672 struct nfs4_file *fp = NULL; 5673 struct nfs4_ol_stateid *stp = NULL; 5674 struct nfs4_delegation *dp = NULL; 5675 __be32 status; 5676 bool new_stp = false; 5677 5678 /* 5679 * Look up the file; if found, look up the stateid and check the open 5680 * request, and check for delegations in the process of being recalled. 5681 * If not found, create the nfs4_file struct. 5682 */ 5683 fp = nfsd4_file_hash_insert(open->op_file, current_fh); 5684 if (unlikely(!fp)) 5685 return nfserr_jukebox; 5686 if (fp != open->op_file) { 5687 status = nfs4_check_deleg(cl, open, &dp); 5688 if (status) 5689 goto out; 5690 stp = nfsd4_find_and_lock_existing_open(fp, open); 5691 } else { 5692 open->op_file = NULL; 5693 status = nfserr_bad_stateid; 5694 if (nfsd4_is_deleg_cur(open)) 5695 goto out; 5696 } 5697 5698 if (!stp) { 5699 stp = init_open_stateid(fp, open); 5700 if (!open->op_stp) 5701 new_stp = true; 5702 } 5703 5704 /* 5705 * OPEN the file, or upgrade an existing OPEN. 5706 * If truncate fails, the OPEN fails. 5707 * 5708 * stp is already locked.
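 * (st_mutex was taken in init_open_stateid() or
 * nfsd4_find_and_lock_existing_open(); it is dropped below once the
 * stateid's generation has been bumped and copied out.)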
5709 */ 5710 if (!new_stp) { 5711 /* Stateid was found, this is an OPEN upgrade */ 5712 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 5713 if (status) { 5714 mutex_unlock(&stp->st_mutex); 5715 goto out; 5716 } 5717 } else { 5718 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true); 5719 if (status) { 5720 stp->st_stid.sc_type = NFS4_CLOSED_STID; 5721 release_open_stateid(stp); 5722 mutex_unlock(&stp->st_mutex); 5723 goto out; 5724 } 5725 5726 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, 5727 open->op_odstate); 5728 if (stp->st_clnt_odstate == open->op_odstate) 5729 open->op_odstate = NULL; 5730 } 5731 5732 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 5733 mutex_unlock(&stp->st_mutex); 5734 5735 if (nfsd4_has_session(&resp->cstate)) { 5736 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 5737 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5738 open->op_why_no_deleg = WND4_NOT_WANTED; 5739 goto nodeleg; 5740 } 5741 } 5742 5743 /* 5744 * Attempt to hand out a delegation. No error return, because the 5745 * OPEN succeeds even if we fail. 5746 */ 5747 nfs4_open_delegation(open, stp, &resp->cstate.current_fh); 5748 nodeleg: 5749 status = nfs_ok; 5750 trace_nfsd_open(&stp->st_stid.sc_stateid); 5751 out: 5752 /* 4.1 client trying to upgrade/downgrade delegation? */ 5753 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 5754 open->op_deleg_want) 5755 nfsd4_deleg_xgrade_none_ext(open, dp); 5756 5757 if (fp) 5758 put_nfs4_file(fp); 5759 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 5760 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 5761 /* 5762 * To finish the open response, we just need to set the rflags. 5763 */ 5764 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 5765 if (nfsd4_has_session(&resp->cstate)) 5766 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; 5767 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) 5768 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 5769 5770 if (dp) 5771 nfs4_put_stid(&dp->dl_stid); 5772 if (stp) 5773 nfs4_put_stid(&stp->st_stid); 5774 5775 return status; 5776 } 5777 5778 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, 5779 struct nfsd4_open *open) 5780 { 5781 if (open->op_openowner) { 5782 struct nfs4_stateowner *so = &open->op_openowner->oo_owner; 5783 5784 nfsd4_cstate_assign_replay(cstate, so); 5785 nfs4_put_stateowner(so); 5786 } 5787 if (open->op_file) 5788 kmem_cache_free(file_slab, open->op_file); 5789 if (open->op_stp) 5790 nfs4_put_stid(&open->op_stp->st_stid); 5791 if (open->op_odstate) 5792 kmem_cache_free(odstate_slab, open->op_odstate); 5793 } 5794 5795 __be32 5796 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5797 union nfsd4_op_u *u) 5798 { 5799 clientid_t *clid = &u->renew; 5800 struct nfs4_client *clp; 5801 __be32 status; 5802 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5803 5804 trace_nfsd_clid_renew(clid); 5805 status = set_client(clid, cstate, nn); 5806 if (status) 5807 return status; 5808 clp = cstate->clp; 5809 if (!list_empty(&clp->cl_delegations) 5810 && clp->cl_cb_state != NFSD4_CB_UP) 5811 return nfserr_cb_path_down; 5812 return nfs_ok; 5813 } 5814 5815 void 5816 nfsd4_end_grace(struct nfsd_net *nn) 5817 { 5818 /* do nothing if grace period already ended */ 5819 if (nn->grace_ended) 5820 return; 5821 5822 trace_nfsd_grace_complete(nn); 5823 nn->grace_ended = true; 5824 /* 5825 * If the server goes down again right now, an NFSv4 5826 * client will still 
be allowed to reclaim after it comes back up,
5827	 * even if it hasn't yet had a chance to reclaim state this time.
5828	 *
5829	 */
5830	nfsd4_record_grace_done(nn);
5831	/*
5832	 * At this point, NFSv4 clients can still reclaim. But if the
5833	 * server crashes, any that have not yet reclaimed will be out
5834	 * of luck on the next boot.
5835	 *
5836	 * (NFSv4.1+ clients are considered to have reclaimed once they
5837	 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5838	 * have reclaimed after their first OPEN.)
5839	 */
5840	locks_end_grace(&nn->nfsd4_manager);
5841	/*
5842	 * At this point, and once lockd and/or any other containers
5843	 * exit their grace period, further reclaims will fail and
5844	 * regular locking can resume.
5845	 */
5846 }
5847
5848 /*
5849  * If we've waited a lease period but there are still clients trying to
5850  * reclaim, wait a little longer to give them a chance to finish.
5851  */
5852 static bool clients_still_reclaiming(struct nfsd_net *nn)
5853 {
5854	time64_t double_grace_period_end = nn->boot_time +
5855					   2 * nn->nfsd4_lease;
5856
5857	if (nn->track_reclaim_completes &&
5858	    atomic_read(&nn->nr_reclaim_complete) ==
5859	    nn->reclaim_str_hashtbl_size)
5860		return false;
5861	if (!nn->somebody_reclaimed)
5862		return false;
5863	nn->somebody_reclaimed = false;
5864	/*
5865	 * If we've given them *two* lease times to reclaim, and they're
5866	 * still not done, give up:
5867	 */
5868	if (ktime_get_boottime_seconds() > double_grace_period_end)
5869		return false;
5870	return true;
5871 }
5872
5873 struct laundry_time {
5874	time64_t cutoff;
5875	time64_t new_timeo;
5876 };
5877
5878 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5879 {
5880	time64_t time_remaining;
5881
5882	if (last_refresh < lt->cutoff)
5883		return true;
5884	time_remaining = last_refresh - lt->cutoff;
5885	lt->new_timeo = min(lt->new_timeo, time_remaining);
5886	return false;
5887 }
5888
5889 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5890 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5891 {
5892	spin_lock_init(&nn->nfsd_ssc_lock);
5893	INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5894	init_waitqueue_head(&nn->nfsd_ssc_waitq);
5895 }
5896 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5897
5898 /*
5899  * This is called when nfsd is being shut down, after all inter_ssc
5900  * cleanup is done, to destroy the ssc delayed unmount list.
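 *
 * Note that nfsd_ssc_lock is dropped and re-acquired around each
 * mntput() call below, since mntput() can sleep and must not be
 * called while holding a spinlock.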
5901  */
5902 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5903 {
5904	struct nfsd4_ssc_umount_item *ni = NULL;
5905	struct nfsd4_ssc_umount_item *tmp;
5906
5907	spin_lock(&nn->nfsd_ssc_lock);
5908	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5909		list_del(&ni->nsui_list);
5910		spin_unlock(&nn->nfsd_ssc_lock);
5911		mntput(ni->nsui_vfsmount);
5912		kfree(ni);
5913		spin_lock(&nn->nfsd_ssc_lock);
5914	}
5915	spin_unlock(&nn->nfsd_ssc_lock);
5916 }
5917
5918 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5919 {
5920	bool do_wakeup = false;
5921	struct nfsd4_ssc_umount_item *ni = NULL;
5922	struct nfsd4_ssc_umount_item *tmp;
5923
5924	spin_lock(&nn->nfsd_ssc_lock);
5925	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5926		if (time_after(jiffies, ni->nsui_expire)) {
5927			if (refcount_read(&ni->nsui_refcnt) > 1)
5928				continue;
5929
5930			/* mark as being unmounted */
5931			ni->nsui_busy = true;
5932			spin_unlock(&nn->nfsd_ssc_lock);
5933			mntput(ni->nsui_vfsmount);
5934			spin_lock(&nn->nfsd_ssc_lock);
5935
5936			/* waiters need to start from the beginning of the list */
5937			list_del(&ni->nsui_list);
5938			kfree(ni);
5939
5940			/* wake up ssc_connect waiters */
5941			do_wakeup = true;
5942			continue;
5943		}
5944		break;
5945	}
5946	if (do_wakeup)
5947		wake_up_all(&nn->nfsd_ssc_waitq);
5948	spin_unlock(&nn->nfsd_ssc_lock);
5949 }
5950 #endif
5951
5952 /* Check if any lock belonging to this lockowner has any blockers */
5953 static bool
5954 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
5955 {
5956	struct file_lock_context *ctx;
5957	struct nfs4_ol_stateid *stp;
5958	struct nfs4_file *nf;
5959
5960	list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
5961		nf = stp->st_stid.sc_file;
5962		ctx = locks_inode_context(nf->fi_inode);
5963		if (!ctx)
5964			continue;
5965		if (locks_owner_has_blockers(ctx, lo))
5966			return true;
5967	}
5968	return false;
5969 }
5970
5971 static bool
5972 nfs4_anylock_blockers(struct nfs4_client *clp)
5973 {
5974	int i;
5975	struct nfs4_stateowner *so;
5976	struct nfs4_lockowner *lo;
5977
5978	if (atomic_read(&clp->cl_delegs_in_recall))
5979		return true;
5980	spin_lock(&clp->cl_lock);
5981	for (i = 0; i < OWNER_HASH_SIZE; i++) {
5982		list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
5983				    so_strhash) {
5984			if (so->so_is_open_owner)
5985				continue;
5986			lo = lockowner(so);
5987			if (nfs4_lockowner_has_blockers(lo)) {
5988				spin_unlock(&clp->cl_lock);
5989				return true;
5990			}
5991		}
5992	}
5993	spin_unlock(&clp->cl_lock);
5994	return false;
5995 }
5996
5997 static void
5998 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
5999			 struct laundry_time *lt)
6000 {
6001	unsigned int maxreap, reapcnt = 0;
6002	struct list_head *pos, *next;
6003	struct nfs4_client *clp;
6004
6005	maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
6006 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0; 6007 INIT_LIST_HEAD(reaplist); 6008 spin_lock(&nn->client_lock); 6009 list_for_each_safe(pos, next, &nn->client_lru) { 6010 clp = list_entry(pos, struct nfs4_client, cl_lru); 6011 if (clp->cl_state == NFSD4_EXPIRABLE) 6012 goto exp_client; 6013 if (!state_expired(lt, clp->cl_time)) 6014 break; 6015 if (!atomic_read(&clp->cl_rpc_users)) { 6016 if (clp->cl_state == NFSD4_ACTIVE) 6017 atomic_inc(&nn->nfsd_courtesy_clients); 6018 clp->cl_state = NFSD4_COURTESY; 6019 } 6020 if (!client_has_state(clp)) 6021 goto exp_client; 6022 if (!nfs4_anylock_blockers(clp)) 6023 if (reapcnt >= maxreap) 6024 continue; 6025 exp_client: 6026 if (!mark_client_expired_locked(clp)) { 6027 list_add(&clp->cl_lru, reaplist); 6028 reapcnt++; 6029 } 6030 } 6031 spin_unlock(&nn->client_lock); 6032 } 6033 6034 static void 6035 nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn, 6036 struct list_head *reaplist) 6037 { 6038 unsigned int maxreap = 0, reapcnt = 0; 6039 struct list_head *pos, *next; 6040 struct nfs4_client *clp; 6041 6042 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN; 6043 INIT_LIST_HEAD(reaplist); 6044 6045 spin_lock(&nn->client_lock); 6046 list_for_each_safe(pos, next, &nn->client_lru) { 6047 clp = list_entry(pos, struct nfs4_client, cl_lru); 6048 if (clp->cl_state == NFSD4_ACTIVE) 6049 break; 6050 if (reapcnt >= maxreap) 6051 break; 6052 if (!mark_client_expired_locked(clp)) { 6053 list_add(&clp->cl_lru, reaplist); 6054 reapcnt++; 6055 } 6056 } 6057 spin_unlock(&nn->client_lock); 6058 } 6059 6060 static void 6061 nfs4_process_client_reaplist(struct list_head *reaplist) 6062 { 6063 struct list_head *pos, *next; 6064 struct nfs4_client *clp; 6065 6066 list_for_each_safe(pos, next, reaplist) { 6067 clp = list_entry(pos, struct nfs4_client, cl_lru); 6068 trace_nfsd_clid_purged(&clp->cl_clientid); 6069 list_del_init(&clp->cl_lru); 6070 expire_client(clp); 6071 } 6072 } 6073 6074 static time64_t 6075 nfs4_laundromat(struct nfsd_net *nn) 6076 { 6077 struct nfs4_openowner *oo; 6078 struct nfs4_delegation *dp; 6079 struct nfs4_ol_stateid *stp; 6080 struct nfsd4_blocked_lock *nbl; 6081 struct list_head *pos, *next, reaplist; 6082 struct laundry_time lt = { 6083 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease, 6084 .new_timeo = nn->nfsd4_lease 6085 }; 6086 struct nfs4_cpntf_state *cps; 6087 copy_stateid_t *cps_t; 6088 int i; 6089 6090 if (clients_still_reclaiming(nn)) { 6091 lt.new_timeo = 0; 6092 goto out; 6093 } 6094 nfsd4_end_grace(nn); 6095 6096 spin_lock(&nn->s2s_cp_lock); 6097 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { 6098 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); 6099 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID && 6100 state_expired(<, cps->cpntf_time)) 6101 _free_cpntf_state_locked(nn, cps); 6102 } 6103 spin_unlock(&nn->s2s_cp_lock); 6104 nfs4_get_client_reaplist(nn, &reaplist, <); 6105 nfs4_process_client_reaplist(&reaplist); 6106 6107 spin_lock(&state_lock); 6108 list_for_each_safe(pos, next, &nn->del_recall_lru) { 6109 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 6110 if (!state_expired(<, dp->dl_time)) 6111 break; 6112 WARN_ON(!unhash_delegation_locked(dp)); 6113 list_add(&dp->dl_recall_lru, &reaplist); 6114 } 6115 spin_unlock(&state_lock); 6116 while (!list_empty(&reaplist)) { 6117 dp = list_first_entry(&reaplist, struct nfs4_delegation, 6118 dl_recall_lru); 6119 list_del_init(&dp->dl_recall_lru); 6120 revoke_delegation(dp); 6121 } 6122 6123 spin_lock(&nn->client_lock); 6124 while 
(!list_empty(&nn->close_lru)) { 6125 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, 6126 oo_close_lru); 6127 if (!state_expired(<, oo->oo_time)) 6128 break; 6129 list_del_init(&oo->oo_close_lru); 6130 stp = oo->oo_last_closed_stid; 6131 oo->oo_last_closed_stid = NULL; 6132 spin_unlock(&nn->client_lock); 6133 nfs4_put_stid(&stp->st_stid); 6134 spin_lock(&nn->client_lock); 6135 } 6136 spin_unlock(&nn->client_lock); 6137 6138 /* 6139 * It's possible for a client to try and acquire an already held lock 6140 * that is being held for a long time, and then lose interest in it. 6141 * So, we clean out any un-revisited request after a lease period 6142 * under the assumption that the client is no longer interested. 6143 * 6144 * RFC5661, sec. 9.6 states that the client must not rely on getting 6145 * notifications and must continue to poll for locks, even when the 6146 * server supports them. Thus this shouldn't lead to clients blocking 6147 * indefinitely once the lock does become free. 6148 */ 6149 BUG_ON(!list_empty(&reaplist)); 6150 spin_lock(&nn->blocked_locks_lock); 6151 while (!list_empty(&nn->blocked_locks_lru)) { 6152 nbl = list_first_entry(&nn->blocked_locks_lru, 6153 struct nfsd4_blocked_lock, nbl_lru); 6154 if (!state_expired(<, nbl->nbl_time)) 6155 break; 6156 list_move(&nbl->nbl_lru, &reaplist); 6157 list_del_init(&nbl->nbl_list); 6158 } 6159 spin_unlock(&nn->blocked_locks_lock); 6160 6161 while (!list_empty(&reaplist)) { 6162 nbl = list_first_entry(&reaplist, 6163 struct nfsd4_blocked_lock, nbl_lru); 6164 list_del_init(&nbl->nbl_lru); 6165 free_blocked_lock(nbl); 6166 } 6167 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 6168 /* service the server-to-server copy delayed unmount list */ 6169 nfsd4_ssc_expire_umount(nn); 6170 #endif 6171 out: 6172 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 6173 } 6174 6175 static void laundromat_main(struct work_struct *); 6176 6177 static void 6178 laundromat_main(struct work_struct *laundry) 6179 { 6180 time64_t t; 6181 struct delayed_work *dwork = to_delayed_work(laundry); 6182 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 6183 laundromat_work); 6184 6185 t = nfs4_laundromat(nn); 6186 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 6187 } 6188 6189 static void 6190 courtesy_client_reaper(struct nfsd_net *nn) 6191 { 6192 struct list_head reaplist; 6193 6194 nfs4_get_courtesy_client_reaplist(nn, &reaplist); 6195 nfs4_process_client_reaplist(&reaplist); 6196 } 6197 6198 static void 6199 deleg_reaper(struct nfsd_net *nn) 6200 { 6201 struct list_head *pos, *next; 6202 struct nfs4_client *clp; 6203 struct list_head cblist; 6204 6205 INIT_LIST_HEAD(&cblist); 6206 spin_lock(&nn->client_lock); 6207 list_for_each_safe(pos, next, &nn->client_lru) { 6208 clp = list_entry(pos, struct nfs4_client, cl_lru); 6209 if (clp->cl_state != NFSD4_ACTIVE || 6210 list_empty(&clp->cl_delegations) || 6211 atomic_read(&clp->cl_delegs_in_recall) || 6212 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) || 6213 (ktime_get_boottime_seconds() - 6214 clp->cl_ra_time < 5)) { 6215 continue; 6216 } 6217 list_add(&clp->cl_ra_cblist, &cblist); 6218 6219 /* release in nfsd4_cb_recall_any_release */ 6220 atomic_inc(&clp->cl_rpc_users); 6221 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); 6222 clp->cl_ra_time = ktime_get_boottime_seconds(); 6223 } 6224 spin_unlock(&nn->client_lock); 6225 6226 while (!list_empty(&cblist)) { 6227 clp = list_first_entry(&cblist, struct nfs4_client, 6228 cl_ra_cblist); 6229 
list_del_init(&clp->cl_ra_cblist); 6230 clp->cl_ra->ra_keep = 0; 6231 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG); 6232 trace_nfsd_cb_recall_any(clp->cl_ra); 6233 nfsd4_run_cb(&clp->cl_ra->ra_cb); 6234 } 6235 } 6236 6237 static void 6238 nfsd4_state_shrinker_worker(struct work_struct *work) 6239 { 6240 struct nfsd_net *nn = container_of(work, struct nfsd_net, 6241 nfsd_shrinker_work); 6242 6243 courtesy_client_reaper(nn); 6244 deleg_reaper(nn); 6245 } 6246 6247 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) 6248 { 6249 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) 6250 return nfserr_bad_stateid; 6251 return nfs_ok; 6252 } 6253 6254 static 6255 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 6256 { 6257 __be32 status = nfserr_openmode; 6258 6259 /* For lock stateid's, we test the parent open, not the lock: */ 6260 if (stp->st_openstp) 6261 stp = stp->st_openstp; 6262 if ((flags & WR_STATE) && !access_permit_write(stp)) 6263 goto out; 6264 if ((flags & RD_STATE) && !access_permit_read(stp)) 6265 goto out; 6266 status = nfs_ok; 6267 out: 6268 return status; 6269 } 6270 6271 static inline __be32 6272 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 6273 { 6274 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 6275 return nfs_ok; 6276 else if (opens_in_grace(net)) { 6277 /* Answer in remaining cases depends on existence of 6278 * conflicting state; so we must wait out the grace period. */ 6279 return nfserr_grace; 6280 } else if (flags & WR_STATE) 6281 return nfs4_share_conflict(current_fh, 6282 NFS4_SHARE_DENY_WRITE); 6283 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 6284 return nfs4_share_conflict(current_fh, 6285 NFS4_SHARE_DENY_READ); 6286 } 6287 6288 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 6289 { 6290 /* 6291 * When sessions are used the stateid generation number is ignored 6292 * when it is zero. 6293 */ 6294 if (has_session && in->si_generation == 0) 6295 return nfs_ok; 6296 6297 if (in->si_generation == ref->si_generation) 6298 return nfs_ok; 6299 6300 /* If the client sends us a stateid from the future, it's buggy: */ 6301 if (nfsd4_stateid_generation_after(in, ref)) 6302 return nfserr_bad_stateid; 6303 /* 6304 * However, we could see a stateid from the past, even from a 6305 * non-buggy client. For example, if the client sends a lock 6306 * while some IO is outstanding, the lock may bump si_generation 6307 * while the IO is still in flight. 
The client could avoid that 6308 * situation by waiting for responses on all the IO requests, 6309 * but better performance may result in retrying IO that 6310 * receives an old_stateid error if requests are rarely 6311 * reordered in flight: 6312 */ 6313 return nfserr_old_stateid; 6314 } 6315 6316 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session) 6317 { 6318 __be32 ret; 6319 6320 spin_lock(&s->sc_lock); 6321 ret = nfsd4_verify_open_stid(s); 6322 if (ret == nfs_ok) 6323 ret = check_stateid_generation(in, &s->sc_stateid, has_session); 6324 spin_unlock(&s->sc_lock); 6325 return ret; 6326 } 6327 6328 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) 6329 { 6330 if (ols->st_stateowner->so_is_open_owner && 6331 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 6332 return nfserr_bad_stateid; 6333 return nfs_ok; 6334 } 6335 6336 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 6337 { 6338 struct nfs4_stid *s; 6339 __be32 status = nfserr_bad_stateid; 6340 6341 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 6342 CLOSE_STATEID(stateid)) 6343 return status; 6344 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) 6345 return status; 6346 spin_lock(&cl->cl_lock); 6347 s = find_stateid_locked(cl, stateid); 6348 if (!s) 6349 goto out_unlock; 6350 status = nfsd4_stid_check_stateid_generation(stateid, s, 1); 6351 if (status) 6352 goto out_unlock; 6353 switch (s->sc_type) { 6354 case NFS4_DELEG_STID: 6355 status = nfs_ok; 6356 break; 6357 case NFS4_REVOKED_DELEG_STID: 6358 status = nfserr_deleg_revoked; 6359 break; 6360 case NFS4_OPEN_STID: 6361 case NFS4_LOCK_STID: 6362 status = nfsd4_check_openowner_confirmed(openlockstateid(s)); 6363 break; 6364 default: 6365 printk("unknown stateid type %x\n", s->sc_type); 6366 fallthrough; 6367 case NFS4_CLOSED_STID: 6368 case NFS4_CLOSED_DELEG_STID: 6369 status = nfserr_bad_stateid; 6370 } 6371 out_unlock: 6372 spin_unlock(&cl->cl_lock); 6373 return status; 6374 } 6375 6376 __be32 6377 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 6378 stateid_t *stateid, unsigned char typemask, 6379 struct nfs4_stid **s, struct nfsd_net *nn) 6380 { 6381 __be32 status; 6382 struct nfs4_stid *stid; 6383 bool return_revoked = false; 6384 6385 /* 6386 * only return revoked delegations if explicitly asked. 6387 * otherwise we report revoked or bad_stateid status. 
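 * Callers that pass NFS4_DELEG_STID have the typemask widened below to
 * include NFS4_REVOKED_DELEG_STID, so that a revoked delegation can be
 * reported to NFSv4.1+ clients as nfserr_deleg_revoked rather than as
 * a generic bad stateid.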
6388 */ 6389 if (typemask & NFS4_REVOKED_DELEG_STID) 6390 return_revoked = true; 6391 else if (typemask & NFS4_DELEG_STID) 6392 typemask |= NFS4_REVOKED_DELEG_STID; 6393 6394 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 6395 CLOSE_STATEID(stateid)) 6396 return nfserr_bad_stateid; 6397 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); 6398 if (status == nfserr_stale_clientid) { 6399 if (cstate->session) 6400 return nfserr_bad_stateid; 6401 return nfserr_stale_stateid; 6402 } 6403 if (status) 6404 return status; 6405 stid = find_stateid_by_type(cstate->clp, stateid, typemask); 6406 if (!stid) 6407 return nfserr_bad_stateid; 6408 if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { 6409 nfs4_put_stid(stid); 6410 if (cstate->minorversion) 6411 return nfserr_deleg_revoked; 6412 return nfserr_bad_stateid; 6413 } 6414 *s = stid; 6415 return nfs_ok; 6416 } 6417 6418 static struct nfsd_file * 6419 nfs4_find_file(struct nfs4_stid *s, int flags) 6420 { 6421 struct nfsd_file *ret = NULL; 6422 6423 if (!s) 6424 return NULL; 6425 6426 switch (s->sc_type) { 6427 case NFS4_DELEG_STID: 6428 spin_lock(&s->sc_file->fi_lock); 6429 ret = nfsd_file_get(s->sc_file->fi_deleg_file); 6430 spin_unlock(&s->sc_file->fi_lock); 6431 break; 6432 case NFS4_OPEN_STID: 6433 case NFS4_LOCK_STID: 6434 if (flags & RD_STATE) 6435 ret = find_readable_file(s->sc_file); 6436 else 6437 ret = find_writeable_file(s->sc_file); 6438 } 6439 6440 return ret; 6441 } 6442 6443 static __be32 6444 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags) 6445 { 6446 __be32 status; 6447 6448 status = nfsd4_check_openowner_confirmed(ols); 6449 if (status) 6450 return status; 6451 return nfs4_check_openmode(ols, flags); 6452 } 6453 6454 static __be32 6455 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, 6456 struct nfsd_file **nfp, int flags) 6457 { 6458 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE; 6459 struct nfsd_file *nf; 6460 __be32 status; 6461 6462 nf = nfs4_find_file(s, flags); 6463 if (nf) { 6464 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, 6465 acc | NFSD_MAY_OWNER_OVERRIDE); 6466 if (status) { 6467 nfsd_file_put(nf); 6468 goto out; 6469 } 6470 } else { 6471 status = nfsd_file_acquire(rqstp, fhp, acc, &nf); 6472 if (status) 6473 return status; 6474 } 6475 *nfp = nf; 6476 out: 6477 return status; 6478 } 6479 static void 6480 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 6481 { 6482 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID); 6483 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count)) 6484 return; 6485 list_del(&cps->cp_list); 6486 idr_remove(&nn->s2s_cp_stateids, 6487 cps->cp_stateid.cs_stid.si_opaque.so_id); 6488 kfree(cps); 6489 } 6490 /* 6491 * A READ from an inter server to server COPY will have a 6492 * copy stateid. Look up the copy notify stateid from the 6493 * idr structure and take a reference on it. 
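 *
 * When @clp is NULL, a reference is taken on the stateid for the
 * caller; a non-NULL @clp instead drops a reference, which is how
 * the copy notify state is torn down.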
6494 */ 6495 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, 6496 struct nfs4_client *clp, 6497 struct nfs4_cpntf_state **cps) 6498 { 6499 copy_stateid_t *cps_t; 6500 struct nfs4_cpntf_state *state = NULL; 6501 6502 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) 6503 return nfserr_bad_stateid; 6504 spin_lock(&nn->s2s_cp_lock); 6505 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); 6506 if (cps_t) { 6507 state = container_of(cps_t, struct nfs4_cpntf_state, 6508 cp_stateid); 6509 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) { 6510 state = NULL; 6511 goto unlock; 6512 } 6513 if (!clp) 6514 refcount_inc(&state->cp_stateid.cs_count); 6515 else 6516 _free_cpntf_state_locked(nn, state); 6517 } 6518 unlock: 6519 spin_unlock(&nn->s2s_cp_lock); 6520 if (!state) 6521 return nfserr_bad_stateid; 6522 if (!clp && state) 6523 *cps = state; 6524 return 0; 6525 } 6526 6527 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, 6528 struct nfs4_stid **stid) 6529 { 6530 __be32 status; 6531 struct nfs4_cpntf_state *cps = NULL; 6532 struct nfs4_client *found; 6533 6534 status = manage_cpntf_state(nn, st, NULL, &cps); 6535 if (status) 6536 return status; 6537 6538 cps->cpntf_time = ktime_get_boottime_seconds(); 6539 6540 status = nfserr_expired; 6541 found = lookup_clientid(&cps->cp_p_clid, true, nn); 6542 if (!found) 6543 goto out; 6544 6545 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, 6546 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID); 6547 if (*stid) 6548 status = nfs_ok; 6549 else 6550 status = nfserr_bad_stateid; 6551 6552 put_client_renew(found); 6553 out: 6554 nfs4_put_cpntf_state(nn, cps); 6555 return status; 6556 } 6557 6558 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 6559 { 6560 spin_lock(&nn->s2s_cp_lock); 6561 _free_cpntf_state_locked(nn, cps); 6562 spin_unlock(&nn->s2s_cp_lock); 6563 } 6564 6565 /** 6566 * nfs4_preprocess_stateid_op - find and prep stateid for an operation 6567 * @rqstp: incoming request from client 6568 * @cstate: current compound state 6569 * @fhp: filehandle associated with requested stateid 6570 * @stateid: stateid (provided by client) 6571 * @flags: flags describing type of operation to be done 6572 * @nfp: optional nfsd_file return pointer (may be NULL) 6573 * @cstid: optional returned nfs4_stid pointer (may be NULL) 6574 * 6575 * Given info from the client, look up a nfs4_stid for the operation. On 6576 * success, it returns a reference to the nfs4_stid and/or the nfsd_file 6577 * associated with it. 
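 * The caller is responsible for releasing them again, with
 * nfsd_file_put() for @nfp and nfs4_put_stid() for @cstid.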
6578 */ 6579 __be32 6580 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 6581 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 6582 stateid_t *stateid, int flags, struct nfsd_file **nfp, 6583 struct nfs4_stid **cstid) 6584 { 6585 struct net *net = SVC_NET(rqstp); 6586 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6587 struct nfs4_stid *s = NULL; 6588 __be32 status; 6589 6590 if (nfp) 6591 *nfp = NULL; 6592 6593 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 6594 if (cstid) 6595 status = nfserr_bad_stateid; 6596 else 6597 status = check_special_stateids(net, fhp, stateid, 6598 flags); 6599 goto done; 6600 } 6601 6602 status = nfsd4_lookup_stateid(cstate, stateid, 6603 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 6604 &s, nn); 6605 if (status == nfserr_bad_stateid) 6606 status = find_cpntf_state(nn, stateid, &s); 6607 if (status) 6608 return status; 6609 status = nfsd4_stid_check_stateid_generation(stateid, s, 6610 nfsd4_has_session(cstate)); 6611 if (status) 6612 goto out; 6613 6614 switch (s->sc_type) { 6615 case NFS4_DELEG_STID: 6616 status = nfs4_check_delegmode(delegstateid(s), flags); 6617 break; 6618 case NFS4_OPEN_STID: 6619 case NFS4_LOCK_STID: 6620 status = nfs4_check_olstateid(openlockstateid(s), flags); 6621 break; 6622 default: 6623 status = nfserr_bad_stateid; 6624 break; 6625 } 6626 if (status) 6627 goto out; 6628 status = nfs4_check_fh(fhp, s); 6629 6630 done: 6631 if (status == nfs_ok && nfp) 6632 status = nfs4_check_file(rqstp, fhp, s, nfp, flags); 6633 out: 6634 if (s) { 6635 if (!status && cstid) 6636 *cstid = s; 6637 else 6638 nfs4_put_stid(s); 6639 } 6640 return status; 6641 } 6642 6643 /* 6644 * Test if the stateid is valid 6645 */ 6646 __be32 6647 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6648 union nfsd4_op_u *u) 6649 { 6650 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; 6651 struct nfsd4_test_stateid_id *stateid; 6652 struct nfs4_client *cl = cstate->clp; 6653 6654 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 6655 stateid->ts_id_status = 6656 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 6657 6658 return nfs_ok; 6659 } 6660 6661 static __be32 6662 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) 6663 { 6664 struct nfs4_ol_stateid *stp = openlockstateid(s); 6665 __be32 ret; 6666 6667 ret = nfsd4_lock_ol_stateid(stp); 6668 if (ret) 6669 goto out_put_stid; 6670 6671 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 6672 if (ret) 6673 goto out; 6674 6675 ret = nfserr_locks_held; 6676 if (check_for_locks(stp->st_stid.sc_file, 6677 lockowner(stp->st_stateowner))) 6678 goto out; 6679 6680 release_lock_stateid(stp); 6681 ret = nfs_ok; 6682 6683 out: 6684 mutex_unlock(&stp->st_mutex); 6685 out_put_stid: 6686 nfs4_put_stid(s); 6687 return ret; 6688 } 6689 6690 __be32 6691 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6692 union nfsd4_op_u *u) 6693 { 6694 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; 6695 stateid_t *stateid = &free_stateid->fr_stateid; 6696 struct nfs4_stid *s; 6697 struct nfs4_delegation *dp; 6698 struct nfs4_client *cl = cstate->clp; 6699 __be32 ret = nfserr_bad_stateid; 6700 6701 spin_lock(&cl->cl_lock); 6702 s = find_stateid_locked(cl, stateid); 6703 if (!s) 6704 goto out_unlock; 6705 spin_lock(&s->sc_lock); 6706 switch (s->sc_type) { 6707 case NFS4_DELEG_STID: 6708 ret = nfserr_locks_held; 6709 break; 6710 case NFS4_OPEN_STID: 6711 ret = check_stateid_generation(stateid, 
&s->sc_stateid, 1);
6712		if (ret)
6713			break;
6714		ret = nfserr_locks_held;
6715		break;
6716	case NFS4_LOCK_STID:
6717		spin_unlock(&s->sc_lock);
6718		refcount_inc(&s->sc_count);
6719		spin_unlock(&cl->cl_lock);
6720		ret = nfsd4_free_lock_stateid(stateid, s);
6721		goto out;
6722	case NFS4_REVOKED_DELEG_STID:
6723		spin_unlock(&s->sc_lock);
6724		dp = delegstateid(s);
6725		list_del_init(&dp->dl_recall_lru);
6726		spin_unlock(&cl->cl_lock);
6727		nfs4_put_stid(s);
6728		ret = nfs_ok;
6729		goto out;
6730	/* Default falls through and returns nfserr_bad_stateid */
6731	}
6732	spin_unlock(&s->sc_lock);
6733 out_unlock:
6734	spin_unlock(&cl->cl_lock);
6735 out:
6736	return ret;
6737 }
6738
6739 static inline int
6740 setlkflg(int type)
6741 {
6742	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6743		RD_STATE : WR_STATE;
6744 }
6745
6746 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6747 {
6748	struct svc_fh *current_fh = &cstate->current_fh;
6749	struct nfs4_stateowner *sop = stp->st_stateowner;
6750	__be32 status;
6751
6752	status = nfsd4_check_seqid(cstate, sop, seqid);
6753	if (status)
6754		return status;
6755	status = nfsd4_lock_ol_stateid(stp);
6756	if (status != nfs_ok)
6757		return status;
6758	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6759	if (status == nfs_ok)
6760		status = nfs4_check_fh(current_fh, &stp->st_stid);
6761	if (status != nfs_ok)
6762		mutex_unlock(&stp->st_mutex);
6763	return status;
6764 }
6765
6766 /**
6767  * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
6768  * @cstate: compound state
6769  * @seqid: seqid (provided by client)
6770  * @stateid: stateid (provided by client)
6771  * @typemask: mask of allowable types for this operation
6772  * @stpp: return pointer for the stateid found
6773  * @nn: net namespace for request
6774  *
6775  * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
6776  * return it in @stpp. On an nfs_ok return, the returned stateid will
6777  * have its st_mutex locked.
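 * Once the stateid has been found, its owner is also recorded as the
 * replay owner in @cstate, even if the seqid checks then fail; on any
 * failure the reference to the stateid is dropped before returning.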
6778 */ 6779 static __be32 6780 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 6781 stateid_t *stateid, char typemask, 6782 struct nfs4_ol_stateid **stpp, 6783 struct nfsd_net *nn) 6784 { 6785 __be32 status; 6786 struct nfs4_stid *s; 6787 struct nfs4_ol_stateid *stp = NULL; 6788 6789 trace_nfsd_preprocess(seqid, stateid); 6790 6791 *stpp = NULL; 6792 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); 6793 if (status) 6794 return status; 6795 stp = openlockstateid(s); 6796 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); 6797 6798 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); 6799 if (!status) 6800 *stpp = stp; 6801 else 6802 nfs4_put_stid(&stp->st_stid); 6803 return status; 6804 } 6805 6806 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 6807 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 6808 { 6809 __be32 status; 6810 struct nfs4_openowner *oo; 6811 struct nfs4_ol_stateid *stp; 6812 6813 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 6814 NFS4_OPEN_STID, &stp, nn); 6815 if (status) 6816 return status; 6817 oo = openowner(stp->st_stateowner); 6818 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 6819 mutex_unlock(&stp->st_mutex); 6820 nfs4_put_stid(&stp->st_stid); 6821 return nfserr_bad_stateid; 6822 } 6823 *stpp = stp; 6824 return nfs_ok; 6825 } 6826 6827 __be32 6828 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6829 union nfsd4_op_u *u) 6830 { 6831 struct nfsd4_open_confirm *oc = &u->open_confirm; 6832 __be32 status; 6833 struct nfs4_openowner *oo; 6834 struct nfs4_ol_stateid *stp; 6835 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6836 6837 dprintk("NFSD: nfsd4_open_confirm on file %pd\n", 6838 cstate->current_fh.fh_dentry); 6839 6840 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 6841 if (status) 6842 return status; 6843 6844 status = nfs4_preprocess_seqid_op(cstate, 6845 oc->oc_seqid, &oc->oc_req_stateid, 6846 NFS4_OPEN_STID, &stp, nn); 6847 if (status) 6848 goto out; 6849 oo = openowner(stp->st_stateowner); 6850 status = nfserr_bad_stateid; 6851 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 6852 mutex_unlock(&stp->st_mutex); 6853 goto put_stateid; 6854 } 6855 oo->oo_flags |= NFS4_OO_CONFIRMED; 6856 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 6857 mutex_unlock(&stp->st_mutex); 6858 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); 6859 nfsd4_client_record_create(oo->oo_owner.so_client); 6860 status = nfs_ok; 6861 put_stateid: 6862 nfs4_put_stid(&stp->st_stid); 6863 out: 6864 nfsd4_bump_seqid(cstate, status); 6865 return status; 6866 } 6867 6868 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 6869 { 6870 if (!test_access(access, stp)) 6871 return; 6872 nfs4_file_put_access(stp->st_stid.sc_file, access); 6873 clear_access(access, stp); 6874 } 6875 6876 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 6877 { 6878 switch (to_access) { 6879 case NFS4_SHARE_ACCESS_READ: 6880 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 6881 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 6882 break; 6883 case NFS4_SHARE_ACCESS_WRITE: 6884 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 6885 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 6886 break; 6887 case NFS4_SHARE_ACCESS_BOTH: 6888 break; 6889 default: 6890 WARN_ON_ONCE(1); 6891 } 6892 } 6893 6894 __be32 
6895 nfsd4_open_downgrade(struct svc_rqst *rqstp, 6896 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 6897 { 6898 struct nfsd4_open_downgrade *od = &u->open_downgrade; 6899 __be32 status; 6900 struct nfs4_ol_stateid *stp; 6901 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6902 6903 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 6904 cstate->current_fh.fh_dentry); 6905 6906 /* We don't yet support WANT bits: */ 6907 if (od->od_deleg_want) 6908 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 6909 od->od_deleg_want); 6910 6911 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 6912 &od->od_stateid, &stp, nn); 6913 if (status) 6914 goto out; 6915 status = nfserr_inval; 6916 if (!test_access(od->od_share_access, stp)) { 6917 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", 6918 stp->st_access_bmap, od->od_share_access); 6919 goto put_stateid; 6920 } 6921 if (!test_deny(od->od_share_deny, stp)) { 6922 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", 6923 stp->st_deny_bmap, od->od_share_deny); 6924 goto put_stateid; 6925 } 6926 nfs4_stateid_downgrade(stp, od->od_share_access); 6927 reset_union_bmap_deny(od->od_share_deny, stp); 6928 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 6929 status = nfs_ok; 6930 put_stateid: 6931 mutex_unlock(&stp->st_mutex); 6932 nfs4_put_stid(&stp->st_stid); 6933 out: 6934 nfsd4_bump_seqid(cstate, status); 6935 return status; 6936 } 6937 6938 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 6939 { 6940 struct nfs4_client *clp = s->st_stid.sc_client; 6941 bool unhashed; 6942 LIST_HEAD(reaplist); 6943 struct nfs4_ol_stateid *stp; 6944 6945 spin_lock(&clp->cl_lock); 6946 unhashed = unhash_open_stateid(s, &reaplist); 6947 6948 if (clp->cl_minorversion) { 6949 if (unhashed) 6950 put_ol_stateid_locked(s, &reaplist); 6951 spin_unlock(&clp->cl_lock); 6952 list_for_each_entry(stp, &reaplist, st_locks) 6953 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); 6954 free_ol_stateid_reaplist(&reaplist); 6955 } else { 6956 spin_unlock(&clp->cl_lock); 6957 free_ol_stateid_reaplist(&reaplist); 6958 if (unhashed) 6959 move_to_close_lru(s, clp->net); 6960 } 6961 } 6962 6963 /* 6964 * nfs4_unlock_state() called after encode 6965 */ 6966 __be32 6967 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6968 union nfsd4_op_u *u) 6969 { 6970 struct nfsd4_close *close = &u->close; 6971 __be32 status; 6972 struct nfs4_ol_stateid *stp; 6973 struct net *net = SVC_NET(rqstp); 6974 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6975 6976 dprintk("NFSD: nfsd4_close on file %pd\n", 6977 cstate->current_fh.fh_dentry); 6978 6979 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 6980 &close->cl_stateid, 6981 NFS4_OPEN_STID|NFS4_CLOSED_STID, 6982 &stp, nn); 6983 nfsd4_bump_seqid(cstate, status); 6984 if (status) 6985 goto out; 6986 6987 stp->st_stid.sc_type = NFS4_CLOSED_STID; 6988 6989 /* 6990 * Technically we don't _really_ have to increment or copy it, since 6991 * it should just be gone after this operation and we clobber the 6992 * copied value below, but we continue to do so here just to ensure 6993 * that racing ops see that there was a state change. 
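 * (sc_type was set to NFS4_CLOSED_STID above, so racing operations
 * that look this stateid up again will find it closed and fail with
 * nfserr_bad_stateid.)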
6994 */ 6995 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 6996 6997 nfsd4_close_open_stateid(stp); 6998 mutex_unlock(&stp->st_mutex); 6999 7000 /* v4.1+ suggests that we send a special stateid in here, since the 7001 * clients should just ignore this anyway. Since this is not useful 7002 * for v4.0 clients either, we set it to the special close_stateid 7003 * universally. 7004 * 7005 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5 7006 */ 7007 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); 7008 7009 /* put reference from nfs4_preprocess_seqid_op */ 7010 nfs4_put_stid(&stp->st_stid); 7011 out: 7012 return status; 7013 } 7014 7015 __be32 7016 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7017 union nfsd4_op_u *u) 7018 { 7019 struct nfsd4_delegreturn *dr = &u->delegreturn; 7020 struct nfs4_delegation *dp; 7021 stateid_t *stateid = &dr->dr_stateid; 7022 struct nfs4_stid *s; 7023 __be32 status; 7024 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7025 7026 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 7027 return status; 7028 7029 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); 7030 if (status) 7031 goto out; 7032 dp = delegstateid(s); 7033 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); 7034 if (status) 7035 goto put_stateid; 7036 7037 trace_nfsd_deleg_return(stateid); 7038 wake_up_var(d_inode(cstate->current_fh.fh_dentry)); 7039 destroy_delegation(dp); 7040 put_stateid: 7041 nfs4_put_stid(&dp->dl_stid); 7042 out: 7043 return status; 7044 } 7045 7046 /* last octet in a range */ 7047 static inline u64 7048 last_byte_offset(u64 start, u64 len) 7049 { 7050 u64 end; 7051 7052 WARN_ON_ONCE(!len); 7053 end = start + len; 7054 return end > start ? end - 1: NFS4_MAX_UINT64; 7055 } 7056 7057 /* 7058 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 7059 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 7060 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 7061 * locking, this prevents us from being completely protocol-compliant. The 7062 * real solution to this problem is to start using unsigned file offsets in 7063 * the VFS, but this is a very deep change! 
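 *
 * For example, a lock at offset 0x7fffffffffffffff with length 2 has
 * its last byte at 2^63, which is negative as a loff_t;
 * nfs4_transform_lock_offset() below clamps such offsets to OFFSET_MAX
 * rather than failing the request.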
7064  */
7065 static inline void
7066 nfs4_transform_lock_offset(struct file_lock *lock)
7067 {
7068	if (lock->fl_start < 0)
7069		lock->fl_start = OFFSET_MAX;
7070	if (lock->fl_end < 0)
7071		lock->fl_end = OFFSET_MAX;
7072 }
7073
7074 static fl_owner_t
7075 nfsd4_lm_get_owner(fl_owner_t owner)
7076 {
7077	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7078
7079	nfs4_get_stateowner(&lo->lo_owner);
7080	return owner;
7081 }
7082
7083 static void
7084 nfsd4_lm_put_owner(fl_owner_t owner)
7085 {
7086	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7087
7088	if (lo)
7089		nfs4_put_stateowner(&lo->lo_owner);
7090 }
7091
7092 /* return true if the client holding this lock is expirable */
7093 static bool
7094 nfsd4_lm_lock_expirable(struct file_lock *cfl)
7095 {
7096	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
7097	struct nfs4_client *clp = lo->lo_owner.so_client;
7098	struct nfsd_net *nn;
7099
7100	if (try_to_expire_client(clp)) {
7101		nn = net_generic(clp->net, nfsd_net_id);
7102		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7103		return true;
7104	}
7105	return false;
7106 }
7107
7108 /* schedule laundromat to run immediately and wait for it to complete */
7109 static void
7110 nfsd4_lm_expire_lock(void)
7111 {
7112	flush_workqueue(laundry_wq);
7113 }
7114
7115 static void
7116 nfsd4_lm_notify(struct file_lock *fl)
7117 {
7118	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
7119	struct net *net = lo->lo_owner.so_client->net;
7120	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7121	struct nfsd4_blocked_lock *nbl = container_of(fl,
7122					struct nfsd4_blocked_lock, nbl_lock);
7123	bool queue = false;
7124
7125	/* An empty list means that something else is going to be using it */
7126	spin_lock(&nn->blocked_locks_lock);
7127	if (!list_empty(&nbl->nbl_list)) {
7128		list_del_init(&nbl->nbl_list);
7129		list_del_init(&nbl->nbl_lru);
7130		queue = true;
7131	}
7132	spin_unlock(&nn->blocked_locks_lock);
7133
7134	if (queue) {
7135		trace_nfsd_cb_notify_lock(lo, nbl);
7136		nfsd4_run_cb(&nbl->nbl_cb);
7137	}
7138 }
7139
7140 static const struct lock_manager_operations nfsd_posix_mng_ops = {
7141	.lm_mod_owner = THIS_MODULE,
7142	.lm_notify = nfsd4_lm_notify,
7143	.lm_get_owner = nfsd4_lm_get_owner,
7144	.lm_put_owner = nfsd4_lm_put_owner,
7145	.lm_lock_expirable = nfsd4_lm_lock_expirable,
7146	.lm_expire_lock = nfsd4_lm_expire_lock,
7147 };
7148
7149 static inline void
7150 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7151 {
7152	struct nfs4_lockowner *lo;
7153
7154	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7155		lo = (struct nfs4_lockowner *) fl->fl_owner;
7156		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7157			       GFP_KERNEL);
7158		if (!deny->ld_owner.data)
7159			/* We just don't care that much */
7160			goto nevermind;
7161		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7162	} else {
7163 nevermind:
7164		deny->ld_owner.len = 0;
7165		deny->ld_owner.data = NULL;
7166		deny->ld_clientid.cl_boot = 0;
7167		deny->ld_clientid.cl_id = 0;
7168	}
7169	deny->ld_start = fl->fl_start;
7170	deny->ld_length = NFS4_MAX_UINT64;
7171	if (fl->fl_end != NFS4_MAX_UINT64)
7172		deny->ld_length = fl->fl_end - fl->fl_start + 1;
7173	deny->ld_type = NFS4_READ_LT;
7174	if (fl->fl_type != F_RDLCK)
7175		deny->ld_type = NFS4_WRITE_LT;
7176 }
7177
7178 static struct nfs4_lockowner *
7179 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7180 {
7181	unsigned int strhashval =
ownerstr_hashval(owner); 7182 struct nfs4_stateowner *so; 7183 7184 lockdep_assert_held(&clp->cl_lock); 7185 7186 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], 7187 so_strhash) { 7188 if (so->so_is_open_owner) 7189 continue; 7190 if (same_owner_str(so, owner)) 7191 return lockowner(nfs4_get_stateowner(so)); 7192 } 7193 return NULL; 7194 } 7195 7196 static struct nfs4_lockowner * 7197 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) 7198 { 7199 struct nfs4_lockowner *lo; 7200 7201 spin_lock(&clp->cl_lock); 7202 lo = find_lockowner_str_locked(clp, owner); 7203 spin_unlock(&clp->cl_lock); 7204 return lo; 7205 } 7206 7207 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) 7208 { 7209 unhash_lockowner_locked(lockowner(sop)); 7210 } 7211 7212 static void nfs4_free_lockowner(struct nfs4_stateowner *sop) 7213 { 7214 struct nfs4_lockowner *lo = lockowner(sop); 7215 7216 kmem_cache_free(lockowner_slab, lo); 7217 } 7218 7219 static const struct nfs4_stateowner_operations lockowner_ops = { 7220 .so_unhash = nfs4_unhash_lockowner, 7221 .so_free = nfs4_free_lockowner, 7222 }; 7223 7224 /* 7225 * Alloc a lock owner structure. 7226 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 7227 * occurred. 7228 * 7229 * strhashval = ownerstr_hashval 7230 */ 7231 static struct nfs4_lockowner * 7232 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, 7233 struct nfs4_ol_stateid *open_stp, 7234 struct nfsd4_lock *lock) 7235 { 7236 struct nfs4_lockowner *lo, *ret; 7237 7238 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 7239 if (!lo) 7240 return NULL; 7241 INIT_LIST_HEAD(&lo->lo_blocked); 7242 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 7243 lo->lo_owner.so_is_open_owner = 0; 7244 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; 7245 lo->lo_owner.so_ops = &lockowner_ops; 7246 spin_lock(&clp->cl_lock); 7247 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); 7248 if (ret == NULL) { 7249 list_add(&lo->lo_owner.so_strhash, 7250 &clp->cl_ownerstr_hashtbl[strhashval]); 7251 ret = lo; 7252 } else 7253 nfs4_free_stateowner(&lo->lo_owner); 7254 7255 spin_unlock(&clp->cl_lock); 7256 return ret; 7257 } 7258 7259 static struct nfs4_ol_stateid * 7260 find_lock_stateid(const struct nfs4_lockowner *lo, 7261 const struct nfs4_ol_stateid *ost) 7262 { 7263 struct nfs4_ol_stateid *lst; 7264 7265 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); 7266 7267 /* If ost is not hashed, ost->st_locks will not be valid */ 7268 if (!nfs4_ol_stateid_unhashed(ost)) 7269 list_for_each_entry(lst, &ost->st_locks, st_locks) { 7270 if (lst->st_stateowner == &lo->lo_owner) { 7271 refcount_inc(&lst->st_stid.sc_count); 7272 return lst; 7273 } 7274 } 7275 return NULL; 7276 } 7277 7278 static struct nfs4_ol_stateid * 7279 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, 7280 struct nfs4_file *fp, struct inode *inode, 7281 struct nfs4_ol_stateid *open_stp) 7282 { 7283 struct nfs4_client *clp = lo->lo_owner.so_client; 7284 struct nfs4_ol_stateid *retstp; 7285 7286 mutex_init(&stp->st_mutex); 7287 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 7288 retry: 7289 spin_lock(&clp->cl_lock); 7290 if (nfs4_ol_stateid_unhashed(open_stp)) 7291 goto out_close; 7292 retstp = find_lock_stateid(lo, open_stp); 7293 if (retstp) 7294 goto out_found; 7295 refcount_inc(&stp->st_stid.sc_count); 7296 stp->st_stid.sc_type = NFS4_LOCK_STID; 7297 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); 7298 
get_nfs4_file(fp); 7299 stp->st_stid.sc_file = fp; 7300 stp->st_access_bmap = 0; 7301 stp->st_deny_bmap = open_stp->st_deny_bmap; 7302 stp->st_openstp = open_stp; 7303 spin_lock(&fp->fi_lock); 7304 list_add(&stp->st_locks, &open_stp->st_locks); 7305 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 7306 list_add(&stp->st_perfile, &fp->fi_stateids); 7307 spin_unlock(&fp->fi_lock); 7308 spin_unlock(&clp->cl_lock); 7309 return stp; 7310 out_found: 7311 spin_unlock(&clp->cl_lock); 7312 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 7313 nfs4_put_stid(&retstp->st_stid); 7314 goto retry; 7315 } 7316 /* To keep mutex tracking happy */ 7317 mutex_unlock(&stp->st_mutex); 7318 return retstp; 7319 out_close: 7320 spin_unlock(&clp->cl_lock); 7321 mutex_unlock(&stp->st_mutex); 7322 return NULL; 7323 } 7324 7325 static struct nfs4_ol_stateid * 7326 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, 7327 struct inode *inode, struct nfs4_ol_stateid *ost, 7328 bool *new) 7329 { 7330 struct nfs4_stid *ns = NULL; 7331 struct nfs4_ol_stateid *lst; 7332 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 7333 struct nfs4_client *clp = oo->oo_owner.so_client; 7334 7335 *new = false; 7336 spin_lock(&clp->cl_lock); 7337 lst = find_lock_stateid(lo, ost); 7338 spin_unlock(&clp->cl_lock); 7339 if (lst != NULL) { 7340 if (nfsd4_lock_ol_stateid(lst) == nfs_ok) 7341 goto out; 7342 nfs4_put_stid(&lst->st_stid); 7343 } 7344 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); 7345 if (ns == NULL) 7346 return NULL; 7347 7348 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost); 7349 if (lst == openlockstateid(ns)) 7350 *new = true; 7351 else 7352 nfs4_put_stid(ns); 7353 out: 7354 return lst; 7355 } 7356 7357 static int 7358 check_lock_length(u64 offset, u64 length) 7359 { 7360 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 7361 (length > ~offset))); 7362 } 7363 7364 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 7365 { 7366 struct nfs4_file *fp = lock_stp->st_stid.sc_file; 7367 7368 lockdep_assert_held(&fp->fi_lock); 7369 7370 if (test_access(access, lock_stp)) 7371 return; 7372 __nfs4_file_get_access(fp, access); 7373 set_access(access, lock_stp); 7374 } 7375 7376 static __be32 7377 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 7378 struct nfs4_ol_stateid *ost, 7379 struct nfsd4_lock *lock, 7380 struct nfs4_ol_stateid **plst, bool *new) 7381 { 7382 __be32 status; 7383 struct nfs4_file *fi = ost->st_stid.sc_file; 7384 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 7385 struct nfs4_client *cl = oo->oo_owner.so_client; 7386 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 7387 struct nfs4_lockowner *lo; 7388 struct nfs4_ol_stateid *lst; 7389 unsigned int strhashval; 7390 7391 lo = find_lockowner_str(cl, &lock->lk_new_owner); 7392 if (!lo) { 7393 strhashval = ownerstr_hashval(&lock->lk_new_owner); 7394 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 7395 if (lo == NULL) 7396 return nfserr_jukebox; 7397 } else { 7398 /* with an existing lockowner, seqids must be the same */ 7399 status = nfserr_bad_seqid; 7400 if (!cstate->minorversion && 7401 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) 7402 goto out; 7403 } 7404 7405 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 7406 if (lst == NULL) { 7407 status = nfserr_jukebox; 7408 goto out; 7409 } 7410 7411 status = nfs_ok; 7412 *plst = lst; 7413 out: 7414 nfs4_put_stateowner(&lo->lo_owner); 7415 return 
status; 7416 } 7417 7418 /* 7419 * LOCK operation 7420 */ 7421 __be32 7422 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7423 union nfsd4_op_u *u) 7424 { 7425 struct nfsd4_lock *lock = &u->lock; 7426 struct nfs4_openowner *open_sop = NULL; 7427 struct nfs4_lockowner *lock_sop = NULL; 7428 struct nfs4_ol_stateid *lock_stp = NULL; 7429 struct nfs4_ol_stateid *open_stp = NULL; 7430 struct nfs4_file *fp; 7431 struct nfsd_file *nf = NULL; 7432 struct nfsd4_blocked_lock *nbl = NULL; 7433 struct file_lock *file_lock = NULL; 7434 struct file_lock *conflock = NULL; 7435 __be32 status = 0; 7436 int lkflg; 7437 int err; 7438 bool new = false; 7439 unsigned char fl_type; 7440 unsigned int fl_flags = FL_POSIX; 7441 struct net *net = SVC_NET(rqstp); 7442 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7443 7444 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 7445 (long long) lock->lk_offset, 7446 (long long) lock->lk_length); 7447 7448 if (check_lock_length(lock->lk_offset, lock->lk_length)) 7449 return nfserr_inval; 7450 7451 if ((status = fh_verify(rqstp, &cstate->current_fh, 7452 S_IFREG, NFSD_MAY_LOCK))) { 7453 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 7454 return status; 7455 } 7456 7457 if (lock->lk_is_new) { 7458 if (nfsd4_has_session(cstate)) 7459 /* See rfc 5661 18.10.3: given clientid is ignored: */ 7460 memcpy(&lock->lk_new_clientid, 7461 &cstate->clp->cl_clientid, 7462 sizeof(clientid_t)); 7463 7464 /* validate and update open stateid and open seqid */ 7465 status = nfs4_preprocess_confirmed_seqid_op(cstate, 7466 lock->lk_new_open_seqid, 7467 &lock->lk_new_open_stateid, 7468 &open_stp, nn); 7469 if (status) 7470 goto out; 7471 mutex_unlock(&open_stp->st_mutex); 7472 open_sop = openowner(open_stp->st_stateowner); 7473 status = nfserr_bad_stateid; 7474 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 7475 &lock->lk_new_clientid)) 7476 goto out; 7477 status = lookup_or_create_lock_state(cstate, open_stp, lock, 7478 &lock_stp, &new); 7479 } else { 7480 status = nfs4_preprocess_seqid_op(cstate, 7481 lock->lk_old_lock_seqid, 7482 &lock->lk_old_lock_stateid, 7483 NFS4_LOCK_STID, &lock_stp, nn); 7484 } 7485 if (status) 7486 goto out; 7487 lock_sop = lockowner(lock_stp->st_stateowner); 7488 7489 lkflg = setlkflg(lock->lk_type); 7490 status = nfs4_check_openmode(lock_stp, lkflg); 7491 if (status) 7492 goto out; 7493 7494 status = nfserr_grace; 7495 if (locks_in_grace(net) && !lock->lk_reclaim) 7496 goto out; 7497 status = nfserr_no_grace; 7498 if (!locks_in_grace(net) && lock->lk_reclaim) 7499 goto out; 7500 7501 if (lock->lk_reclaim) 7502 fl_flags |= FL_RECLAIM; 7503 7504 fp = lock_stp->st_stid.sc_file; 7505 switch (lock->lk_type) { 7506 case NFS4_READW_LT: 7507 if (nfsd4_has_session(cstate)) 7508 fl_flags |= FL_SLEEP; 7509 fallthrough; 7510 case NFS4_READ_LT: 7511 spin_lock(&fp->fi_lock); 7512 nf = find_readable_file_locked(fp); 7513 if (nf) 7514 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 7515 spin_unlock(&fp->fi_lock); 7516 fl_type = F_RDLCK; 7517 break; 7518 case NFS4_WRITEW_LT: 7519 if (nfsd4_has_session(cstate)) 7520 fl_flags |= FL_SLEEP; 7521 fallthrough; 7522 case NFS4_WRITE_LT: 7523 spin_lock(&fp->fi_lock); 7524 nf = find_writeable_file_locked(fp); 7525 if (nf) 7526 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 7527 spin_unlock(&fp->fi_lock); 7528 fl_type = F_WRLCK; 7529 break; 7530 default: 7531 status = nfserr_inval; 7532 goto out; 7533 } 7534 7535 if (!nf) { 7536 status = nfserr_openmode; 7537 goto out; 7538 } 7539 7540 /* 
7541 * Most filesystems with their own ->lock operations will block 7542 * the nfsd thread waiting to acquire the lock. That leads to 7543 * deadlocks (we don't want every nfsd thread tied up waiting 7544 * for file locks), so don't attempt blocking lock notifications 7545 * on those filesystems: 7546 */ 7547 if (nf->nf_file->f_op->lock) 7548 fl_flags &= ~FL_SLEEP; 7549 7550 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); 7551 if (!nbl) { 7552 dprintk("NFSD: %s: unable to allocate block!\n", __func__); 7553 status = nfserr_jukebox; 7554 goto out; 7555 } 7556 7557 file_lock = &nbl->nbl_lock; 7558 file_lock->fl_type = fl_type; 7559 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); 7560 file_lock->fl_pid = current->tgid; 7561 file_lock->fl_file = nf->nf_file; 7562 file_lock->fl_flags = fl_flags; 7563 file_lock->fl_lmops = &nfsd_posix_mng_ops; 7564 file_lock->fl_start = lock->lk_offset; 7565 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 7566 nfs4_transform_lock_offset(file_lock); 7567 7568 conflock = locks_alloc_lock(); 7569 if (!conflock) { 7570 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 7571 status = nfserr_jukebox; 7572 goto out; 7573 } 7574 7575 if (fl_flags & FL_SLEEP) { 7576 nbl->nbl_time = ktime_get_boottime_seconds(); 7577 spin_lock(&nn->blocked_locks_lock); 7578 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 7579 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 7580 kref_get(&nbl->nbl_kref); 7581 spin_unlock(&nn->blocked_locks_lock); 7582 } 7583 7584 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); 7585 switch (err) { 7586 case 0: /* success! */ 7587 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); 7588 status = 0; 7589 if (lock->lk_reclaim) 7590 nn->somebody_reclaimed = true; 7591 break; 7592 case FILE_LOCK_DEFERRED: 7593 kref_put(&nbl->nbl_kref, free_nbl); 7594 nbl = NULL; 7595 fallthrough; 7596 case -EAGAIN: /* conflock holds conflicting lock */ 7597 status = nfserr_denied; 7598 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 7599 nfs4_set_lock_denied(conflock, &lock->lk_denied); 7600 break; 7601 case -EDEADLK: 7602 status = nfserr_deadlock; 7603 break; 7604 default: 7605 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 7606 status = nfserrno(err); 7607 break; 7608 } 7609 out: 7610 if (nbl) { 7611 /* dequeue it if we queued it before */ 7612 if (fl_flags & FL_SLEEP) { 7613 spin_lock(&nn->blocked_locks_lock); 7614 if (!list_empty(&nbl->nbl_list) && 7615 !list_empty(&nbl->nbl_lru)) { 7616 list_del_init(&nbl->nbl_list); 7617 list_del_init(&nbl->nbl_lru); 7618 kref_put(&nbl->nbl_kref, free_nbl); 7619 } 7620 /* nbl can use one of lists to be linked to reaplist */ 7621 spin_unlock(&nn->blocked_locks_lock); 7622 } 7623 free_blocked_lock(nbl); 7624 } 7625 if (nf) 7626 nfsd_file_put(nf); 7627 if (lock_stp) { 7628 /* Bump seqid manually if the 4.0 replay owner is openowner */ 7629 if (cstate->replay_owner && 7630 cstate->replay_owner != &lock_sop->lo_owner && 7631 seqid_mutating_err(ntohl(status))) 7632 lock_sop->lo_owner.so_seqid++; 7633 7634 /* 7635 * If this is a new, never-before-used stateid, and we are 7636 * returning an error, then just go ahead and release it. 
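 * (An error reply carries no stateid, so the client never learns of
 * this one; releasing it avoids leaving an orphaned lock stateid
 * hashed on the open stateid.)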
7637 */ 7638 if (status && new) 7639 release_lock_stateid(lock_stp); 7640 7641 mutex_unlock(&lock_stp->st_mutex); 7642 7643 nfs4_put_stid(&lock_stp->st_stid); 7644 } 7645 if (open_stp) 7646 nfs4_put_stid(&open_stp->st_stid); 7647 nfsd4_bump_seqid(cstate, status); 7648 if (conflock) 7649 locks_free_lock(conflock); 7650 return status; 7651 } 7652 7653 /* 7654 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 7655 * so we do a temporary open here just to get an open file to pass to 7656 * vfs_test_lock. 7657 */ 7658 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 7659 { 7660 struct nfsd_file *nf; 7661 struct inode *inode; 7662 __be32 err; 7663 7664 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); 7665 if (err) 7666 return err; 7667 inode = fhp->fh_dentry->d_inode; 7668 inode_lock(inode); /* to block new leases till after test_lock: */ 7669 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 7670 if (err) 7671 goto out; 7672 lock->fl_file = nf->nf_file; 7673 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); 7674 lock->fl_file = NULL; 7675 out: 7676 inode_unlock(inode); 7677 nfsd_file_put(nf); 7678 return err; 7679 } 7680 7681 /* 7682 * LOCKT operation 7683 */ 7684 __be32 7685 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7686 union nfsd4_op_u *u) 7687 { 7688 struct nfsd4_lockt *lockt = &u->lockt; 7689 struct file_lock *file_lock = NULL; 7690 struct nfs4_lockowner *lo = NULL; 7691 __be32 status; 7692 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7693 7694 if (locks_in_grace(SVC_NET(rqstp))) 7695 return nfserr_grace; 7696 7697 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 7698 return nfserr_inval; 7699 7700 if (!nfsd4_has_session(cstate)) { 7701 status = set_client(&lockt->lt_clientid, cstate, nn); 7702 if (status) 7703 goto out; 7704 } 7705 7706 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 7707 goto out; 7708 7709 file_lock = locks_alloc_lock(); 7710 if (!file_lock) { 7711 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 7712 status = nfserr_jukebox; 7713 goto out; 7714 } 7715 7716 switch (lockt->lt_type) { 7717 case NFS4_READ_LT: 7718 case NFS4_READW_LT: 7719 file_lock->fl_type = F_RDLCK; 7720 break; 7721 case NFS4_WRITE_LT: 7722 case NFS4_WRITEW_LT: 7723 file_lock->fl_type = F_WRLCK; 7724 break; 7725 default: 7726 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 7727 status = nfserr_inval; 7728 goto out; 7729 } 7730 7731 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); 7732 if (lo) 7733 file_lock->fl_owner = (fl_owner_t)lo; 7734 file_lock->fl_pid = current->tgid; 7735 file_lock->fl_flags = FL_POSIX; 7736 7737 file_lock->fl_start = lockt->lt_offset; 7738 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 7739 7740 nfs4_transform_lock_offset(file_lock); 7741 7742 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 7743 if (status) 7744 goto out; 7745 7746 if (file_lock->fl_type != F_UNLCK) { 7747 status = nfserr_denied; 7748 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 7749 } 7750 out: 7751 if (lo) 7752 nfs4_put_stateowner(&lo->lo_owner); 7753 if (file_lock) 7754 locks_free_lock(file_lock); 7755 return status; 7756 } 7757 7758 __be32 7759 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7760 union nfsd4_op_u *u) 7761 { 7762 struct nfsd4_locku *locku = &u->locku; 7763 struct nfs4_ol_stateid *stp; 7764 struct nfsd_file *nf = NULL; 7765 struct file_lock 

__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct nfsd_file *nf = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
		(long long)locku->lu_offset,
		(long long)locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					  &locku->lu_stateid, NFS4_LOCK_STID,
					  &stp, nn);
	if (status)
		goto out;
	nf = find_any_file(stp->st_stid.sc_file);
	if (!nf) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto put_file;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;
	file_lock->fl_end = last_byte_offset(locku->lu_offset,
					     locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
put_file:
	nfsd_file_put(nf);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto put_file;
}

/*
 * Returns
 *	true:  locks held by lockowner
 *	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	bool status = false;
	struct nfsd_file *nf = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!nf) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = file_inode(nf->nf_file);
	flctx = locks_inode_context(inode);

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	nfsd_file_put(nf);
	return status;
}
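
/*
 * Illustrative sketch (not part of the original source): the server
 * stores the nfs4_lockowner pointer itself in fl_owner, so "does this
 * owner still hold locks on this inode?" reduces to a pointer
 * comparison under flc_lock, exactly as check_for_locks() does above.
 * A hypothetical helper that counts such locks might look like this:
 */
#if 0
static unsigned int count_owner_locks(struct inode *inode,
				      struct nfs4_lockowner *lowner)
{
	struct file_lock_context *flctx = locks_inode_context(inode);
	struct file_lock *fl;
	unsigned int n = 0;

	if (!flctx)
		return 0;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list)
		if (fl->fl_owner == (fl_owner_t)lowner)
			n++;
	spin_unlock(&flctx->flc_lock);
	return n;
}
#endif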

/**
 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
 * @rqstp: RPC transaction
 * @cstate: NFSv4 COMPOUND state
 * @u: RELEASE_LOCKOWNER arguments
 *
 * The lockowner's so_count is bumped when a lock record is added
 * or when copying a conflicting lock. The latter case is brief,
 * but can lead to fleeting false positives when looking for
 * locks-in-use.
 *
 * Return values:
 *   %nfs_ok: lockowner released or not found
 *   %nfserr_locks_held: lockowner still in use
 *   %nfserr_stale_clientid: clientid no longer active
 *   %nfserr_expired: clientid not recognized
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_ol_stateid *stp;
	struct nfs4_lockowner *lo;
	struct nfs4_client *clp;
	LIST_HEAD(reaplist);
	__be32 status;

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = set_client(clid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return nfs_ok;
	}
	if (atomic_read(&lo->lo_owner.so_count) != 2) {
		spin_unlock(&clp->cl_lock);
		nfs4_put_stateowner(&lo->lo_owner);
		return nfserr_locks_held;
	}
	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);

	free_ol_stateid_reaplist(&reaplist);
	remove_blocked_locks(lo);
	nfs4_put_stateowner(&lo->lo_owner);
	return nfs_ok;
}
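
/*
 * Illustrative sketch (not part of the original source): why
 * so_count == 2 in nfsd4_release_lockowner() means "no locks held".
 * At that point the owner is expected to hold exactly one reference
 * from the client's owner hashtable plus the one just taken by
 * find_lockowner_str_locked(); anything beyond that comes from a lock
 * record (or a transient conflicting-lock copy, hence the fleeting
 * false positives noted above). Expressed as a hypothetical predicate:
 */
#if 0
static bool lockowner_is_idle(struct nfs4_lockowner *lo)
{
	/* 1 (hash table) + 1 (our lookup) == 2: nothing else pins it */
	return atomic_read(&lo->lo_owner.so_count) == 2;
}
#endif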

static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}

/*
 * failure => all bets are off for recovery after a server reset: the
 * client will get nfserr_no_grace when it tries to reclaim.
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
		       struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		crp->cr_name.data = name.data;
		crp->cr_name.len = name.len;
		crp->cr_princhash.data = princhash.data;
		crp->cr_princhash.len = princhash.len;
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}

void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp->cr_name.data);
	kfree(crp->cr_princhash.data);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					 struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}

/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	strhashval = clientstr_hashval(name);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (compare_blob(&crp->cr_name, &name) == 0)
			return crp;
	}
	return NULL;
}

__be32
nfs4_check_open_reclaim(struct nfs4_client *clp)
{
	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
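
/*
 * Illustrative sketch (not part of the original source): the reclaim
 * records above form a simple round trip -- nfs4_client_to_reclaim()
 * files a record under the client's recovery name, and
 * nfsd4_find_reclaim_client() looks it up again when the client returns
 * after a server reboot. The name/princhash values are placeholders
 * supplied by the caller.
 */
#if 0
static bool demo_remember_and_find(struct xdr_netobj name,
				   struct xdr_netobj princhash,
				   struct nfsd_net *nn)
{
	if (!nfs4_client_to_reclaim(name, princhash, nn))
		return false;	/* ENOMEM: no reclaim after reboot */
	return nfsd4_find_reclaim_client(name, nn) != NULL;
}
#endif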

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM. Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
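
/*
 * Worked example (not part of the original source): "4 delegations per
 * megabyte" as a single shift. One megabyte is 2^20 bytes, i.e.
 * 2^(20 - PAGE_SHIFT) pages, so dividing the page count by that and
 * multiplying by 4 (= 2^2) collapses to a right shift by
 * (20 - 2 - PAGE_SHIFT). With 4K pages (PAGE_SHIFT == 12):
 */
#if 0
static void demo_max_delegations(void)
{
	/* e.g. 1 GiB of reclaimable pagecache = 262144 4K pages */
	unsigned long pages = 262144;
	unsigned long max = pages >> (20 - 2 - 12);

	/* max == 4096, i.e. 4 delegations x 1024 MiB */
}
#endif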

static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = ktime_get_real_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
	get_net(net);

	nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
	nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
	nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;

	if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
		goto err_shrinker;
	return 0;

err_shrinker:
	put_net(net);
	kfree(nn->sessionid_hashtbl);
err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	trace_nfsd_grace_start(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
	       net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
	if (ret)
		return ret;

	ret = nfsd4_create_callback_queue();
	if (ret) {
		rhltable_destroy(&nfs4_file_rhltable);
		return ret;
	}

	set_max_delegations();
	return 0;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	unregister_shrinker(&nn->nfsd_client_shrinker);
	cancel_work(&nn->nfsd_shrinker_work);
	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_shutdown_umount(nn);
#endif
}

void
nfs4_state_shutdown(void)
{
	nfsd4_destroy_callback_queue();
	rhltable_destroy(&nfs4_file_rhltable);
}
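
/*
 * Illustrative sketch (not part of the original source): teardown here
 * mirrors setup in reverse -- nfs4_state_start() initializes the file
 * rhltable and then the callback queue, while nfs4_state_shutdown()
 * destroys the queue before the table. nfs4_state_create_net() applies
 * the same discipline with its err_* ladder: each label unwinds only
 * what succeeded before the failure. The alloc_a/alloc_b/free_a helpers
 * below are hypothetical, shown for the shape of the pattern only.
 */
#if 0
static int demo_two_step_setup(void)
{
	int ret;

	ret = alloc_a();		/* hypothetical step 1 */
	if (ret)
		return ret;
	ret = alloc_b();		/* hypothetical step 2 */
	if (ret)
		goto err_free_a;
	return 0;

err_free_a:
	free_a();			/* undo step 1 only */
	return ret;
}
#endif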

static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
			       union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
			       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
			     union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
			 union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}
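
/*
 * Illustrative sketch (not part of the original source): how the
 * set/get pairs above cooperate within one NFSv4.1 COMPOUND.
 * put_stateid() saves the stateid produced by an op (only when a
 * session is in use, i.e. cstate->minorversion != 0); get_stateid()
 * later substitutes the saved value whenever a subsequent op carried
 * the special "current" stateid. The two nfsd4_op_u pointers below are
 * placeholders for decoded OPEN and READ arguments.
 */
#if 0
static void demo_current_stateid(struct nfsd4_compound_state *cstate,
				 union nfsd4_op_u *open_u,
				 union nfsd4_op_u *read_u)
{
	/* OPEN result: remember op_stateid as the current stateid */
	nfsd4_set_openstateid(cstate, open_u);

	/*
	 * READ args: if rd_stateid is the special "current" value,
	 * get_stateid() overwrites it with the one saved above.
	 */
	nfsd4_get_readstateid(cstate, read_u);
}
#endif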