/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY NFSDDBG_PROC

#define all_ones {{ ~0, ~0}, ~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);
static void deleg_reaper(struct nfsd_net *nn);

/*
Locking: */ 93 94 /* 95 * Currently used for the del_recall_lru and file hash table. In an 96 * effort to decrease the scope of the client_mutex, this spinlock may 97 * eventually cover more: 98 */ 99 static DEFINE_SPINLOCK(state_lock); 100 101 enum nfsd4_st_mutex_lock_subclass { 102 OPEN_STATEID_MUTEX = 0, 103 LOCK_STATEID_MUTEX = 1, 104 }; 105 106 /* 107 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for 108 * the refcount on the open stateid to drop. 109 */ 110 static DECLARE_WAIT_QUEUE_HEAD(close_wq); 111 112 /* 113 * A waitqueue where a writer to clients/#/ctl destroying a client can 114 * wait for cl_rpc_users to drop to 0 and then for the client to be 115 * unhashed. 116 */ 117 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq); 118 119 static struct kmem_cache *client_slab; 120 static struct kmem_cache *openowner_slab; 121 static struct kmem_cache *lockowner_slab; 122 static struct kmem_cache *file_slab; 123 static struct kmem_cache *stateid_slab; 124 static struct kmem_cache *deleg_slab; 125 static struct kmem_cache *odstate_slab; 126 127 static void free_session(struct nfsd4_session *); 128 129 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops; 130 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops; 131 static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops; 132 133 static struct workqueue_struct *laundry_wq; 134 135 int nfsd4_create_laundry_wq(void) 136 { 137 int rc = 0; 138 139 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); 140 if (laundry_wq == NULL) 141 rc = -ENOMEM; 142 return rc; 143 } 144 145 void nfsd4_destroy_laundry_wq(void) 146 { 147 destroy_workqueue(laundry_wq); 148 } 149 150 static bool is_session_dead(struct nfsd4_session *ses) 151 { 152 return ses->se_flags & NFS4_SESSION_DEAD; 153 } 154 155 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) 156 { 157 if (atomic_read(&ses->se_ref) > ref_held_by_me) 158 return nfserr_jukebox; 159 ses->se_flags |= NFS4_SESSION_DEAD; 160 return nfs_ok; 161 } 162 163 static bool is_client_expired(struct nfs4_client *clp) 164 { 165 return clp->cl_time == 0; 166 } 167 168 static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn, 169 struct nfs4_client *clp) 170 { 171 if (clp->cl_state != NFSD4_ACTIVE) 172 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0); 173 } 174 175 static __be32 get_client_locked(struct nfs4_client *clp) 176 { 177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 178 179 lockdep_assert_held(&nn->client_lock); 180 181 if (is_client_expired(clp)) 182 return nfserr_expired; 183 atomic_inc(&clp->cl_rpc_users); 184 nfsd4_dec_courtesy_client_count(nn, clp); 185 clp->cl_state = NFSD4_ACTIVE; 186 return nfs_ok; 187 } 188 189 /* must be called under the client_lock */ 190 static inline void 191 renew_client_locked(struct nfs4_client *clp) 192 { 193 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 194 195 if (is_client_expired(clp)) { 196 WARN_ON(1); 197 printk("%s: client (clientid %08x/%08x) already expired\n", 198 __func__, 199 clp->cl_clientid.cl_boot, 200 clp->cl_clientid.cl_id); 201 return; 202 } 203 204 list_move_tail(&clp->cl_lru, &nn->client_lru); 205 clp->cl_time = ktime_get_boottime_seconds(); 206 nfsd4_dec_courtesy_client_count(nn, clp); 207 clp->cl_state = NFSD4_ACTIVE; 208 } 209 210 static void put_client_renew_locked(struct nfs4_client *clp) 211 { 212 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 213 214 lockdep_assert_held(&nn->client_lock); 215 216 if 
(!atomic_dec_and_test(&clp->cl_rpc_users)) 217 return; 218 if (!is_client_expired(clp)) 219 renew_client_locked(clp); 220 else 221 wake_up_all(&expiry_wq); 222 } 223 224 static void put_client_renew(struct nfs4_client *clp) 225 { 226 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 227 228 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock)) 229 return; 230 if (!is_client_expired(clp)) 231 renew_client_locked(clp); 232 else 233 wake_up_all(&expiry_wq); 234 spin_unlock(&nn->client_lock); 235 } 236 237 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) 238 { 239 __be32 status; 240 241 if (is_session_dead(ses)) 242 return nfserr_badsession; 243 status = get_client_locked(ses->se_client); 244 if (status) 245 return status; 246 atomic_inc(&ses->se_ref); 247 return nfs_ok; 248 } 249 250 static void nfsd4_put_session_locked(struct nfsd4_session *ses) 251 { 252 struct nfs4_client *clp = ses->se_client; 253 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 254 255 lockdep_assert_held(&nn->client_lock); 256 257 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) 258 free_session(ses); 259 put_client_renew_locked(clp); 260 } 261 262 static void nfsd4_put_session(struct nfsd4_session *ses) 263 { 264 struct nfs4_client *clp = ses->se_client; 265 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 266 267 spin_lock(&nn->client_lock); 268 nfsd4_put_session_locked(ses); 269 spin_unlock(&nn->client_lock); 270 } 271 272 static struct nfsd4_blocked_lock * 273 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh, 274 struct nfsd_net *nn) 275 { 276 struct nfsd4_blocked_lock *cur, *found = NULL; 277 278 spin_lock(&nn->blocked_locks_lock); 279 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { 280 if (fh_match(fh, &cur->nbl_fh)) { 281 list_del_init(&cur->nbl_list); 282 WARN_ON(list_empty(&cur->nbl_lru)); 283 list_del_init(&cur->nbl_lru); 284 found = cur; 285 break; 286 } 287 } 288 spin_unlock(&nn->blocked_locks_lock); 289 if (found) 290 locks_delete_block(&found->nbl_lock); 291 return found; 292 } 293 294 static struct nfsd4_blocked_lock * 295 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, 296 struct nfsd_net *nn) 297 { 298 struct nfsd4_blocked_lock *nbl; 299 300 nbl = find_blocked_lock(lo, fh, nn); 301 if (!nbl) { 302 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL); 303 if (nbl) { 304 INIT_LIST_HEAD(&nbl->nbl_list); 305 INIT_LIST_HEAD(&nbl->nbl_lru); 306 fh_copy_shallow(&nbl->nbl_fh, fh); 307 locks_init_lock(&nbl->nbl_lock); 308 kref_init(&nbl->nbl_kref); 309 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, 310 &nfsd4_cb_notify_lock_ops, 311 NFSPROC4_CLNT_CB_NOTIFY_LOCK); 312 } 313 } 314 return nbl; 315 } 316 317 static void 318 free_nbl(struct kref *kref) 319 { 320 struct nfsd4_blocked_lock *nbl; 321 322 nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref); 323 locks_release_private(&nbl->nbl_lock); 324 kfree(nbl); 325 } 326 327 static void 328 free_blocked_lock(struct nfsd4_blocked_lock *nbl) 329 { 330 locks_delete_block(&nbl->nbl_lock); 331 kref_put(&nbl->nbl_kref, free_nbl); 332 } 333 334 static void 335 remove_blocked_locks(struct nfs4_lockowner *lo) 336 { 337 struct nfs4_client *clp = lo->lo_owner.so_client; 338 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 339 struct nfsd4_blocked_lock *nbl; 340 LIST_HEAD(reaplist); 341 342 /* Dequeue all blocked locks */ 343 spin_lock(&nn->blocked_locks_lock); 344 while (!list_empty(&lo->lo_blocked)) { 345 nbl = list_first_entry(&lo->lo_blocked, 346 struct 
nfsd4_blocked_lock, 347 nbl_list); 348 list_del_init(&nbl->nbl_list); 349 WARN_ON(list_empty(&nbl->nbl_lru)); 350 list_move(&nbl->nbl_lru, &reaplist); 351 } 352 spin_unlock(&nn->blocked_locks_lock); 353 354 /* Now free them */ 355 while (!list_empty(&reaplist)) { 356 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, 357 nbl_lru); 358 list_del_init(&nbl->nbl_lru); 359 free_blocked_lock(nbl); 360 } 361 } 362 363 static void 364 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb) 365 { 366 struct nfsd4_blocked_lock *nbl = container_of(cb, 367 struct nfsd4_blocked_lock, nbl_cb); 368 locks_delete_block(&nbl->nbl_lock); 369 } 370 371 static int 372 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) 373 { 374 trace_nfsd_cb_notify_lock_done(&zero_stateid, task); 375 376 /* 377 * Since this is just an optimization, we don't try very hard if it 378 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and 379 * just quit trying on anything else. 380 */ 381 switch (task->tk_status) { 382 case -NFS4ERR_DELAY: 383 rpc_delay(task, 1 * HZ); 384 return 0; 385 default: 386 return 1; 387 } 388 } 389 390 static void 391 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb) 392 { 393 struct nfsd4_blocked_lock *nbl = container_of(cb, 394 struct nfsd4_blocked_lock, nbl_cb); 395 396 free_blocked_lock(nbl); 397 } 398 399 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { 400 .prepare = nfsd4_cb_notify_lock_prepare, 401 .done = nfsd4_cb_notify_lock_done, 402 .release = nfsd4_cb_notify_lock_release, 403 }; 404 405 /* 406 * We store the NONE, READ, WRITE, and BOTH bits separately in the 407 * st_{access,deny}_bmap field of the stateid, in order to track not 408 * only what share bits are currently in force, but also what 409 * combinations of share bits previous opens have used. This allows us 410 * to enforce the recommendation in 411 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that 412 * the server return an error if the client attempt to downgrade to a 413 * combination of share bits not explicable by closing some of its 414 * previous opens. 415 * 416 * This enforcement is arguably incomplete, since we don't keep 417 * track of access/deny bit combinations; so, e.g., we allow: 418 * 419 * OPEN allow read, deny write 420 * OPEN allow both, deny none 421 * DOWNGRADE allow read, deny none 422 * 423 * which we should reject. 424 * 425 * But you could also argue that our current code is already overkill, 426 * since it only exists to return NFS4ERR_INVAL on incorrect client 427 * behavior. 
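 *
 * A worked illustration (using the wire values, where
 * NFS4_SHARE_ACCESS_READ is 1, NFS4_SHARE_ACCESS_WRITE is 2 and
 * NFS4_SHARE_ACCESS_BOTH is 3): an OPEN with "allow read" sets bit 1
 * of st_access_bmap, and a later OPEN with "allow both" sets bit 3,
 * so bmap_to_share_mode() below returns 1|3 == NFS4_SHARE_ACCESS_BOTH
 * as the union of modes in force, while the individual bits still
 * record which combinations previous opens actually used.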
428 */ 429 static unsigned int 430 bmap_to_share_mode(unsigned long bmap) 431 { 432 int i; 433 unsigned int access = 0; 434 435 for (i = 1; i < 4; i++) { 436 if (test_bit(i, &bmap)) 437 access |= i; 438 } 439 return access; 440 } 441 442 /* set share access for a given stateid */ 443 static inline void 444 set_access(u32 access, struct nfs4_ol_stateid *stp) 445 { 446 unsigned char mask = 1 << access; 447 448 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 449 stp->st_access_bmap |= mask; 450 } 451 452 /* clear share access for a given stateid */ 453 static inline void 454 clear_access(u32 access, struct nfs4_ol_stateid *stp) 455 { 456 unsigned char mask = 1 << access; 457 458 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 459 stp->st_access_bmap &= ~mask; 460 } 461 462 /* test whether a given stateid has access */ 463 static inline bool 464 test_access(u32 access, struct nfs4_ol_stateid *stp) 465 { 466 unsigned char mask = 1 << access; 467 468 return (bool)(stp->st_access_bmap & mask); 469 } 470 471 /* set share deny for a given stateid */ 472 static inline void 473 set_deny(u32 deny, struct nfs4_ol_stateid *stp) 474 { 475 unsigned char mask = 1 << deny; 476 477 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 478 stp->st_deny_bmap |= mask; 479 } 480 481 /* clear share deny for a given stateid */ 482 static inline void 483 clear_deny(u32 deny, struct nfs4_ol_stateid *stp) 484 { 485 unsigned char mask = 1 << deny; 486 487 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 488 stp->st_deny_bmap &= ~mask; 489 } 490 491 /* test whether a given stateid is denying specific access */ 492 static inline bool 493 test_deny(u32 deny, struct nfs4_ol_stateid *stp) 494 { 495 unsigned char mask = 1 << deny; 496 497 return (bool)(stp->st_deny_bmap & mask); 498 } 499 500 static int nfs4_access_to_omode(u32 access) 501 { 502 switch (access & NFS4_SHARE_ACCESS_BOTH) { 503 case NFS4_SHARE_ACCESS_READ: 504 return O_RDONLY; 505 case NFS4_SHARE_ACCESS_WRITE: 506 return O_WRONLY; 507 case NFS4_SHARE_ACCESS_BOTH: 508 return O_RDWR; 509 } 510 WARN_ON_ONCE(1); 511 return O_RDONLY; 512 } 513 514 static inline int 515 access_permit_read(struct nfs4_ol_stateid *stp) 516 { 517 return test_access(NFS4_SHARE_ACCESS_READ, stp) || 518 test_access(NFS4_SHARE_ACCESS_BOTH, stp) || 519 test_access(NFS4_SHARE_ACCESS_WRITE, stp); 520 } 521 522 static inline int 523 access_permit_write(struct nfs4_ol_stateid *stp) 524 { 525 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || 526 test_access(NFS4_SHARE_ACCESS_BOTH, stp); 527 } 528 529 static inline struct nfs4_stateowner * 530 nfs4_get_stateowner(struct nfs4_stateowner *sop) 531 { 532 atomic_inc(&sop->so_count); 533 return sop; 534 } 535 536 static int 537 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner) 538 { 539 return (sop->so_owner.len == owner->len) && 540 0 == memcmp(sop->so_owner.data, owner->data, owner->len); 541 } 542 543 static struct nfs4_openowner * 544 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open, 545 struct nfs4_client *clp) 546 { 547 struct nfs4_stateowner *so; 548 549 lockdep_assert_held(&clp->cl_lock); 550 551 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], 552 so_strhash) { 553 if (!so->so_is_open_owner) 554 continue; 555 if (same_owner_str(so, &open->op_owner)) 556 return openowner(nfs4_get_stateowner(so)); 557 } 558 return NULL; 559 } 560 561 static struct nfs4_openowner * 562 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, 563 struct nfs4_client *clp) 564 { 565 struct nfs4_openowner *oo; 
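	/* cl_ownerstr_hashtbl is protected by cl_lock; see the lockdep
	 * assertion in find_openstateowner_str_locked() above. */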
566 567 spin_lock(&clp->cl_lock); 568 oo = find_openstateowner_str_locked(hashval, open, clp); 569 spin_unlock(&clp->cl_lock); 570 return oo; 571 } 572 573 static inline u32 574 opaque_hashval(const void *ptr, int nbytes) 575 { 576 unsigned char *cptr = (unsigned char *) ptr; 577 578 u32 x = 0; 579 while (nbytes--) { 580 x *= 37; 581 x += *cptr++; 582 } 583 return x; 584 } 585 586 static void nfsd4_free_file_rcu(struct rcu_head *rcu) 587 { 588 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu); 589 590 kmem_cache_free(file_slab, fp); 591 } 592 593 void 594 put_nfs4_file(struct nfs4_file *fi) 595 { 596 if (refcount_dec_and_test(&fi->fi_ref)) { 597 nfsd4_file_hash_remove(fi); 598 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); 599 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); 600 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); 601 } 602 } 603 604 static struct nfsd_file * 605 find_writeable_file_locked(struct nfs4_file *f) 606 { 607 struct nfsd_file *ret; 608 609 lockdep_assert_held(&f->fi_lock); 610 611 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); 612 if (!ret) 613 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 614 return ret; 615 } 616 617 static struct nfsd_file * 618 find_writeable_file(struct nfs4_file *f) 619 { 620 struct nfsd_file *ret; 621 622 spin_lock(&f->fi_lock); 623 ret = find_writeable_file_locked(f); 624 spin_unlock(&f->fi_lock); 625 626 return ret; 627 } 628 629 static struct nfsd_file * 630 find_readable_file_locked(struct nfs4_file *f) 631 { 632 struct nfsd_file *ret; 633 634 lockdep_assert_held(&f->fi_lock); 635 636 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); 637 if (!ret) 638 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 639 return ret; 640 } 641 642 static struct nfsd_file * 643 find_readable_file(struct nfs4_file *f) 644 { 645 struct nfsd_file *ret; 646 647 spin_lock(&f->fi_lock); 648 ret = find_readable_file_locked(f); 649 spin_unlock(&f->fi_lock); 650 651 return ret; 652 } 653 654 static struct nfsd_file * 655 find_rw_file(struct nfs4_file *f) 656 { 657 struct nfsd_file *ret; 658 659 spin_lock(&f->fi_lock); 660 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 661 spin_unlock(&f->fi_lock); 662 663 return ret; 664 } 665 666 struct nfsd_file * 667 find_any_file(struct nfs4_file *f) 668 { 669 struct nfsd_file *ret; 670 671 if (!f) 672 return NULL; 673 spin_lock(&f->fi_lock); 674 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 675 if (!ret) { 676 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); 677 if (!ret) 678 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); 679 } 680 spin_unlock(&f->fi_lock); 681 return ret; 682 } 683 684 static struct nfsd_file *find_any_file_locked(struct nfs4_file *f) 685 { 686 lockdep_assert_held(&f->fi_lock); 687 688 if (f->fi_fds[O_RDWR]) 689 return f->fi_fds[O_RDWR]; 690 if (f->fi_fds[O_WRONLY]) 691 return f->fi_fds[O_WRONLY]; 692 if (f->fi_fds[O_RDONLY]) 693 return f->fi_fds[O_RDONLY]; 694 return NULL; 695 } 696 697 static atomic_long_t num_delegations; 698 unsigned long max_delegations; 699 700 /* 701 * Open owner state (share locks) 702 */ 703 704 /* hash tables for lock and open owners */ 705 #define OWNER_HASH_BITS 8 706 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) 707 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) 708 709 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) 710 { 711 unsigned int ret; 712 713 ret = opaque_hashval(ownername->data, ownername->len); 714 return ret & OWNER_HASH_MASK; 715 } 716 717 static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp; 718 719 static const struct rhashtable_params nfs4_file_rhash_params = { 720 
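	/* nfs4_file objects are keyed on the inode pointer they wrap */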
.key_len = sizeof_field(struct nfs4_file, fi_inode), 721 .key_offset = offsetof(struct nfs4_file, fi_inode), 722 .head_offset = offsetof(struct nfs4_file, fi_rlist), 723 724 /* 725 * Start with a single page hash table to reduce resizing churn 726 * on light workloads. 727 */ 728 .min_size = 256, 729 .automatic_shrinking = true, 730 }; 731 732 /* 733 * Check if courtesy clients have conflicting access and resolve it if possible 734 * 735 * access: is op_share_access if share_access is true. 736 * Check if access mode, op_share_access, would conflict with 737 * the current deny mode of the file 'fp'. 738 * access: is op_share_deny if share_access is false. 739 * Check if the deny mode, op_share_deny, would conflict with 740 * current access of the file 'fp'. 741 * stp: skip checking this entry. 742 * new_stp: normal open, not open upgrade. 743 * 744 * Function returns: 745 * false - access/deny mode conflict with normal client. 746 * true - no conflict or conflict with courtesy client(s) is resolved. 747 */ 748 static bool 749 nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp, 750 struct nfs4_ol_stateid *stp, u32 access, bool share_access) 751 { 752 struct nfs4_ol_stateid *st; 753 bool resolvable = true; 754 unsigned char bmap; 755 struct nfsd_net *nn; 756 struct nfs4_client *clp; 757 758 lockdep_assert_held(&fp->fi_lock); 759 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { 760 /* ignore lock stateid */ 761 if (st->st_openstp) 762 continue; 763 if (st == stp && new_stp) 764 continue; 765 /* check file access against deny mode or vice versa */ 766 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap; 767 if (!(access & bmap_to_share_mode(bmap))) 768 continue; 769 clp = st->st_stid.sc_client; 770 if (try_to_expire_client(clp)) 771 continue; 772 resolvable = false; 773 break; 774 } 775 if (resolvable) { 776 clp = stp->st_stid.sc_client; 777 nn = net_generic(clp->net, nfsd_net_id); 778 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 779 } 780 return resolvable; 781 } 782 783 static void 784 __nfs4_file_get_access(struct nfs4_file *fp, u32 access) 785 { 786 lockdep_assert_held(&fp->fi_lock); 787 788 if (access & NFS4_SHARE_ACCESS_WRITE) 789 atomic_inc(&fp->fi_access[O_WRONLY]); 790 if (access & NFS4_SHARE_ACCESS_READ) 791 atomic_inc(&fp->fi_access[O_RDONLY]); 792 } 793 794 static __be32 795 nfs4_file_get_access(struct nfs4_file *fp, u32 access) 796 { 797 lockdep_assert_held(&fp->fi_lock); 798 799 /* Does this access mode make sense? */ 800 if (access & ~NFS4_SHARE_ACCESS_BOTH) 801 return nfserr_inval; 802 803 /* Does it conflict with a deny mode already set? */ 804 if ((access & fp->fi_share_deny) != 0) 805 return nfserr_share_denied; 806 807 __nfs4_file_get_access(fp, access); 808 return nfs_ok; 809 } 810 811 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny) 812 { 813 /* Common case is that there is no deny mode. */ 814 if (deny) { 815 /* Does this deny mode make sense? 
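		 * (NFS4_SHARE_DENY_BOTH is 3, so any bits above the low
		 * two denote a deny value the protocol does not define.)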
*/ 816 if (deny & ~NFS4_SHARE_DENY_BOTH) 817 return nfserr_inval; 818 819 if ((deny & NFS4_SHARE_DENY_READ) && 820 atomic_read(&fp->fi_access[O_RDONLY])) 821 return nfserr_share_denied; 822 823 if ((deny & NFS4_SHARE_DENY_WRITE) && 824 atomic_read(&fp->fi_access[O_WRONLY])) 825 return nfserr_share_denied; 826 } 827 return nfs_ok; 828 } 829 830 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) 831 { 832 might_lock(&fp->fi_lock); 833 834 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { 835 struct nfsd_file *f1 = NULL; 836 struct nfsd_file *f2 = NULL; 837 838 swap(f1, fp->fi_fds[oflag]); 839 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) 840 swap(f2, fp->fi_fds[O_RDWR]); 841 spin_unlock(&fp->fi_lock); 842 if (f1) 843 nfsd_file_put(f1); 844 if (f2) 845 nfsd_file_put(f2); 846 } 847 } 848 849 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) 850 { 851 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH); 852 853 if (access & NFS4_SHARE_ACCESS_WRITE) 854 __nfs4_file_put_access(fp, O_WRONLY); 855 if (access & NFS4_SHARE_ACCESS_READ) 856 __nfs4_file_put_access(fp, O_RDONLY); 857 } 858 859 /* 860 * Allocate a new open/delegation state counter. This is needed for 861 * pNFS for proper return on close semantics. 862 * 863 * Note that we only allocate it for pNFS-enabled exports, otherwise 864 * all pointers to struct nfs4_clnt_odstate are always NULL. 865 */ 866 static struct nfs4_clnt_odstate * 867 alloc_clnt_odstate(struct nfs4_client *clp) 868 { 869 struct nfs4_clnt_odstate *co; 870 871 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); 872 if (co) { 873 co->co_client = clp; 874 refcount_set(&co->co_odcount, 1); 875 } 876 return co; 877 } 878 879 static void 880 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) 881 { 882 struct nfs4_file *fp = co->co_file; 883 884 lockdep_assert_held(&fp->fi_lock); 885 list_add(&co->co_perfile, &fp->fi_clnt_odstate); 886 } 887 888 static inline void 889 get_clnt_odstate(struct nfs4_clnt_odstate *co) 890 { 891 if (co) 892 refcount_inc(&co->co_odcount); 893 } 894 895 static void 896 put_clnt_odstate(struct nfs4_clnt_odstate *co) 897 { 898 struct nfs4_file *fp; 899 900 if (!co) 901 return; 902 903 fp = co->co_file; 904 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { 905 list_del(&co->co_perfile); 906 spin_unlock(&fp->fi_lock); 907 908 nfsd4_return_all_file_layouts(co->co_client, fp); 909 kmem_cache_free(odstate_slab, co); 910 } 911 } 912 913 static struct nfs4_clnt_odstate * 914 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) 915 { 916 struct nfs4_clnt_odstate *co; 917 struct nfs4_client *cl; 918 919 if (!new) 920 return NULL; 921 922 cl = new->co_client; 923 924 spin_lock(&fp->fi_lock); 925 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { 926 if (co->co_client == cl) { 927 get_clnt_odstate(co); 928 goto out; 929 } 930 } 931 co = new; 932 co->co_file = fp; 933 hash_clnt_odstate_locked(new); 934 out: 935 spin_unlock(&fp->fi_lock); 936 return co; 937 } 938 939 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, 940 void (*sc_free)(struct nfs4_stid *)) 941 { 942 struct nfs4_stid *stid; 943 int new_id; 944 945 stid = kmem_cache_zalloc(slab, GFP_KERNEL); 946 if (!stid) 947 return NULL; 948 949 idr_preload(GFP_KERNEL); 950 spin_lock(&cl->cl_lock); 951 /* Reserving 0 for start of file in nfsdfs "states" file: */ 952 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT); 953 spin_unlock(&cl->cl_lock); 954 idr_preload_end(); 955 
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{
	int new_id;

	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->cs_stid.si_opaque.so_id = new_id;
	stid->cs_stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	stid->cs_type = cs_type;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.cs_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
		return;
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.cs_stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}
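/*
 * Unlike the stateids handed out by nfs4_alloc_stid() above, COPY and
 * COPY_NOTIFY stateids are not entered in any client's cl_stateids IDR.
 * nfs4_init_cp_state() builds them from per-net state instead, roughly:
 *
 *	so_clid.cl_boot	= nn->boot_time;
 *	so_clid.cl_id	= nn->s2s_cp_cl_id;
 *	so_id		= <cyclic id from nn->s2s_cp_stateids>;
 *	si_generation	= 1;
 *
 * so they are allocated and freed through the per-net s2s_cp_stateids
 * IDR (see nfs4_free_copy_state() above) rather than through the owning
 * client.
 */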
static void nfs4_free_deleg(struct nfs4_stid *stid) 1072 { 1073 struct nfs4_delegation *dp = delegstateid(stid); 1074 1075 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list)); 1076 WARN_ON_ONCE(!list_empty(&dp->dl_perfile)); 1077 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt)); 1078 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru)); 1079 kmem_cache_free(deleg_slab, stid); 1080 atomic_long_dec(&num_delegations); 1081 } 1082 1083 /* 1084 * When we recall a delegation, we should be careful not to hand it 1085 * out again straight away. 1086 * To ensure this we keep a pair of bloom filters ('new' and 'old') 1087 * in which the filehandles of recalled delegations are "stored". 1088 * If a filehandle appear in either filter, a delegation is blocked. 1089 * When a delegation is recalled, the filehandle is stored in the "new" 1090 * filter. 1091 * Every 30 seconds we swap the filters and clear the "new" one, 1092 * unless both are empty of course. 1093 * 1094 * Each filter is 256 bits. We hash the filehandle to 32bit and use the 1095 * low 3 bytes as hash-table indices. 1096 * 1097 * 'blocked_delegations_lock', which is always taken in block_delegations(), 1098 * is used to manage concurrent access. Testing does not need the lock 1099 * except when swapping the two filters. 1100 */ 1101 static DEFINE_SPINLOCK(blocked_delegations_lock); 1102 static struct bloom_pair { 1103 int entries, old_entries; 1104 time64_t swap_time; 1105 int new; /* index into 'set' */ 1106 DECLARE_BITMAP(set[2], 256); 1107 } blocked_delegations; 1108 1109 static int delegation_blocked(struct knfsd_fh *fh) 1110 { 1111 u32 hash; 1112 struct bloom_pair *bd = &blocked_delegations; 1113 1114 if (bd->entries == 0) 1115 return 0; 1116 if (ktime_get_seconds() - bd->swap_time > 30) { 1117 spin_lock(&blocked_delegations_lock); 1118 if (ktime_get_seconds() - bd->swap_time > 30) { 1119 bd->entries -= bd->old_entries; 1120 bd->old_entries = bd->entries; 1121 memset(bd->set[bd->new], 0, 1122 sizeof(bd->set[0])); 1123 bd->new = 1-bd->new; 1124 bd->swap_time = ktime_get_seconds(); 1125 } 1126 spin_unlock(&blocked_delegations_lock); 1127 } 1128 hash = jhash(&fh->fh_raw, fh->fh_size, 0); 1129 if (test_bit(hash&255, bd->set[0]) && 1130 test_bit((hash>>8)&255, bd->set[0]) && 1131 test_bit((hash>>16)&255, bd->set[0])) 1132 return 1; 1133 1134 if (test_bit(hash&255, bd->set[1]) && 1135 test_bit((hash>>8)&255, bd->set[1]) && 1136 test_bit((hash>>16)&255, bd->set[1])) 1137 return 1; 1138 1139 return 0; 1140 } 1141 1142 static void block_delegations(struct knfsd_fh *fh) 1143 { 1144 u32 hash; 1145 struct bloom_pair *bd = &blocked_delegations; 1146 1147 hash = jhash(&fh->fh_raw, fh->fh_size, 0); 1148 1149 spin_lock(&blocked_delegations_lock); 1150 __set_bit(hash&255, bd->set[bd->new]); 1151 __set_bit((hash>>8)&255, bd->set[bd->new]); 1152 __set_bit((hash>>16)&255, bd->set[bd->new]); 1153 if (bd->entries == 0) 1154 bd->swap_time = ktime_get_seconds(); 1155 bd->entries += 1; 1156 spin_unlock(&blocked_delegations_lock); 1157 } 1158 1159 static struct nfs4_delegation * 1160 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp, 1161 struct nfs4_clnt_odstate *odstate, u32 dl_type) 1162 { 1163 struct nfs4_delegation *dp; 1164 struct nfs4_stid *stid; 1165 long n; 1166 1167 dprintk("NFSD alloc_init_deleg\n"); 1168 n = atomic_long_inc_return(&num_delegations); 1169 if (n < 0 || n > max_delegations) 1170 goto out_dec; 1171 if (delegation_blocked(&fp->fi_fhandle)) 1172 goto out_dec; 1173 stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg); 1174 if (stid == NULL) 
1175 goto out_dec; 1176 dp = delegstateid(stid); 1177 1178 /* 1179 * delegation seqid's are never incremented. The 4.1 special 1180 * meaning of seqid 0 isn't meaningful, really, but let's avoid 1181 * 0 anyway just for consistency and use 1: 1182 */ 1183 dp->dl_stid.sc_stateid.si_generation = 1; 1184 INIT_LIST_HEAD(&dp->dl_perfile); 1185 INIT_LIST_HEAD(&dp->dl_perclnt); 1186 INIT_LIST_HEAD(&dp->dl_recall_lru); 1187 dp->dl_clnt_odstate = odstate; 1188 get_clnt_odstate(odstate); 1189 dp->dl_type = dl_type; 1190 dp->dl_retries = 1; 1191 dp->dl_recalled = false; 1192 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, 1193 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL); 1194 nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client, 1195 &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR); 1196 dp->dl_cb_fattr.ncf_file_modified = false; 1197 dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE; 1198 get_nfs4_file(fp); 1199 dp->dl_stid.sc_file = fp; 1200 return dp; 1201 out_dec: 1202 atomic_long_dec(&num_delegations); 1203 return NULL; 1204 } 1205 1206 void 1207 nfs4_put_stid(struct nfs4_stid *s) 1208 { 1209 struct nfs4_file *fp = s->sc_file; 1210 struct nfs4_client *clp = s->sc_client; 1211 1212 might_lock(&clp->cl_lock); 1213 1214 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) { 1215 wake_up_all(&close_wq); 1216 return; 1217 } 1218 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 1219 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) 1220 atomic_dec(&s->sc_client->cl_admin_revoked); 1221 nfs4_free_cpntf_statelist(clp->net, s); 1222 spin_unlock(&clp->cl_lock); 1223 s->sc_free(s); 1224 if (fp) 1225 put_nfs4_file(fp); 1226 } 1227 1228 void 1229 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid) 1230 { 1231 stateid_t *src = &stid->sc_stateid; 1232 1233 spin_lock(&stid->sc_lock); 1234 if (unlikely(++src->si_generation == 0)) 1235 src->si_generation = 1; 1236 memcpy(dst, src, sizeof(*dst)); 1237 spin_unlock(&stid->sc_lock); 1238 } 1239 1240 static void put_deleg_file(struct nfs4_file *fp) 1241 { 1242 struct nfsd_file *nf = NULL; 1243 1244 spin_lock(&fp->fi_lock); 1245 if (--fp->fi_delegees == 0) 1246 swap(nf, fp->fi_deleg_file); 1247 spin_unlock(&fp->fi_lock); 1248 1249 if (nf) 1250 nfsd_file_put(nf); 1251 } 1252 1253 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp) 1254 { 1255 struct nfs4_file *fp = dp->dl_stid.sc_file; 1256 struct nfsd_file *nf = fp->fi_deleg_file; 1257 1258 WARN_ON_ONCE(!fp->fi_delegees); 1259 1260 kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); 1261 put_deleg_file(fp); 1262 } 1263 1264 static void destroy_unhashed_deleg(struct nfs4_delegation *dp) 1265 { 1266 put_clnt_odstate(dp->dl_clnt_odstate); 1267 nfs4_unlock_deleg_lease(dp); 1268 nfs4_put_stid(&dp->dl_stid); 1269 } 1270 1271 /** 1272 * nfs4_delegation_exists - Discover if this delegation already exists 1273 * @clp: a pointer to the nfs4_client we're granting a delegation to 1274 * @fp: a pointer to the nfs4_file we're granting a delegation on 1275 * 1276 * Return: 1277 * On success: true iff an existing delegation is found 1278 */ 1279 1280 static bool 1281 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp) 1282 { 1283 struct nfs4_delegation *searchdp = NULL; 1284 struct nfs4_client *searchclp = NULL; 1285 1286 lockdep_assert_held(&state_lock); 1287 lockdep_assert_held(&fp->fi_lock); 1288 1289 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) { 1290 searchclp = searchdp->dl_stid.sc_client; 1291 if (clp == 
searchclp) { 1292 return true; 1293 } 1294 } 1295 return false; 1296 } 1297 1298 /** 1299 * hash_delegation_locked - Add a delegation to the appropriate lists 1300 * @dp: a pointer to the nfs4_delegation we are adding. 1301 * @fp: a pointer to the nfs4_file we're granting a delegation on 1302 * 1303 * Return: 1304 * On success: NULL if the delegation was successfully hashed. 1305 * 1306 * On error: -EAGAIN if one was previously granted to this 1307 * nfs4_client for this nfs4_file. Delegation is not hashed. 1308 * 1309 */ 1310 1311 static int 1312 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) 1313 { 1314 struct nfs4_client *clp = dp->dl_stid.sc_client; 1315 1316 lockdep_assert_held(&state_lock); 1317 lockdep_assert_held(&fp->fi_lock); 1318 lockdep_assert_held(&clp->cl_lock); 1319 1320 if (nfs4_delegation_exists(clp, fp)) 1321 return -EAGAIN; 1322 refcount_inc(&dp->dl_stid.sc_count); 1323 dp->dl_stid.sc_type = SC_TYPE_DELEG; 1324 list_add(&dp->dl_perfile, &fp->fi_delegations); 1325 list_add(&dp->dl_perclnt, &clp->cl_delegations); 1326 return 0; 1327 } 1328 1329 static bool delegation_hashed(struct nfs4_delegation *dp) 1330 { 1331 return !(list_empty(&dp->dl_perfile)); 1332 } 1333 1334 static bool 1335 unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask) 1336 { 1337 struct nfs4_file *fp = dp->dl_stid.sc_file; 1338 1339 lockdep_assert_held(&state_lock); 1340 1341 if (!delegation_hashed(dp)) 1342 return false; 1343 1344 if (statusmask == SC_STATUS_REVOKED && 1345 dp->dl_stid.sc_client->cl_minorversion == 0) 1346 statusmask = SC_STATUS_CLOSED; 1347 dp->dl_stid.sc_status |= statusmask; 1348 if (statusmask & SC_STATUS_ADMIN_REVOKED) 1349 atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked); 1350 1351 /* Ensure that deleg break won't try to requeue it */ 1352 ++dp->dl_time; 1353 spin_lock(&fp->fi_lock); 1354 list_del_init(&dp->dl_perclnt); 1355 list_del_init(&dp->dl_recall_lru); 1356 list_del_init(&dp->dl_perfile); 1357 spin_unlock(&fp->fi_lock); 1358 return true; 1359 } 1360 1361 static void destroy_delegation(struct nfs4_delegation *dp) 1362 { 1363 bool unhashed; 1364 1365 spin_lock(&state_lock); 1366 unhashed = unhash_delegation_locked(dp, SC_STATUS_CLOSED); 1367 spin_unlock(&state_lock); 1368 if (unhashed) 1369 destroy_unhashed_deleg(dp); 1370 } 1371 1372 static void revoke_delegation(struct nfs4_delegation *dp) 1373 { 1374 struct nfs4_client *clp = dp->dl_stid.sc_client; 1375 1376 WARN_ON(!list_empty(&dp->dl_recall_lru)); 1377 1378 trace_nfsd_stid_revoke(&dp->dl_stid); 1379 1380 if (dp->dl_stid.sc_status & 1381 (SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED)) { 1382 spin_lock(&clp->cl_lock); 1383 refcount_inc(&dp->dl_stid.sc_count); 1384 list_add(&dp->dl_recall_lru, &clp->cl_revoked); 1385 spin_unlock(&clp->cl_lock); 1386 } 1387 destroy_unhashed_deleg(dp); 1388 } 1389 1390 /* 1391 * SETCLIENTID state 1392 */ 1393 1394 static unsigned int clientid_hashval(u32 id) 1395 { 1396 return id & CLIENT_HASH_MASK; 1397 } 1398 1399 static unsigned int clientstr_hashval(struct xdr_netobj name) 1400 { 1401 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK; 1402 } 1403 1404 /* 1405 * A stateid that had a deny mode associated with it is being released 1406 * or downgraded. Recalculate the deny mode on the file. 
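 *
 * For example, if one stateid on the file denied READ and another denied
 * WRITE, fi_share_deny was (NFS4_SHARE_DENY_READ | NFS4_SHARE_DENY_WRITE);
 * once the first is released, the walk below leaves only
 * NFS4_SHARE_DENY_WRITE in force.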
1407 */ 1408 static void 1409 recalculate_deny_mode(struct nfs4_file *fp) 1410 { 1411 struct nfs4_ol_stateid *stp; 1412 1413 spin_lock(&fp->fi_lock); 1414 fp->fi_share_deny = 0; 1415 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) 1416 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); 1417 spin_unlock(&fp->fi_lock); 1418 } 1419 1420 static void 1421 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) 1422 { 1423 int i; 1424 bool change = false; 1425 1426 for (i = 1; i < 4; i++) { 1427 if ((i & deny) != i) { 1428 change = true; 1429 clear_deny(i, stp); 1430 } 1431 } 1432 1433 /* Recalculate per-file deny mode if there was a change */ 1434 if (change) 1435 recalculate_deny_mode(stp->st_stid.sc_file); 1436 } 1437 1438 /* release all access and file references for a given stateid */ 1439 static void 1440 release_all_access(struct nfs4_ol_stateid *stp) 1441 { 1442 int i; 1443 struct nfs4_file *fp = stp->st_stid.sc_file; 1444 1445 if (fp && stp->st_deny_bmap != 0) 1446 recalculate_deny_mode(fp); 1447 1448 for (i = 1; i < 4; i++) { 1449 if (test_access(i, stp)) 1450 nfs4_file_put_access(stp->st_stid.sc_file, i); 1451 clear_access(i, stp); 1452 } 1453 } 1454 1455 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop) 1456 { 1457 kfree(sop->so_owner.data); 1458 sop->so_ops->so_free(sop); 1459 } 1460 1461 static void nfs4_put_stateowner(struct nfs4_stateowner *sop) 1462 { 1463 struct nfs4_client *clp = sop->so_client; 1464 1465 might_lock(&clp->cl_lock); 1466 1467 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) 1468 return; 1469 sop->so_ops->so_unhash(sop); 1470 spin_unlock(&clp->cl_lock); 1471 nfs4_free_stateowner(sop); 1472 } 1473 1474 static bool 1475 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) 1476 { 1477 return list_empty(&stp->st_perfile); 1478 } 1479 1480 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) 1481 { 1482 struct nfs4_file *fp = stp->st_stid.sc_file; 1483 1484 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); 1485 1486 if (list_empty(&stp->st_perfile)) 1487 return false; 1488 1489 spin_lock(&fp->fi_lock); 1490 list_del_init(&stp->st_perfile); 1491 spin_unlock(&fp->fi_lock); 1492 list_del(&stp->st_perstateowner); 1493 return true; 1494 } 1495 1496 static void nfs4_free_ol_stateid(struct nfs4_stid *stid) 1497 { 1498 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1499 1500 put_clnt_odstate(stp->st_clnt_odstate); 1501 release_all_access(stp); 1502 if (stp->st_stateowner) 1503 nfs4_put_stateowner(stp->st_stateowner); 1504 WARN_ON(!list_empty(&stid->sc_cp_list)); 1505 kmem_cache_free(stateid_slab, stid); 1506 } 1507 1508 static void nfs4_free_lock_stateid(struct nfs4_stid *stid) 1509 { 1510 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1511 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); 1512 struct nfsd_file *nf; 1513 1514 nf = find_any_file(stp->st_stid.sc_file); 1515 if (nf) { 1516 get_file(nf->nf_file); 1517 filp_close(nf->nf_file, (fl_owner_t)lo); 1518 nfsd_file_put(nf); 1519 } 1520 nfs4_free_ol_stateid(stid); 1521 } 1522 1523 /* 1524 * Put the persistent reference to an already unhashed generic stateid, while 1525 * holding the cl_lock. If it's the last reference, then put it onto the 1526 * reaplist for later destruction. 
1527 */ 1528 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, 1529 struct list_head *reaplist) 1530 { 1531 struct nfs4_stid *s = &stp->st_stid; 1532 struct nfs4_client *clp = s->sc_client; 1533 1534 lockdep_assert_held(&clp->cl_lock); 1535 1536 WARN_ON_ONCE(!list_empty(&stp->st_locks)); 1537 1538 if (!refcount_dec_and_test(&s->sc_count)) { 1539 wake_up_all(&close_wq); 1540 return; 1541 } 1542 1543 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 1544 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) 1545 atomic_dec(&s->sc_client->cl_admin_revoked); 1546 list_add(&stp->st_locks, reaplist); 1547 } 1548 1549 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) 1550 { 1551 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1552 1553 if (!unhash_ol_stateid(stp)) 1554 return false; 1555 list_del_init(&stp->st_locks); 1556 stp->st_stid.sc_status |= SC_STATUS_CLOSED; 1557 return true; 1558 } 1559 1560 static void release_lock_stateid(struct nfs4_ol_stateid *stp) 1561 { 1562 struct nfs4_client *clp = stp->st_stid.sc_client; 1563 bool unhashed; 1564 1565 spin_lock(&clp->cl_lock); 1566 unhashed = unhash_lock_stateid(stp); 1567 spin_unlock(&clp->cl_lock); 1568 if (unhashed) 1569 nfs4_put_stid(&stp->st_stid); 1570 } 1571 1572 static void unhash_lockowner_locked(struct nfs4_lockowner *lo) 1573 { 1574 struct nfs4_client *clp = lo->lo_owner.so_client; 1575 1576 lockdep_assert_held(&clp->cl_lock); 1577 1578 list_del_init(&lo->lo_owner.so_strhash); 1579 } 1580 1581 /* 1582 * Free a list of generic stateids that were collected earlier after being 1583 * fully unhashed. 1584 */ 1585 static void 1586 free_ol_stateid_reaplist(struct list_head *reaplist) 1587 { 1588 struct nfs4_ol_stateid *stp; 1589 struct nfs4_file *fp; 1590 1591 might_sleep(); 1592 1593 while (!list_empty(reaplist)) { 1594 stp = list_first_entry(reaplist, struct nfs4_ol_stateid, 1595 st_locks); 1596 list_del(&stp->st_locks); 1597 fp = stp->st_stid.sc_file; 1598 stp->st_stid.sc_free(&stp->st_stid); 1599 if (fp) 1600 put_nfs4_file(fp); 1601 } 1602 } 1603 1604 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, 1605 struct list_head *reaplist) 1606 { 1607 struct nfs4_ol_stateid *stp; 1608 1609 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); 1610 1611 while (!list_empty(&open_stp->st_locks)) { 1612 stp = list_entry(open_stp->st_locks.next, 1613 struct nfs4_ol_stateid, st_locks); 1614 unhash_lock_stateid(stp); 1615 put_ol_stateid_locked(stp, reaplist); 1616 } 1617 } 1618 1619 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, 1620 struct list_head *reaplist) 1621 { 1622 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1623 1624 if (!unhash_ol_stateid(stp)) 1625 return false; 1626 release_open_stateid_locks(stp, reaplist); 1627 return true; 1628 } 1629 1630 static void release_open_stateid(struct nfs4_ol_stateid *stp) 1631 { 1632 LIST_HEAD(reaplist); 1633 1634 spin_lock(&stp->st_stid.sc_client->cl_lock); 1635 stp->st_stid.sc_status |= SC_STATUS_CLOSED; 1636 if (unhash_open_stateid(stp, &reaplist)) 1637 put_ol_stateid_locked(stp, &reaplist); 1638 spin_unlock(&stp->st_stid.sc_client->cl_lock); 1639 free_ol_stateid_reaplist(&reaplist); 1640 } 1641 1642 static void unhash_openowner_locked(struct nfs4_openowner *oo) 1643 { 1644 struct nfs4_client *clp = oo->oo_owner.so_client; 1645 1646 lockdep_assert_held(&clp->cl_lock); 1647 1648 list_del_init(&oo->oo_owner.so_strhash); 1649 list_del_init(&oo->oo_perclient); 1650 } 1651 1652 static void release_last_closed_stateid(struct 
nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
					  struct super_block *sb,
					  unsigned int sc_types)
{
	unsigned long id, tmp;
	struct nfs4_stid *stid;

	spin_lock(&clp->cl_lock);
	idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
		if ((stid->sc_type & sc_types) &&
		    stid->sc_status == 0 &&
		    stid->sc_file->fi_inode->i_sb == sb) {
			refcount_inc(&stid->sc_count);
			break;
		}
	spin_unlock(&clp->cl_lock);
	return stid;
}

/**
 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
 * @net:  used to identify instance of nfsd (there is one per net namespace)
 * @sb:   super_block used to identify target filesystem
 *
 * All nfs4 states (open, lock, delegation, layout) held by the server instance
 * and associated with a file on the given filesystem will be revoked resulting
 * in any files being closed and so all references from nfsd to the filesystem
 * being released.  Thus nfsd will no longer prevent the filesystem from being
 * unmounted.
 *
 * The clients which own the states will subsequently be notified that the
 * states have been "admin-revoked".
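 *
 * Note that the scan below drops ->client_lock while it deals with each
 * matching stateid and then rescans the hash chain (the "retry" label),
 * so it tolerates clients and stateids disappearing while the lock is
 * not held.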
1723 */ 1724 void nfsd4_revoke_states(struct net *net, struct super_block *sb) 1725 { 1726 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1727 unsigned int idhashval; 1728 unsigned int sc_types; 1729 1730 sc_types = SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG | SC_TYPE_LAYOUT; 1731 1732 spin_lock(&nn->client_lock); 1733 for (idhashval = 0; idhashval < CLIENT_HASH_MASK; idhashval++) { 1734 struct list_head *head = &nn->conf_id_hashtbl[idhashval]; 1735 struct nfs4_client *clp; 1736 retry: 1737 list_for_each_entry(clp, head, cl_idhash) { 1738 struct nfs4_stid *stid = find_one_sb_stid(clp, sb, 1739 sc_types); 1740 if (stid) { 1741 struct nfs4_ol_stateid *stp; 1742 struct nfs4_delegation *dp; 1743 struct nfs4_layout_stateid *ls; 1744 1745 spin_unlock(&nn->client_lock); 1746 switch (stid->sc_type) { 1747 case SC_TYPE_OPEN: 1748 stp = openlockstateid(stid); 1749 mutex_lock_nested(&stp->st_mutex, 1750 OPEN_STATEID_MUTEX); 1751 1752 spin_lock(&clp->cl_lock); 1753 if (stid->sc_status == 0) { 1754 stid->sc_status |= 1755 SC_STATUS_ADMIN_REVOKED; 1756 atomic_inc(&clp->cl_admin_revoked); 1757 spin_unlock(&clp->cl_lock); 1758 release_all_access(stp); 1759 } else 1760 spin_unlock(&clp->cl_lock); 1761 mutex_unlock(&stp->st_mutex); 1762 break; 1763 case SC_TYPE_LOCK: 1764 stp = openlockstateid(stid); 1765 mutex_lock_nested(&stp->st_mutex, 1766 LOCK_STATEID_MUTEX); 1767 spin_lock(&clp->cl_lock); 1768 if (stid->sc_status == 0) { 1769 struct nfs4_lockowner *lo = 1770 lockowner(stp->st_stateowner); 1771 struct nfsd_file *nf; 1772 1773 stid->sc_status |= 1774 SC_STATUS_ADMIN_REVOKED; 1775 atomic_inc(&clp->cl_admin_revoked); 1776 spin_unlock(&clp->cl_lock); 1777 nf = find_any_file(stp->st_stid.sc_file); 1778 if (nf) { 1779 get_file(nf->nf_file); 1780 filp_close(nf->nf_file, 1781 (fl_owner_t)lo); 1782 nfsd_file_put(nf); 1783 } 1784 release_all_access(stp); 1785 } else 1786 spin_unlock(&clp->cl_lock); 1787 mutex_unlock(&stp->st_mutex); 1788 break; 1789 case SC_TYPE_DELEG: 1790 dp = delegstateid(stid); 1791 spin_lock(&state_lock); 1792 if (!unhash_delegation_locked( 1793 dp, SC_STATUS_ADMIN_REVOKED)) 1794 dp = NULL; 1795 spin_unlock(&state_lock); 1796 if (dp) 1797 revoke_delegation(dp); 1798 break; 1799 case SC_TYPE_LAYOUT: 1800 ls = layoutstateid(stid); 1801 nfsd4_close_layout(ls); 1802 break; 1803 } 1804 nfs4_put_stid(stid); 1805 spin_lock(&nn->client_lock); 1806 if (clp->cl_minorversion == 0) 1807 /* Allow cleanup after a lease period. 1808 * store_release ensures cleanup will 1809 * see any newly revoked states if it 1810 * sees the time updated. 1811 */ 1812 nn->nfs40_last_revoke = 1813 ktime_get_boottime_seconds(); 1814 goto retry; 1815 } 1816 } 1817 } 1818 spin_unlock(&nn->client_lock); 1819 } 1820 1821 static inline int 1822 hash_sessionid(struct nfs4_sessionid *sessionid) 1823 { 1824 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; 1825 1826 return sid->sequence % SESSION_HASH_SIZE; 1827 } 1828 1829 #ifdef CONFIG_SUNRPC_DEBUG 1830 static inline void 1831 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1832 { 1833 u32 *ptr = (u32 *)(&sessionid->data[0]); 1834 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); 1835 } 1836 #else 1837 static inline void 1838 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1839 { 1840 } 1841 #endif 1842 1843 /* 1844 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it 1845 * won't be used for replay. 
1846 */ 1847 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) 1848 { 1849 struct nfs4_stateowner *so = cstate->replay_owner; 1850 1851 if (nfserr == nfserr_replay_me) 1852 return; 1853 1854 if (!seqid_mutating_err(ntohl(nfserr))) { 1855 nfsd4_cstate_clear_replay(cstate); 1856 return; 1857 } 1858 if (!so) 1859 return; 1860 if (so->so_is_open_owner) 1861 release_last_closed_stateid(openowner(so)); 1862 so->so_seqid++; 1863 return; 1864 } 1865 1866 static void 1867 gen_sessionid(struct nfsd4_session *ses) 1868 { 1869 struct nfs4_client *clp = ses->se_client; 1870 struct nfsd4_sessionid *sid; 1871 1872 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; 1873 sid->clientid = clp->cl_clientid; 1874 sid->sequence = current_sessionid++; 1875 sid->reserved = 0; 1876 } 1877 1878 /* 1879 * The protocol defines ca_maxresponssize_cached to include the size of 1880 * the rpc header, but all we need to cache is the data starting after 1881 * the end of the initial SEQUENCE operation--the rest we regenerate 1882 * each time. Therefore we can advertise a ca_maxresponssize_cached 1883 * value that is the number of bytes in our cache plus a few additional 1884 * bytes. In order to stay on the safe side, and not promise more than 1885 * we can cache, those additional bytes must be the minimum possible: 24 1886 * bytes of rpc header (xid through accept state, with AUTH_NULL 1887 * verifier), 12 for the compound header (with zero-length tag), and 44 1888 * for the SEQUENCE op response: 1889 */ 1890 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) 1891 1892 static void 1893 free_session_slots(struct nfsd4_session *ses) 1894 { 1895 int i; 1896 1897 for (i = 0; i < ses->se_fchannel.maxreqs; i++) { 1898 free_svc_cred(&ses->se_slots[i]->sl_cred); 1899 kfree(ses->se_slots[i]); 1900 } 1901 } 1902 1903 /* 1904 * We don't actually need to cache the rpc and session headers, so we 1905 * can allocate a little less for each slot: 1906 */ 1907 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca) 1908 { 1909 u32 size; 1910 1911 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ) 1912 size = 0; 1913 else 1914 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; 1915 return size + sizeof(struct nfsd4_slot); 1916 } 1917 1918 /* 1919 * XXX: If we run out of reserved DRC memory we could (up to a point) 1920 * re-negotiate active sessions and reduce their slot usage to make 1921 * room for new connections. For now we just fail the create session. 1922 */ 1923 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 1924 { 1925 u32 slotsize = slot_bytes(ca); 1926 u32 num = ca->maxreqs; 1927 unsigned long avail, total_avail; 1928 unsigned int scale_factor; 1929 1930 spin_lock(&nfsd_drc_lock); 1931 if (nfsd_drc_max_mem > nfsd_drc_mem_used) 1932 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used; 1933 else 1934 /* We have handed out more space than we chose in 1935 * set_max_drc() to allow. That isn't really a 1936 * problem as long as that doesn't make us think we 1937 * have lots more due to integer overflow. 1938 */ 1939 total_avail = 0; 1940 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail); 1941 /* 1942 * Never use more than a fraction of the remaining memory, 1943 * unless it's the only way to give this client a slot. 1944 * The chosen fraction is either 1/8 or 1/number of threads, 1945 * whichever is smaller. This ensures there are adequate 1946 * slots to support multiple clients per thread. 
1947 * Give the client one slot even if that would require 1948 * over-allocation--it is better than failure. 1949 */ 1950 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads); 1951 1952 avail = clamp_t(unsigned long, avail, slotsize, 1953 total_avail/scale_factor); 1954 num = min_t(int, num, avail / slotsize); 1955 num = max_t(int, num, 1); 1956 nfsd_drc_mem_used += num * slotsize; 1957 spin_unlock(&nfsd_drc_lock); 1958 1959 return num; 1960 } 1961 1962 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) 1963 { 1964 int slotsize = slot_bytes(ca); 1965 1966 spin_lock(&nfsd_drc_lock); 1967 nfsd_drc_mem_used -= slotsize * ca->maxreqs; 1968 spin_unlock(&nfsd_drc_lock); 1969 } 1970 1971 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, 1972 struct nfsd4_channel_attrs *battrs) 1973 { 1974 int numslots = fattrs->maxreqs; 1975 int slotsize = slot_bytes(fattrs); 1976 struct nfsd4_session *new; 1977 int i; 1978 1979 BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION) 1980 > PAGE_SIZE); 1981 1982 new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL); 1983 if (!new) 1984 return NULL; 1985 /* allocate each struct nfsd4_slot and data cache in one piece */ 1986 for (i = 0; i < numslots; i++) { 1987 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); 1988 if (!new->se_slots[i]) 1989 goto out_free; 1990 } 1991 1992 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); 1993 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs)); 1994 1995 return new; 1996 out_free: 1997 while (i--) 1998 kfree(new->se_slots[i]); 1999 kfree(new); 2000 return NULL; 2001 } 2002 2003 static void free_conn(struct nfsd4_conn *c) 2004 { 2005 svc_xprt_put(c->cn_xprt); 2006 kfree(c); 2007 } 2008 2009 static void nfsd4_conn_lost(struct svc_xpt_user *u) 2010 { 2011 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); 2012 struct nfs4_client *clp = c->cn_session->se_client; 2013 2014 trace_nfsd_cb_lost(clp); 2015 2016 spin_lock(&clp->cl_lock); 2017 if (!list_empty(&c->cn_persession)) { 2018 list_del(&c->cn_persession); 2019 free_conn(c); 2020 } 2021 nfsd4_probe_callback(clp); 2022 spin_unlock(&clp->cl_lock); 2023 } 2024 2025 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 2026 { 2027 struct nfsd4_conn *conn; 2028 2029 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); 2030 if (!conn) 2031 return NULL; 2032 svc_xprt_get(rqstp->rq_xprt); 2033 conn->cn_xprt = rqstp->rq_xprt; 2034 conn->cn_flags = flags; 2035 INIT_LIST_HEAD(&conn->cn_xpt_user.list); 2036 return conn; 2037 } 2038 2039 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 2040 { 2041 conn->cn_session = ses; 2042 list_add(&conn->cn_persession, &ses->se_conns); 2043 } 2044 2045 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 2046 { 2047 struct nfs4_client *clp = ses->se_client; 2048 2049 spin_lock(&clp->cl_lock); 2050 __nfsd4_hash_conn(conn, ses); 2051 spin_unlock(&clp->cl_lock); 2052 } 2053 2054 static int nfsd4_register_conn(struct nfsd4_conn *conn) 2055 { 2056 conn->cn_xpt_user.callback = nfsd4_conn_lost; 2057 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 2058 } 2059 2060 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses) 2061 { 2062 int ret; 2063 2064 nfsd4_hash_conn(conn, ses); 2065 ret = nfsd4_register_conn(conn); 2066 if (ret) 2067 /* oops; xprt is already down: */ 2068 
nfsd4_conn_lost(&conn->cn_xpt_user); 2069 /* We may have gained or lost a callback channel: */ 2070 nfsd4_probe_callback_sync(ses->se_client); 2071 } 2072 2073 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) 2074 { 2075 u32 dir = NFS4_CDFC4_FORE; 2076 2077 if (cses->flags & SESSION4_BACK_CHAN) 2078 dir |= NFS4_CDFC4_BACK; 2079 return alloc_conn(rqstp, dir); 2080 } 2081 2082 /* must be called under client_lock */ 2083 static void nfsd4_del_conns(struct nfsd4_session *s) 2084 { 2085 struct nfs4_client *clp = s->se_client; 2086 struct nfsd4_conn *c; 2087 2088 spin_lock(&clp->cl_lock); 2089 while (!list_empty(&s->se_conns)) { 2090 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); 2091 list_del_init(&c->cn_persession); 2092 spin_unlock(&clp->cl_lock); 2093 2094 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); 2095 free_conn(c); 2096 2097 spin_lock(&clp->cl_lock); 2098 } 2099 spin_unlock(&clp->cl_lock); 2100 } 2101 2102 static void __free_session(struct nfsd4_session *ses) 2103 { 2104 free_session_slots(ses); 2105 kfree(ses); 2106 } 2107 2108 static void free_session(struct nfsd4_session *ses) 2109 { 2110 nfsd4_del_conns(ses); 2111 nfsd4_put_drc_mem(&ses->se_fchannel); 2112 __free_session(ses); 2113 } 2114 2115 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) 2116 { 2117 int idx; 2118 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2119 2120 new->se_client = clp; 2121 gen_sessionid(new); 2122 2123 INIT_LIST_HEAD(&new->se_conns); 2124 2125 new->se_cb_seq_nr = 1; 2126 new->se_flags = cses->flags; 2127 new->se_cb_prog = cses->callback_prog; 2128 new->se_cb_sec = cses->cb_sec; 2129 atomic_set(&new->se_ref, 0); 2130 idx = hash_sessionid(&new->se_sessionid); 2131 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); 2132 spin_lock(&clp->cl_lock); 2133 list_add(&new->se_perclnt, &clp->cl_sessions); 2134 spin_unlock(&clp->cl_lock); 2135 2136 { 2137 struct sockaddr *sa = svc_addr(rqstp); 2138 /* 2139 * This is a little silly; with sessions there's no real 2140 * use for the callback address. 
Use the peer address 2141 * as a reasonable default for now, but consider fixing 2142 * the rpc client not to require an address in the 2143 * future: 2144 */ 2145 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 2146 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 2147 } 2148 } 2149 2150 /* caller must hold client_lock */ 2151 static struct nfsd4_session * 2152 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 2153 { 2154 struct nfsd4_session *elem; 2155 int idx; 2156 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2157 2158 lockdep_assert_held(&nn->client_lock); 2159 2160 dump_sessionid(__func__, sessionid); 2161 idx = hash_sessionid(sessionid); 2162 /* Search in the appropriate list */ 2163 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 2164 if (!memcmp(elem->se_sessionid.data, sessionid->data, 2165 NFS4_MAX_SESSIONID_LEN)) { 2166 return elem; 2167 } 2168 } 2169 2170 dprintk("%s: session not found\n", __func__); 2171 return NULL; 2172 } 2173 2174 static struct nfsd4_session * 2175 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, 2176 __be32 *ret) 2177 { 2178 struct nfsd4_session *session; 2179 __be32 status = nfserr_badsession; 2180 2181 session = __find_in_sessionid_hashtbl(sessionid, net); 2182 if (!session) 2183 goto out; 2184 status = nfsd4_get_session_locked(session); 2185 if (status) 2186 session = NULL; 2187 out: 2188 *ret = status; 2189 return session; 2190 } 2191 2192 /* caller must hold client_lock */ 2193 static void 2194 unhash_session(struct nfsd4_session *ses) 2195 { 2196 struct nfs4_client *clp = ses->se_client; 2197 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2198 2199 lockdep_assert_held(&nn->client_lock); 2200 2201 list_del(&ses->se_hash); 2202 spin_lock(&ses->se_client->cl_lock); 2203 list_del(&ses->se_perclnt); 2204 spin_unlock(&ses->se_client->cl_lock); 2205 } 2206 2207 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 2208 static int 2209 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 2210 { 2211 /* 2212 * We're assuming the clid was not given out from a boot 2213 * precisely 2^32 (about 136 years) before this one. That seems 2214 * a safe assumption: 2215 */ 2216 if (clid->cl_boot == (u32)nn->boot_time) 2217 return 0; 2218 trace_nfsd_clid_stale(clid); 2219 return 1; 2220 } 2221 2222 /* 2223 * XXX Should we use a slab cache ? 2224 * This type of memory management is somewhat inefficient, but we use it 2225 * anyway since SETCLIENTID is not a common operation. 
2226 */ 2227 static struct nfs4_client *alloc_client(struct xdr_netobj name, 2228 struct nfsd_net *nn) 2229 { 2230 struct nfs4_client *clp; 2231 int i; 2232 2233 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) { 2234 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 2235 return NULL; 2236 } 2237 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL); 2238 if (clp == NULL) 2239 return NULL; 2240 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); 2241 if (clp->cl_name.data == NULL) 2242 goto err_no_name; 2243 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, 2244 sizeof(struct list_head), 2245 GFP_KERNEL); 2246 if (!clp->cl_ownerstr_hashtbl) 2247 goto err_no_hashtbl; 2248 for (i = 0; i < OWNER_HASH_SIZE; i++) 2249 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); 2250 INIT_LIST_HEAD(&clp->cl_sessions); 2251 idr_init(&clp->cl_stateids); 2252 atomic_set(&clp->cl_rpc_users, 0); 2253 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 2254 clp->cl_state = NFSD4_ACTIVE; 2255 atomic_inc(&nn->nfs4_client_count); 2256 atomic_set(&clp->cl_delegs_in_recall, 0); 2257 INIT_LIST_HEAD(&clp->cl_idhash); 2258 INIT_LIST_HEAD(&clp->cl_openowners); 2259 INIT_LIST_HEAD(&clp->cl_delegations); 2260 INIT_LIST_HEAD(&clp->cl_lru); 2261 INIT_LIST_HEAD(&clp->cl_revoked); 2262 #ifdef CONFIG_NFSD_PNFS 2263 INIT_LIST_HEAD(&clp->cl_lo_states); 2264 #endif 2265 INIT_LIST_HEAD(&clp->async_copies); 2266 spin_lock_init(&clp->async_lock); 2267 spin_lock_init(&clp->cl_lock); 2268 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 2269 return clp; 2270 err_no_hashtbl: 2271 kfree(clp->cl_name.data); 2272 err_no_name: 2273 kmem_cache_free(client_slab, clp); 2274 return NULL; 2275 } 2276 2277 static void __free_client(struct kref *k) 2278 { 2279 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); 2280 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); 2281 2282 free_svc_cred(&clp->cl_cred); 2283 kfree(clp->cl_ownerstr_hashtbl); 2284 kfree(clp->cl_name.data); 2285 kfree(clp->cl_nii_domain.data); 2286 kfree(clp->cl_nii_name.data); 2287 idr_destroy(&clp->cl_stateids); 2288 kfree(clp->cl_ra); 2289 kmem_cache_free(client_slab, clp); 2290 } 2291 2292 static void drop_client(struct nfs4_client *clp) 2293 { 2294 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); 2295 } 2296 2297 static void 2298 free_client(struct nfs4_client *clp) 2299 { 2300 while (!list_empty(&clp->cl_sessions)) { 2301 struct nfsd4_session *ses; 2302 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 2303 se_perclnt); 2304 list_del(&ses->se_perclnt); 2305 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 2306 free_session(ses); 2307 } 2308 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 2309 if (clp->cl_nfsd_dentry) { 2310 nfsd_client_rmdir(clp->cl_nfsd_dentry); 2311 clp->cl_nfsd_dentry = NULL; 2312 wake_up_all(&expiry_wq); 2313 } 2314 drop_client(clp); 2315 } 2316 2317 /* must be called under the client_lock */ 2318 static void 2319 unhash_client_locked(struct nfs4_client *clp) 2320 { 2321 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2322 struct nfsd4_session *ses; 2323 2324 lockdep_assert_held(&nn->client_lock); 2325 2326 /* Mark the client as expired! 
*/ 2327 clp->cl_time = 0; 2328 /* Make it invisible */ 2329 if (!list_empty(&clp->cl_idhash)) { 2330 list_del_init(&clp->cl_idhash); 2331 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2332 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 2333 else 2334 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2335 } 2336 list_del_init(&clp->cl_lru); 2337 spin_lock(&clp->cl_lock); 2338 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 2339 list_del_init(&ses->se_hash); 2340 spin_unlock(&clp->cl_lock); 2341 } 2342 2343 static void 2344 unhash_client(struct nfs4_client *clp) 2345 { 2346 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2347 2348 spin_lock(&nn->client_lock); 2349 unhash_client_locked(clp); 2350 spin_unlock(&nn->client_lock); 2351 } 2352 2353 static __be32 mark_client_expired_locked(struct nfs4_client *clp) 2354 { 2355 if (atomic_read(&clp->cl_rpc_users)) 2356 return nfserr_jukebox; 2357 unhash_client_locked(clp); 2358 return nfs_ok; 2359 } 2360 2361 static void 2362 __destroy_client(struct nfs4_client *clp) 2363 { 2364 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2365 int i; 2366 struct nfs4_openowner *oo; 2367 struct nfs4_delegation *dp; 2368 struct list_head reaplist; 2369 2370 INIT_LIST_HEAD(&reaplist); 2371 spin_lock(&state_lock); 2372 while (!list_empty(&clp->cl_delegations)) { 2373 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 2374 unhash_delegation_locked(dp, SC_STATUS_CLOSED); 2375 list_add(&dp->dl_recall_lru, &reaplist); 2376 } 2377 spin_unlock(&state_lock); 2378 while (!list_empty(&reaplist)) { 2379 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 2380 list_del_init(&dp->dl_recall_lru); 2381 destroy_unhashed_deleg(dp); 2382 } 2383 while (!list_empty(&clp->cl_revoked)) { 2384 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); 2385 list_del_init(&dp->dl_recall_lru); 2386 nfs4_put_stid(&dp->dl_stid); 2387 } 2388 while (!list_empty(&clp->cl_openowners)) { 2389 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 2390 nfs4_get_stateowner(&oo->oo_owner); 2391 release_openowner(oo); 2392 } 2393 for (i = 0; i < OWNER_HASH_SIZE; i++) { 2394 struct nfs4_stateowner *so, *tmp; 2395 2396 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], 2397 so_strhash) { 2398 /* Should be no openowners at this point */ 2399 WARN_ON_ONCE(so->so_is_open_owner); 2400 remove_blocked_locks(lockowner(so)); 2401 } 2402 } 2403 nfsd4_return_all_client_layouts(clp); 2404 nfsd4_shutdown_copy(clp); 2405 nfsd4_shutdown_callback(clp); 2406 if (clp->cl_cb_conn.cb_xprt) 2407 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 2408 atomic_add_unless(&nn->nfs4_client_count, -1, 0); 2409 nfsd4_dec_courtesy_client_count(nn, clp); 2410 free_client(clp); 2411 wake_up_all(&expiry_wq); 2412 } 2413 2414 static void 2415 destroy_client(struct nfs4_client *clp) 2416 { 2417 unhash_client(clp); 2418 __destroy_client(clp); 2419 } 2420 2421 static void inc_reclaim_complete(struct nfs4_client *clp) 2422 { 2423 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2424 2425 if (!nn->track_reclaim_completes) 2426 return; 2427 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) 2428 return; 2429 if (atomic_inc_return(&nn->nr_reclaim_complete) == 2430 nn->reclaim_str_hashtbl_size) { 2431 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", 2432 clp->net->ns.inum); 2433 nfsd4_end_grace(nn); 2434 } 2435 } 2436 2437 static void expire_client(struct 
nfs4_client *clp) 2438 { 2439 unhash_client(clp); 2440 nfsd4_client_record_remove(clp); 2441 __destroy_client(clp); 2442 } 2443 2444 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) 2445 { 2446 memcpy(target->cl_verifier.data, source->data, 2447 sizeof(target->cl_verifier.data)); 2448 } 2449 2450 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 2451 { 2452 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 2453 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 2454 } 2455 2456 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 2457 { 2458 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); 2459 target->cr_raw_principal = kstrdup(source->cr_raw_principal, 2460 GFP_KERNEL); 2461 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); 2462 if ((source->cr_principal && !target->cr_principal) || 2463 (source->cr_raw_principal && !target->cr_raw_principal) || 2464 (source->cr_targ_princ && !target->cr_targ_princ)) 2465 return -ENOMEM; 2466 2467 target->cr_flavor = source->cr_flavor; 2468 target->cr_uid = source->cr_uid; 2469 target->cr_gid = source->cr_gid; 2470 target->cr_group_info = source->cr_group_info; 2471 get_group_info(target->cr_group_info); 2472 target->cr_gss_mech = source->cr_gss_mech; 2473 if (source->cr_gss_mech) 2474 gss_mech_get(source->cr_gss_mech); 2475 return 0; 2476 } 2477 2478 static int 2479 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) 2480 { 2481 if (o1->len < o2->len) 2482 return -1; 2483 if (o1->len > o2->len) 2484 return 1; 2485 return memcmp(o1->data, o2->data, o1->len); 2486 } 2487 2488 static int 2489 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 2490 { 2491 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 2492 } 2493 2494 static int 2495 same_clid(clientid_t *cl1, clientid_t *cl2) 2496 { 2497 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 2498 } 2499 2500 static bool groups_equal(struct group_info *g1, struct group_info *g2) 2501 { 2502 int i; 2503 2504 if (g1->ngroups != g2->ngroups) 2505 return false; 2506 for (i=0; i<g1->ngroups; i++) 2507 if (!gid_eq(g1->gid[i], g2->gid[i])) 2508 return false; 2509 return true; 2510 } 2511 2512 /* 2513 * RFC 3530 language requires clid_inuse be returned when the 2514 * "principal" associated with a request differs from that previously 2515 * used. We use uid, gids, and gss principal string as our best 2516 * approximation. We also don't want to allow non-gss use of a client 2517 * established using gss: in theory cr_principal should catch that 2518 * change, but in practice cr_principal can be null even in the gss case 2519 * since gssd doesn't always pass down a principal string. 2520 */ 2521 static bool is_gss_cred(struct svc_cred *cr) 2522 { 2523 /* Is cr_flavor one of the gss "pseudoflavors"?: */ 2524 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); 2525 } 2526 2527 2528 static bool 2529 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 2530 { 2531 if ((is_gss_cred(cr1) != is_gss_cred(cr2)) 2532 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) 2533 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) 2534 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 2535 return false; 2536 /* XXX: check that cr_targ_princ fields match ?
*/ 2537 if (cr1->cr_principal == cr2->cr_principal) 2538 return true; 2539 if (!cr1->cr_principal || !cr2->cr_principal) 2540 return false; 2541 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 2542 } 2543 2544 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) 2545 { 2546 struct svc_cred *cr = &rqstp->rq_cred; 2547 u32 service; 2548 2549 if (!cr->cr_gss_mech) 2550 return false; 2551 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); 2552 return service == RPC_GSS_SVC_INTEGRITY || 2553 service == RPC_GSS_SVC_PRIVACY; 2554 } 2555 2556 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) 2557 { 2558 struct svc_cred *cr = &rqstp->rq_cred; 2559 2560 if (!cl->cl_mach_cred) 2561 return true; 2562 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) 2563 return false; 2564 if (!svc_rqst_integrity_protected(rqstp)) 2565 return false; 2566 if (cl->cl_cred.cr_raw_principal) 2567 return 0 == strcmp(cl->cl_cred.cr_raw_principal, 2568 cr->cr_raw_principal); 2569 if (!cr->cr_principal) 2570 return false; 2571 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 2572 } 2573 2574 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) 2575 { 2576 __be32 verf[2]; 2577 2578 /* 2579 * This is opaque to client, so no need to byte-swap. Use 2580 * __force to keep sparse happy 2581 */ 2582 verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); 2583 verf[1] = (__force __be32)nn->clverifier_counter++; 2584 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 2585 } 2586 2587 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 2588 { 2589 clp->cl_clientid.cl_boot = (u32)nn->boot_time; 2590 clp->cl_clientid.cl_id = nn->clientid_counter++; 2591 gen_confirm(clp, nn); 2592 } 2593 2594 static struct nfs4_stid * 2595 find_stateid_locked(struct nfs4_client *cl, stateid_t *t) 2596 { 2597 struct nfs4_stid *ret; 2598 2599 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); 2600 if (!ret || !ret->sc_type) 2601 return NULL; 2602 return ret; 2603 } 2604 2605 static struct nfs4_stid * 2606 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, 2607 unsigned short typemask, unsigned short ok_states) 2608 { 2609 struct nfs4_stid *s; 2610 2611 spin_lock(&cl->cl_lock); 2612 s = find_stateid_locked(cl, t); 2613 if (s != NULL) { 2614 if ((s->sc_status & ~ok_states) == 0 && 2615 (typemask & s->sc_type)) 2616 refcount_inc(&s->sc_count); 2617 else 2618 s = NULL; 2619 } 2620 spin_unlock(&cl->cl_lock); 2621 return s; 2622 } 2623 2624 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode) 2625 { 2626 struct nfsdfs_client *nc; 2627 nc = get_nfsdfs_client(inode); 2628 if (!nc) 2629 return NULL; 2630 return container_of(nc, struct nfs4_client, cl_nfsdfs); 2631 } 2632 2633 static void seq_quote_mem(struct seq_file *m, char *data, int len) 2634 { 2635 seq_puts(m, "\""); 2636 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\"); 2637 seq_puts(m, "\""); 2638 } 2639 2640 static const char *cb_state2str(int state) 2641 { 2642 switch (state) { 2643 case NFSD4_CB_UP: 2644 return "UP"; 2645 case NFSD4_CB_UNKNOWN: 2646 return "UNKNOWN"; 2647 case NFSD4_CB_DOWN: 2648 return "DOWN"; 2649 case NFSD4_CB_FAULT: 2650 return "FAULT"; 2651 } 2652 return "UNDEFINED"; 2653 } 2654 2655 static int client_info_show(struct seq_file *m, void *v) 2656 { 2657 struct inode *inode = file_inode(m->file); 2658 struct nfs4_client *clp; 2659 u64 clid; 2660 2661 clp = get_nfsdfs_clp(inode); 2662 if (!clp) 2663 return 
-ENXIO; 2664 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); 2665 seq_printf(m, "clientid: 0x%llx\n", clid); 2666 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); 2667 2668 if (clp->cl_state == NFSD4_COURTESY) 2669 seq_puts(m, "status: courtesy\n"); 2670 else if (clp->cl_state == NFSD4_EXPIRABLE) 2671 seq_puts(m, "status: expirable\n"); 2672 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2673 seq_puts(m, "status: confirmed\n"); 2674 else 2675 seq_puts(m, "status: unconfirmed\n"); 2676 seq_printf(m, "seconds from last renew: %lld\n", 2677 ktime_get_boottime_seconds() - clp->cl_time); 2678 seq_puts(m, "name: "); 2679 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); 2680 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); 2681 if (clp->cl_nii_domain.data) { 2682 seq_puts(m, "Implementation domain: "); 2683 seq_quote_mem(m, clp->cl_nii_domain.data, 2684 clp->cl_nii_domain.len); 2685 seq_puts(m, "\nImplementation name: "); 2686 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); 2687 seq_printf(m, "\nImplementation time: [%lld, %ld]\n", 2688 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); 2689 } 2690 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); 2691 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr); 2692 seq_printf(m, "admin-revoked states: %d\n", 2693 atomic_read(&clp->cl_admin_revoked)); 2694 drop_client(clp); 2695 2696 return 0; 2697 } 2698 2699 DEFINE_SHOW_ATTRIBUTE(client_info); 2700 2701 static void *states_start(struct seq_file *s, loff_t *pos) 2702 __acquires(&clp->cl_lock) 2703 { 2704 struct nfs4_client *clp = s->private; 2705 unsigned long id = *pos; 2706 void *ret; 2707 2708 spin_lock(&clp->cl_lock); 2709 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2710 *pos = id; 2711 return ret; 2712 } 2713 2714 static void *states_next(struct seq_file *s, void *v, loff_t *pos) 2715 { 2716 struct nfs4_client *clp = s->private; 2717 unsigned long id = *pos; 2718 void *ret; 2719 2720 id = *pos; 2721 id++; 2722 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2723 *pos = id; 2724 return ret; 2725 } 2726 2727 static void states_stop(struct seq_file *s, void *v) 2728 __releases(&clp->cl_lock) 2729 { 2730 struct nfs4_client *clp = s->private; 2731 2732 spin_unlock(&clp->cl_lock); 2733 } 2734 2735 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) 2736 { 2737 seq_printf(s, "filename: \"%pD2\"", f->nf_file); 2738 } 2739 2740 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) 2741 { 2742 struct inode *inode = file_inode(f->nf_file); 2743 2744 seq_printf(s, "superblock: \"%02x:%02x:%ld\"", 2745 MAJOR(inode->i_sb->s_dev), 2746 MINOR(inode->i_sb->s_dev), 2747 inode->i_ino); 2748 } 2749 2750 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) 2751 { 2752 seq_puts(s, "owner: "); 2753 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); 2754 } 2755 2756 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) 2757 { 2758 seq_printf(s, "0x%.8x", stid->si_generation); 2759 seq_printf(s, "%12phN", &stid->si_opaque); 2760 } 2761 2762 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) 2763 { 2764 struct nfs4_ol_stateid *ols; 2765 struct nfs4_file *nf; 2766 struct nfsd_file *file; 2767 struct nfs4_stateowner *oo; 2768 unsigned int access, deny; 2769 2770 ols = openlockstateid(st); 2771 oo = ols->st_stateowner; 2772 nf = st->sc_file; 2773 2774 seq_puts(s, "- "); 2775 nfs4_show_stateid(s, &st->sc_stateid); 2776 
seq_puts(s, ": { type: open, "); 2777 2778 access = bmap_to_share_mode(ols->st_access_bmap); 2779 deny = bmap_to_share_mode(ols->st_deny_bmap); 2780 2781 seq_printf(s, "access: %s%s, ", 2782 access & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2783 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); 2784 seq_printf(s, "deny: %s%s, ", 2785 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2786 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); 2787 2788 spin_lock(&nf->fi_lock); 2789 file = find_any_file_locked(nf); 2790 if (file) { 2791 nfs4_show_superblock(s, file); 2792 seq_puts(s, ", "); 2793 nfs4_show_fname(s, file); 2794 seq_puts(s, ", "); 2795 } 2796 spin_unlock(&nf->fi_lock); 2797 nfs4_show_owner(s, oo); 2798 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2799 seq_puts(s, ", admin-revoked"); 2800 seq_puts(s, " }\n"); 2801 return 0; 2802 } 2803 2804 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) 2805 { 2806 struct nfs4_ol_stateid *ols; 2807 struct nfs4_file *nf; 2808 struct nfsd_file *file; 2809 struct nfs4_stateowner *oo; 2810 2811 ols = openlockstateid(st); 2812 oo = ols->st_stateowner; 2813 nf = st->sc_file; 2814 2815 seq_puts(s, "- "); 2816 nfs4_show_stateid(s, &st->sc_stateid); 2817 seq_puts(s, ": { type: lock, "); 2818 2819 spin_lock(&nf->fi_lock); 2820 file = find_any_file_locked(nf); 2821 if (file) { 2822 /* 2823 * Note: a lock stateid isn't really the same thing as a lock, 2824 * it's the locking state held by one owner on a file, and there 2825 * may be multiple (or no) lock ranges associated with it. 2826 * (The same is true of open stateids.) 2827 */ 2828 2829 nfs4_show_superblock(s, file); 2830 /* XXX: open stateid? */ 2831 seq_puts(s, ", "); 2832 nfs4_show_fname(s, file); 2833 seq_puts(s, ", "); 2834 } 2835 nfs4_show_owner(s, oo); 2836 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2837 seq_puts(s, ", admin-revoked"); 2838 seq_puts(s, " }\n"); 2839 spin_unlock(&nf->fi_lock); 2840 return 0; 2841 } 2842 2843 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) 2844 { 2845 struct nfs4_delegation *ds; 2846 struct nfs4_file *nf; 2847 struct nfsd_file *file; 2848 2849 ds = delegstateid(st); 2850 nf = st->sc_file; 2851 2852 seq_puts(s, "- "); 2853 nfs4_show_stateid(s, &st->sc_stateid); 2854 seq_puts(s, ": { type: deleg, "); 2855 2856 seq_printf(s, "access: %s", 2857 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w"); 2858 2859 /* XXX: lease time, whether it's being recalled. */ 2860 2861 spin_lock(&nf->fi_lock); 2862 file = nf->fi_deleg_file; 2863 if (file) { 2864 seq_puts(s, ", "); 2865 nfs4_show_superblock(s, file); 2866 seq_puts(s, ", "); 2867 nfs4_show_fname(s, file); 2868 } 2869 spin_unlock(&nf->fi_lock); 2870 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2871 seq_puts(s, ", admin-revoked"); 2872 seq_puts(s, " }\n"); 2873 return 0; 2874 } 2875 2876 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) 2877 { 2878 struct nfs4_layout_stateid *ls; 2879 struct nfsd_file *file; 2880 2881 ls = container_of(st, struct nfs4_layout_stateid, ls_stid); 2882 2883 seq_puts(s, "- "); 2884 nfs4_show_stateid(s, &st->sc_stateid); 2885 seq_puts(s, ": { type: layout"); 2886 2887 /* XXX: What else would be useful?
*/ 2888 2889 spin_lock(&ls->ls_stid.sc_file->fi_lock); 2890 file = ls->ls_file; 2891 if (file) { 2892 seq_puts(s, ", "); 2893 nfs4_show_superblock(s, file); 2894 seq_puts(s, ", "); 2895 nfs4_show_fname(s, file); 2896 } 2897 spin_unlock(&ls->ls_stid.sc_file->fi_lock); 2898 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2899 seq_puts(s, ", admin-revoked"); 2900 seq_puts(s, " }\n"); 2901 2902 return 0; 2903 } 2904 2905 static int states_show(struct seq_file *s, void *v) 2906 { 2907 struct nfs4_stid *st = v; 2908 2909 switch (st->sc_type) { 2910 case SC_TYPE_OPEN: 2911 return nfs4_show_open(s, st); 2912 case SC_TYPE_LOCK: 2913 return nfs4_show_lock(s, st); 2914 case SC_TYPE_DELEG: 2915 return nfs4_show_deleg(s, st); 2916 case SC_TYPE_LAYOUT: 2917 return nfs4_show_layout(s, st); 2918 default: 2919 return 0; /* XXX: or SEQ_SKIP? */ 2920 } 2921 /* XXX: copy stateids? */ 2922 } 2923 2924 static struct seq_operations states_seq_ops = { 2925 .start = states_start, 2926 .next = states_next, 2927 .stop = states_stop, 2928 .show = states_show 2929 }; 2930 2931 static int client_states_open(struct inode *inode, struct file *file) 2932 { 2933 struct seq_file *s; 2934 struct nfs4_client *clp; 2935 int ret; 2936 2937 clp = get_nfsdfs_clp(inode); 2938 if (!clp) 2939 return -ENXIO; 2940 2941 ret = seq_open(file, &states_seq_ops); 2942 if (ret) 2943 return ret; 2944 s = file->private_data; 2945 s->private = clp; 2946 return 0; 2947 } 2948 2949 static int client_opens_release(struct inode *inode, struct file *file) 2950 { 2951 struct seq_file *m = file->private_data; 2952 struct nfs4_client *clp = m->private; 2953 2954 /* XXX: alternatively, we could get/drop in seq start/stop */ 2955 drop_client(clp); 2956 return seq_release(inode, file); 2957 } 2958 2959 static const struct file_operations client_states_fops = { 2960 .open = client_states_open, 2961 .read = seq_read, 2962 .llseek = seq_lseek, 2963 .release = client_opens_release, 2964 }; 2965 2966 /* 2967 * Normally we refuse to destroy clients that are in use, but here the 2968 * administrator is telling us to just do it. 
We also want to wait 2969 * so the caller has a guarantee that the client's locks are gone by 2970 * the time the write returns: 2971 */ 2972 static void force_expire_client(struct nfs4_client *clp) 2973 { 2974 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2975 bool already_expired; 2976 2977 trace_nfsd_clid_admin_expired(&clp->cl_clientid); 2978 2979 spin_lock(&nn->client_lock); 2980 clp->cl_time = 0; 2981 spin_unlock(&nn->client_lock); 2982 2983 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); 2984 spin_lock(&nn->client_lock); 2985 already_expired = list_empty(&clp->cl_lru); 2986 if (!already_expired) 2987 unhash_client_locked(clp); 2988 spin_unlock(&nn->client_lock); 2989 2990 if (!already_expired) 2991 expire_client(clp); 2992 else 2993 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); 2994 } 2995 2996 static ssize_t client_ctl_write(struct file *file, const char __user *buf, 2997 size_t size, loff_t *pos) 2998 { 2999 char *data; 3000 struct nfs4_client *clp; 3001 3002 data = simple_transaction_get(file, buf, size); 3003 if (IS_ERR(data)) 3004 return PTR_ERR(data); 3005 if (size != 7 || 0 != memcmp(data, "expire\n", 7)) 3006 return -EINVAL; 3007 clp = get_nfsdfs_clp(file_inode(file)); 3008 if (!clp) 3009 return -ENXIO; 3010 force_expire_client(clp); 3011 drop_client(clp); 3012 return 7; 3013 } 3014 3015 static const struct file_operations client_ctl_fops = { 3016 .write = client_ctl_write, 3017 .release = simple_transaction_release, 3018 }; 3019 3020 static const struct tree_descr client_files[] = { 3021 [0] = {"info", &client_info_fops, S_IRUSR}, 3022 [1] = {"states", &client_states_fops, S_IRUSR}, 3023 [2] = {"ctl", &client_ctl_fops, S_IWUSR}, 3024 [3] = {""}, 3025 }; 3026 3027 static int 3028 nfsd4_cb_recall_any_done(struct nfsd4_callback *cb, 3029 struct rpc_task *task) 3030 { 3031 trace_nfsd_cb_recall_any_done(cb, task); 3032 switch (task->tk_status) { 3033 case -NFS4ERR_DELAY: 3034 rpc_delay(task, 2 * HZ); 3035 return 0; 3036 default: 3037 return 1; 3038 } 3039 } 3040 3041 static void 3042 nfsd4_cb_recall_any_release(struct nfsd4_callback *cb) 3043 { 3044 struct nfs4_client *clp = cb->cb_clp; 3045 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3046 3047 spin_lock(&nn->client_lock); 3048 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); 3049 put_client_renew_locked(clp); 3050 spin_unlock(&nn->client_lock); 3051 } 3052 3053 static int 3054 nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task) 3055 { 3056 struct nfs4_cb_fattr *ncf = 3057 container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 3058 3059 ncf->ncf_cb_status = task->tk_status; 3060 switch (task->tk_status) { 3061 case -NFS4ERR_DELAY: 3062 rpc_delay(task, 2 * HZ); 3063 return 0; 3064 default: 3065 return 1; 3066 } 3067 } 3068 3069 static void 3070 nfsd4_cb_getattr_release(struct nfsd4_callback *cb) 3071 { 3072 struct nfs4_cb_fattr *ncf = 3073 container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 3074 struct nfs4_delegation *dp = 3075 container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 3076 3077 nfs4_put_stid(&dp->dl_stid); 3078 clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags); 3079 wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY); 3080 } 3081 3082 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = { 3083 .done = nfsd4_cb_recall_any_done, 3084 .release = nfsd4_cb_recall_any_release, 3085 }; 3086 3087 static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = { 3088 .done = nfsd4_cb_getattr_done, 3089 .release = nfsd4_cb_getattr_release, 3090 }; 
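/*
 * Usage sketch, not part of the kernel build: client_ctl_write() above
 * accepts exactly the 7-byte string "expire\n".  Assuming the nfsd
 * filesystem is mounted in its usual place (/proc/fs/nfsd) and using a
 * made-up client directory "42", an administrator could force a client
 * to be expired from userspace roughly like this:
 *
 *	int fd = open("/proc/fs/nfsd/clients/42/ctl", O_WRONLY);
 *	if (fd >= 0) {
 *		if (write(fd, "expire\n", 7) != 7)
 *			perror("expire client");
 *		close(fd);
 *	}
 *
 * The write does not return until force_expire_client() has waited for
 * the client's state, including its locks, to be torn down.
 */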
3091 3092 static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf) 3093 { 3094 struct nfs4_delegation *dp = 3095 container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 3096 3097 if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags)) 3098 return; 3099 /* set to proper status when nfsd4_cb_getattr_done runs */ 3100 ncf->ncf_cb_status = NFS4ERR_IO; 3101 3102 refcount_inc(&dp->dl_stid.sc_count); 3103 nfsd4_run_cb(&ncf->ncf_getattr); 3104 } 3105 3106 static struct nfs4_client *create_client(struct xdr_netobj name, 3107 struct svc_rqst *rqstp, nfs4_verifier *verf) 3108 { 3109 struct nfs4_client *clp; 3110 struct sockaddr *sa = svc_addr(rqstp); 3111 int ret; 3112 struct net *net = SVC_NET(rqstp); 3113 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3114 struct dentry *dentries[ARRAY_SIZE(client_files)]; 3115 3116 clp = alloc_client(name, nn); 3117 if (clp == NULL) 3118 return NULL; 3119 3120 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 3121 if (ret) { 3122 free_client(clp); 3123 return NULL; 3124 } 3125 gen_clid(clp, nn); 3126 kref_init(&clp->cl_nfsdfs.cl_ref); 3127 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 3128 clp->cl_time = ktime_get_boottime_seconds(); 3129 clear_bit(0, &clp->cl_cb_slot_busy); 3130 copy_verf(clp, verf); 3131 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); 3132 clp->cl_cb_session = NULL; 3133 clp->net = net; 3134 clp->cl_nfsd_dentry = nfsd_client_mkdir( 3135 nn, &clp->cl_nfsdfs, 3136 clp->cl_clientid.cl_id - nn->clientid_base, 3137 client_files, dentries); 3138 clp->cl_nfsd_info_dentry = dentries[0]; 3139 if (!clp->cl_nfsd_dentry) { 3140 free_client(clp); 3141 return NULL; 3142 } 3143 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); 3144 if (!clp->cl_ra) { 3145 free_client(clp); 3146 return NULL; 3147 } 3148 clp->cl_ra_time = 0; 3149 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, 3150 NFSPROC4_CLNT_CB_RECALL_ANY); 3151 return clp; 3152 } 3153 3154 static void 3155 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 3156 { 3157 struct rb_node **new = &(root->rb_node), *parent = NULL; 3158 struct nfs4_client *clp; 3159 3160 while (*new) { 3161 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 3162 parent = *new; 3163 3164 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 3165 new = &((*new)->rb_left); 3166 else 3167 new = &((*new)->rb_right); 3168 } 3169 3170 rb_link_node(&new_clp->cl_namenode, parent, new); 3171 rb_insert_color(&new_clp->cl_namenode, root); 3172 } 3173 3174 static struct nfs4_client * 3175 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 3176 { 3177 int cmp; 3178 struct rb_node *node = root->rb_node; 3179 struct nfs4_client *clp; 3180 3181 while (node) { 3182 clp = rb_entry(node, struct nfs4_client, cl_namenode); 3183 cmp = compare_blob(&clp->cl_name, name); 3184 if (cmp > 0) 3185 node = node->rb_left; 3186 else if (cmp < 0) 3187 node = node->rb_right; 3188 else 3189 return clp; 3190 } 3191 return NULL; 3192 } 3193 3194 static void 3195 add_to_unconfirmed(struct nfs4_client *clp) 3196 { 3197 unsigned int idhashval; 3198 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3199 3200 lockdep_assert_held(&nn->client_lock); 3201 3202 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 3203 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 3204 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 3205 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 3206 renew_client_locked(clp); 3207 } 3208 3209 static void 3210 
move_to_confirmed(struct nfs4_client *clp) 3211 { 3212 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 3213 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3214 3215 lockdep_assert_held(&nn->client_lock); 3216 3217 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 3218 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 3219 add_clp_to_name_tree(clp, &nn->conf_name_tree); 3220 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 3221 trace_nfsd_clid_confirmed(&clp->cl_clientid); 3222 renew_client_locked(clp); 3223 } 3224 3225 static struct nfs4_client * 3226 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) 3227 { 3228 struct nfs4_client *clp; 3229 unsigned int idhashval = clientid_hashval(clid->cl_id); 3230 3231 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { 3232 if (same_clid(&clp->cl_clientid, clid)) { 3233 if ((bool)clp->cl_minorversion != sessions) 3234 return NULL; 3235 renew_client_locked(clp); 3236 return clp; 3237 } 3238 } 3239 return NULL; 3240 } 3241 3242 static struct nfs4_client * 3243 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 3244 { 3245 struct list_head *tbl = nn->conf_id_hashtbl; 3246 3247 lockdep_assert_held(&nn->client_lock); 3248 return find_client_in_id_table(tbl, clid, sessions); 3249 } 3250 3251 static struct nfs4_client * 3252 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 3253 { 3254 struct list_head *tbl = nn->unconf_id_hashtbl; 3255 3256 lockdep_assert_held(&nn->client_lock); 3257 return find_client_in_id_table(tbl, clid, sessions); 3258 } 3259 3260 static bool clp_used_exchangeid(struct nfs4_client *clp) 3261 { 3262 return clp->cl_exchange_flags != 0; 3263 } 3264 3265 static struct nfs4_client * 3266 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 3267 { 3268 lockdep_assert_held(&nn->client_lock); 3269 return find_clp_in_name_tree(name, &nn->conf_name_tree); 3270 } 3271 3272 static struct nfs4_client * 3273 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 3274 { 3275 lockdep_assert_held(&nn->client_lock); 3276 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 3277 } 3278 3279 static void 3280 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 3281 { 3282 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 3283 struct sockaddr *sa = svc_addr(rqstp); 3284 u32 scopeid = rpc_get_scope_id(sa); 3285 unsigned short expected_family; 3286 3287 /* Currently, we only support tcp and tcp6 for the callback channel */ 3288 if (se->se_callback_netid_len == 3 && 3289 !memcmp(se->se_callback_netid_val, "tcp", 3)) 3290 expected_family = AF_INET; 3291 else if (se->se_callback_netid_len == 4 && 3292 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 3293 expected_family = AF_INET6; 3294 else 3295 goto out_err; 3296 3297 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, 3298 se->se_callback_addr_len, 3299 (struct sockaddr *)&conn->cb_addr, 3300 sizeof(conn->cb_addr)); 3301 3302 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 3303 goto out_err; 3304 3305 if (conn->cb_addr.ss_family == AF_INET6) 3306 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 3307 3308 conn->cb_prog = se->se_callback_prog; 3309 conn->cb_ident = se->se_callback_ident; 3310 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 3311 trace_nfsd_cb_args(clp, conn); 3312 return; 3313 out_err: 3314 
conn->cb_addr.ss_family = AF_UNSPEC; 3315 conn->cb_addrlen = 0; 3316 trace_nfsd_cb_nodelegs(clp); 3317 return; 3318 } 3319 3320 /* 3321 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 3322 */ 3323 static void 3324 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 3325 { 3326 struct xdr_buf *buf = resp->xdr->buf; 3327 struct nfsd4_slot *slot = resp->cstate.slot; 3328 unsigned int base; 3329 3330 dprintk("--> %s slot %p\n", __func__, slot); 3331 3332 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 3333 slot->sl_opcnt = resp->opcnt; 3334 slot->sl_status = resp->cstate.status; 3335 free_svc_cred(&slot->sl_cred); 3336 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); 3337 3338 if (!nfsd4_cache_this(resp)) { 3339 slot->sl_flags &= ~NFSD4_SLOT_CACHED; 3340 return; 3341 } 3342 slot->sl_flags |= NFSD4_SLOT_CACHED; 3343 3344 base = resp->cstate.data_offset; 3345 slot->sl_datalen = buf->len - base; 3346 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) 3347 WARN(1, "%s: sessions DRC could not cache compound\n", 3348 __func__); 3349 return; 3350 } 3351 3352 /* 3353 * Encode the replay sequence operation from the slot values. 3354 * If cachethis is FALSE encode the uncached rep error on the next 3355 * operation which sets resp->p and increments resp->opcnt for 3356 * nfs4svc_encode_compoundres. 3357 * 3358 */ 3359 static __be32 3360 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 3361 struct nfsd4_compoundres *resp) 3362 { 3363 struct nfsd4_op *op; 3364 struct nfsd4_slot *slot = resp->cstate.slot; 3365 3366 /* Encode the replayed sequence operation */ 3367 op = &args->ops[resp->opcnt - 1]; 3368 nfsd4_encode_operation(resp, op); 3369 3370 if (slot->sl_flags & NFSD4_SLOT_CACHED) 3371 return op->status; 3372 if (args->opcnt == 1) { 3373 /* 3374 * The original operation wasn't a solo sequence--we 3375 * always cache those--so this retry must not match the 3376 * original: 3377 */ 3378 op->status = nfserr_seq_false_retry; 3379 } else { 3380 op = &args->ops[resp->opcnt++]; 3381 op->status = nfserr_retry_uncached_rep; 3382 nfsd4_encode_operation(resp, op); 3383 } 3384 return op->status; 3385 } 3386 3387 /* 3388 * The sequence operation is not cached because we can use the slot and 3389 * session values. 3390 */ 3391 static __be32 3392 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 3393 struct nfsd4_sequence *seq) 3394 { 3395 struct nfsd4_slot *slot = resp->cstate.slot; 3396 struct xdr_stream *xdr = resp->xdr; 3397 __be32 *p; 3398 __be32 status; 3399 3400 dprintk("--> %s slot %p\n", __func__, slot); 3401 3402 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 3403 if (status) 3404 return status; 3405 3406 p = xdr_reserve_space(xdr, slot->sl_datalen); 3407 if (!p) { 3408 WARN_ON_ONCE(1); 3409 return nfserr_serverfault; 3410 } 3411 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); 3412 xdr_commit_encode(xdr); 3413 3414 resp->opcnt = slot->sl_opcnt; 3415 return slot->sl_status; 3416 } 3417 3418 /* 3419 * Set the exchange_id flags returned by the server. 3420 */ 3421 static void 3422 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 3423 { 3424 #ifdef CONFIG_NFSD_PNFS 3425 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; 3426 #else 3427 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 3428 #endif 3429 3430 /* Referrals are supported, Migration is not. */ 3431 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 3432 3433 /* set the wire flags to return to client. 
*/ 3434 clid->flags = new->cl_exchange_flags; 3435 } 3436 3437 static bool client_has_openowners(struct nfs4_client *clp) 3438 { 3439 struct nfs4_openowner *oo; 3440 3441 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { 3442 if (!list_empty(&oo->oo_owner.so_stateids)) 3443 return true; 3444 } 3445 return false; 3446 } 3447 3448 static bool client_has_state(struct nfs4_client *clp) 3449 { 3450 return client_has_openowners(clp) 3451 #ifdef CONFIG_NFSD_PNFS 3452 || !list_empty(&clp->cl_lo_states) 3453 #endif 3454 || !list_empty(&clp->cl_delegations) 3455 || !list_empty(&clp->cl_sessions) 3456 || !list_empty(&clp->async_copies); 3457 } 3458 3459 static __be32 copy_impl_id(struct nfs4_client *clp, 3460 struct nfsd4_exchange_id *exid) 3461 { 3462 if (!exid->nii_domain.data) 3463 return 0; 3464 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); 3465 if (!clp->cl_nii_domain.data) 3466 return nfserr_jukebox; 3467 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); 3468 if (!clp->cl_nii_name.data) 3469 return nfserr_jukebox; 3470 clp->cl_nii_time = exid->nii_time; 3471 return 0; 3472 } 3473 3474 __be32 3475 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3476 union nfsd4_op_u *u) 3477 { 3478 struct nfsd4_exchange_id *exid = &u->exchange_id; 3479 struct nfs4_client *conf, *new; 3480 struct nfs4_client *unconf = NULL; 3481 __be32 status; 3482 char addr_str[INET6_ADDRSTRLEN]; 3483 nfs4_verifier verf = exid->verifier; 3484 struct sockaddr *sa = svc_addr(rqstp); 3485 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 3486 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3487 3488 rpc_ntop(sa, addr_str, sizeof(addr_str)); 3489 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 3490 "ip_addr=%s flags %x, spa_how %u\n", 3491 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 3492 addr_str, exid->flags, exid->spa_how); 3493 3494 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 3495 return nfserr_inval; 3496 3497 new = create_client(exid->clname, rqstp, &verf); 3498 if (new == NULL) 3499 return nfserr_jukebox; 3500 status = copy_impl_id(new, exid); 3501 if (status) 3502 goto out_nolock; 3503 3504 switch (exid->spa_how) { 3505 case SP4_MACH_CRED: 3506 exid->spo_must_enforce[0] = 0; 3507 exid->spo_must_enforce[1] = ( 3508 1 << (OP_BIND_CONN_TO_SESSION - 32) | 3509 1 << (OP_EXCHANGE_ID - 32) | 3510 1 << (OP_CREATE_SESSION - 32) | 3511 1 << (OP_DESTROY_SESSION - 32) | 3512 1 << (OP_DESTROY_CLIENTID - 32)); 3513 3514 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 3515 1 << (OP_OPEN_DOWNGRADE) | 3516 1 << (OP_LOCKU) | 3517 1 << (OP_DELEGRETURN)); 3518 3519 exid->spo_must_allow[1] &= ( 3520 1 << (OP_TEST_STATEID - 32) | 3521 1 << (OP_FREE_STATEID - 32)); 3522 if (!svc_rqst_integrity_protected(rqstp)) { 3523 status = nfserr_inval; 3524 goto out_nolock; 3525 } 3526 /* 3527 * Sometimes userspace doesn't give us a principal. 3528 * Which is a bug, really. 
Anyway, we can't enforce 3529 * MACH_CRED in that case, better to give up now: 3530 */ 3531 if (!new->cl_cred.cr_principal && 3532 !new->cl_cred.cr_raw_principal) { 3533 status = nfserr_serverfault; 3534 goto out_nolock; 3535 } 3536 new->cl_mach_cred = true; 3537 break; 3538 case SP4_NONE: 3539 break; 3540 default: /* checked by xdr code */ 3541 WARN_ON_ONCE(1); 3542 fallthrough; 3543 case SP4_SSV: 3544 status = nfserr_encr_alg_unsupp; 3545 goto out_nolock; 3546 } 3547 3548 /* Cases below refer to rfc 5661 section 18.35.4: */ 3549 spin_lock(&nn->client_lock); 3550 conf = find_confirmed_client_by_name(&exid->clname, nn); 3551 if (conf) { 3552 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 3553 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 3554 3555 if (update) { 3556 if (!clp_used_exchangeid(conf)) { /* buggy client */ 3557 status = nfserr_inval; 3558 goto out; 3559 } 3560 if (!nfsd4_mach_creds_match(conf, rqstp)) { 3561 status = nfserr_wrong_cred; 3562 goto out; 3563 } 3564 if (!creds_match) { /* case 9 */ 3565 status = nfserr_perm; 3566 goto out; 3567 } 3568 if (!verfs_match) { /* case 8 */ 3569 status = nfserr_not_same; 3570 goto out; 3571 } 3572 /* case 6 */ 3573 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 3574 trace_nfsd_clid_confirmed_r(conf); 3575 goto out_copy; 3576 } 3577 if (!creds_match) { /* case 3 */ 3578 if (client_has_state(conf)) { 3579 status = nfserr_clid_inuse; 3580 trace_nfsd_clid_cred_mismatch(conf, rqstp); 3581 goto out; 3582 } 3583 goto out_new; 3584 } 3585 if (verfs_match) { /* case 2 */ 3586 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 3587 trace_nfsd_clid_confirmed_r(conf); 3588 goto out_copy; 3589 } 3590 /* case 5, client reboot */ 3591 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf); 3592 conf = NULL; 3593 goto out_new; 3594 } 3595 3596 if (update) { /* case 7 */ 3597 status = nfserr_noent; 3598 goto out; 3599 } 3600 3601 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 3602 if (unconf) /* case 4, possible retry or client restart */ 3603 unhash_client_locked(unconf); 3604 3605 /* case 1, new owner ID */ 3606 trace_nfsd_clid_fresh(new); 3607 3608 out_new: 3609 if (conf) { 3610 status = mark_client_expired_locked(conf); 3611 if (status) 3612 goto out; 3613 trace_nfsd_clid_replaced(&conf->cl_clientid); 3614 } 3615 new->cl_minorversion = cstate->minorversion; 3616 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; 3617 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; 3618 3619 /* Contrived initial CREATE_SESSION response */ 3620 new->cl_cs_slot.sl_status = nfserr_seq_misordered; 3621 3622 add_to_unconfirmed(new); 3623 swap(new, conf); 3624 out_copy: 3625 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; 3626 exid->clientid.cl_id = conf->cl_clientid.cl_id; 3627 3628 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; 3629 nfsd4_set_ex_flags(conf, exid); 3630 3631 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 3632 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); 3633 status = nfs_ok; 3634 3635 out: 3636 spin_unlock(&nn->client_lock); 3637 out_nolock: 3638 if (new) 3639 expire_client(new); 3640 if (unconf) { 3641 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 3642 expire_client(unconf); 3643 } 3644 return status; 3645 } 3646 3647 static __be32 3648 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 3649 { 3650 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 3651 slot_seqid); 3652 3653 /* The slot is in use, and no response has been sent. 
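 * If the incoming seqid matches the busy slot's seqid, this looks like a
 * retransmission of the request still being processed, so the client is
 * asked to retry later (nfserr_jukebox); any other seqid arriving while
 * the slot is in use is misordered.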
*/ 3654 if (slot_inuse) { 3655 if (seqid == slot_seqid) 3656 return nfserr_jukebox; 3657 else 3658 return nfserr_seq_misordered; 3659 } 3660 /* Note unsigned 32-bit arithmetic handles wraparound: */ 3661 if (likely(seqid == slot_seqid + 1)) 3662 return nfs_ok; 3663 if (seqid == slot_seqid) 3664 return nfserr_replay_cache; 3665 return nfserr_seq_misordered; 3666 } 3667 3668 /* 3669 * Cache the create session result into the create session single DRC 3670 * slot cache by saving the xdr structure. sl_seqid has been set. 3671 * Do this for solo or embedded create session operations. 3672 */ 3673 static void 3674 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 3675 struct nfsd4_clid_slot *slot, __be32 nfserr) 3676 { 3677 slot->sl_status = nfserr; 3678 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 3679 } 3680 3681 static __be32 3682 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 3683 struct nfsd4_clid_slot *slot) 3684 { 3685 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 3686 return slot->sl_status; 3687 } 3688 3689 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 3690 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 3691 1 + /* MIN tag is length with zero, only length */ \ 3692 3 + /* version, opcount, opcode */ \ 3693 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3694 /* seqid, slotID, slotID, cache */ \ 3695 4 ) * sizeof(__be32)) 3696 3697 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 3698 2 + /* verifier: AUTH_NULL, length 0 */\ 3699 1 + /* status */ \ 3700 1 + /* MIN tag is length with zero, only length */ \ 3701 3 + /* opcount, opcode, opstatus*/ \ 3702 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3703 /* seqid, slotID, slotID, slotID, status */ \ 3704 5 ) * sizeof(__be32)) 3705 3706 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 3707 { 3708 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; 3709 3710 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) 3711 return nfserr_toosmall; 3712 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) 3713 return nfserr_toosmall; 3714 ca->headerpadsz = 0; 3715 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 3716 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 3717 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 3718 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 3719 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 3720 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); 3721 /* 3722 * Note decreasing slot size below client's request may make it 3723 * difficult for client to function correctly, whereas 3724 * decreasing the number of slots will (just?) affect 3725 * performance. When short on memory we therefore prefer to 3726 * decrease number of slots instead of their size. Clients that 3727 * request larger slots than they need will get poor results: 3728 * Note that we always allow at least one slot, because our 3729 * accounting is soft and provides no guarantees either way. 3730 */ 3731 ca->maxreqs = nfsd4_get_drc_mem(ca, nn); 3732 3733 return nfs_ok; 3734 } 3735 3736 /* 3737 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. 3738 * These are based on similar macros in linux/sunrpc/msg_prot.h . 
3739 */ 3740 #define RPC_MAX_HEADER_WITH_AUTH_SYS \ 3741 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) 3742 3743 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ 3744 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) 3745 3746 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ 3747 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) 3748 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ 3749 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ 3750 sizeof(__be32)) 3751 3752 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) 3753 { 3754 ca->headerpadsz = 0; 3755 3756 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) 3757 return nfserr_toosmall; 3758 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) 3759 return nfserr_toosmall; 3760 ca->maxresp_cached = 0; 3761 if (ca->maxops < 2) 3762 return nfserr_toosmall; 3763 3764 return nfs_ok; 3765 } 3766 3767 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) 3768 { 3769 switch (cbs->flavor) { 3770 case RPC_AUTH_NULL: 3771 case RPC_AUTH_UNIX: 3772 return nfs_ok; 3773 default: 3774 /* 3775 * GSS case: the spec doesn't allow us to return this 3776 * error. But it also doesn't allow us not to support 3777 * GSS. 3778 * I'd rather this fail hard than return some error the 3779 * client might think it can already handle: 3780 */ 3781 return nfserr_encr_alg_unsupp; 3782 } 3783 } 3784 3785 __be32 3786 nfsd4_create_session(struct svc_rqst *rqstp, 3787 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 3788 { 3789 struct nfsd4_create_session *cr_ses = &u->create_session; 3790 struct sockaddr *sa = svc_addr(rqstp); 3791 struct nfs4_client *conf, *unconf; 3792 struct nfsd4_clid_slot *cs_slot; 3793 struct nfs4_client *old = NULL; 3794 struct nfsd4_session *new; 3795 struct nfsd4_conn *conn; 3796 __be32 status = 0; 3797 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3798 3799 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 3800 return nfserr_inval; 3801 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); 3802 if (status) 3803 return status; 3804 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); 3805 if (status) 3806 return status; 3807 status = check_backchannel_attrs(&cr_ses->back_channel); 3808 if (status) 3809 goto out_release_drc_mem; 3810 status = nfserr_jukebox; 3811 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); 3812 if (!new) 3813 goto out_release_drc_mem; 3814 conn = alloc_conn_from_crses(rqstp, cr_ses); 3815 if (!conn) 3816 goto out_free_session; 3817 3818 spin_lock(&nn->client_lock); 3819 3820 /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */ 3821 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 3822 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 3823 if (!conf && !unconf) { 3824 status = nfserr_stale_clientid; 3825 goto out_free_conn; 3826 } 3827 3828 /* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. */ 3829 if (conf) 3830 cs_slot = &conf->cl_cs_slot; 3831 else 3832 cs_slot = &unconf->cl_cs_slot; 3833 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3834 if (status) { 3835 if (status == nfserr_replay_cache) { 3836 status = nfsd4_replay_create_session(cr_ses, cs_slot); 3837 goto out_free_conn; 3838 } 3839 goto out_cache_error; 3840 } 3841 cs_slot->sl_seqid++; 3842 cr_ses->seqid = cs_slot->sl_seqid; 3843 3844 /* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. 
*/ 3845 if (conf) { 3846 status = nfserr_wrong_cred; 3847 if (!nfsd4_mach_creds_match(conf, rqstp)) 3848 goto out_cache_error; 3849 } else { 3850 status = nfserr_clid_inuse; 3851 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 3852 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 3853 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 3854 goto out_cache_error; 3855 } 3856 status = nfserr_wrong_cred; 3857 if (!nfsd4_mach_creds_match(unconf, rqstp)) 3858 goto out_cache_error; 3859 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 3860 if (old) { 3861 status = mark_client_expired_locked(old); 3862 if (status) { 3863 old = NULL; 3864 goto out_cache_error; 3865 } 3866 trace_nfsd_clid_replaced(&old->cl_clientid); 3867 } 3868 move_to_confirmed(unconf); 3869 conf = unconf; 3870 } 3871 3872 /* RFC 8881 Section 18.36.4 Phase 4: Session creation. */ 3873 status = nfs_ok; 3874 /* Persistent sessions are not supported */ 3875 cr_ses->flags &= ~SESSION4_PERSIST; 3876 /* Upshifting from TCP to RDMA is not supported */ 3877 cr_ses->flags &= ~SESSION4_RDMA; 3878 3879 init_session(rqstp, new, conf, cr_ses); 3880 nfsd4_get_session_locked(new); 3881 3882 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 3883 NFS4_MAX_SESSIONID_LEN); 3884 3885 /* cache solo and embedded create sessions under the client_lock */ 3886 nfsd4_cache_create_session(cr_ses, cs_slot, status); 3887 spin_unlock(&nn->client_lock); 3888 if (conf == unconf) 3889 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 3890 /* init connection and backchannel */ 3891 nfsd4_init_conn(rqstp, conn, new); 3892 nfsd4_put_session(new); 3893 if (old) 3894 expire_client(old); 3895 return status; 3896 3897 out_cache_error: 3898 nfsd4_cache_create_session(cr_ses, cs_slot, status); 3899 out_free_conn: 3900 spin_unlock(&nn->client_lock); 3901 free_conn(conn); 3902 if (old) 3903 expire_client(old); 3904 out_free_session: 3905 __free_session(new); 3906 out_release_drc_mem: 3907 nfsd4_put_drc_mem(&cr_ses->fore_channel); 3908 return status; 3909 } 3910 3911 static __be32 nfsd4_map_bcts_dir(u32 *dir) 3912 { 3913 switch (*dir) { 3914 case NFS4_CDFC4_FORE: 3915 case NFS4_CDFC4_BACK: 3916 return nfs_ok; 3917 case NFS4_CDFC4_FORE_OR_BOTH: 3918 case NFS4_CDFC4_BACK_OR_BOTH: 3919 *dir = NFS4_CDFC4_BOTH; 3920 return nfs_ok; 3921 } 3922 return nfserr_inval; 3923 } 3924 3925 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, 3926 struct nfsd4_compound_state *cstate, 3927 union nfsd4_op_u *u) 3928 { 3929 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; 3930 struct nfsd4_session *session = cstate->session; 3931 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3932 __be32 status; 3933 3934 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); 3935 if (status) 3936 return status; 3937 spin_lock(&nn->client_lock); 3938 session->se_cb_prog = bc->bc_cb_program; 3939 session->se_cb_sec = bc->bc_cb_sec; 3940 spin_unlock(&nn->client_lock); 3941 3942 nfsd4_probe_callback(session->se_client); 3943 3944 return nfs_ok; 3945 } 3946 3947 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 3948 { 3949 struct nfsd4_conn *c; 3950 3951 list_for_each_entry(c, &s->se_conns, cn_persession) { 3952 if (c->cn_xprt == xpt) { 3953 return c; 3954 } 3955 } 3956 return NULL; 3957 } 3958 3959 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, 3960 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn) 3961 { 3962 struct nfs4_client *clp = session->se_client; 3963 struct svc_xprt *xpt = 
rqst->rq_xprt; 3964 struct nfsd4_conn *c; 3965 __be32 status; 3966 3967 /* Following the last paragraph of RFC 5661 Section 18.34.3: */ 3968 spin_lock(&clp->cl_lock); 3969 c = __nfsd4_find_conn(xpt, session); 3970 if (!c) 3971 status = nfserr_noent; 3972 else if (req == c->cn_flags) 3973 status = nfs_ok; 3974 else if (req == NFS4_CDFC4_FORE_OR_BOTH && 3975 c->cn_flags != NFS4_CDFC4_BACK) 3976 status = nfs_ok; 3977 else if (req == NFS4_CDFC4_BACK_OR_BOTH && 3978 c->cn_flags != NFS4_CDFC4_FORE) 3979 status = nfs_ok; 3980 else 3981 status = nfserr_inval; 3982 spin_unlock(&clp->cl_lock); 3983 if (status == nfs_ok && conn) 3984 *conn = c; 3985 return status; 3986 } 3987 3988 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 3989 struct nfsd4_compound_state *cstate, 3990 union nfsd4_op_u *u) 3991 { 3992 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; 3993 __be32 status; 3994 struct nfsd4_conn *conn; 3995 struct nfsd4_session *session; 3996 struct net *net = SVC_NET(rqstp); 3997 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3998 3999 if (!nfsd4_last_compound_op(rqstp)) 4000 return nfserr_not_only_op; 4001 spin_lock(&nn->client_lock); 4002 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); 4003 spin_unlock(&nn->client_lock); 4004 if (!session) 4005 goto out_no_session; 4006 status = nfserr_wrong_cred; 4007 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) 4008 goto out; 4009 status = nfsd4_match_existing_connection(rqstp, session, 4010 bcts->dir, &conn); 4011 if (status == nfs_ok) { 4012 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || 4013 bcts->dir == NFS4_CDFC4_BACK) 4014 conn->cn_flags |= NFS4_CDFC4_BACK; 4015 nfsd4_probe_callback(session->se_client); 4016 goto out; 4017 } 4018 if (status == nfserr_inval) 4019 goto out; 4020 status = nfsd4_map_bcts_dir(&bcts->dir); 4021 if (status) 4022 goto out; 4023 conn = alloc_conn(rqstp, bcts->dir); 4024 status = nfserr_jukebox; 4025 if (!conn) 4026 goto out; 4027 nfsd4_init_conn(rqstp, conn, session); 4028 status = nfs_ok; 4029 out: 4030 nfsd4_put_session(session); 4031 out_no_session: 4032 return status; 4033 } 4034 4035 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid) 4036 { 4037 if (!cstate->session) 4038 return false; 4039 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); 4040 } 4041 4042 __be32 4043 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, 4044 union nfsd4_op_u *u) 4045 { 4046 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; 4047 struct nfsd4_session *ses; 4048 __be32 status; 4049 int ref_held_by_me = 0; 4050 struct net *net = SVC_NET(r); 4051 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4052 4053 status = nfserr_not_only_op; 4054 if (nfsd4_compound_in_session(cstate, sessionid)) { 4055 if (!nfsd4_last_compound_op(r)) 4056 goto out; 4057 ref_held_by_me++; 4058 } 4059 dump_sessionid(__func__, sessionid); 4060 spin_lock(&nn->client_lock); 4061 ses = find_in_sessionid_hashtbl(sessionid, net, &status); 4062 if (!ses) 4063 goto out_client_lock; 4064 status = nfserr_wrong_cred; 4065 if (!nfsd4_mach_creds_match(ses->se_client, r)) 4066 goto out_put_session; 4067 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 4068 if (status) 4069 goto out_put_session; 4070 unhash_session(ses); 4071 spin_unlock(&nn->client_lock); 4072 4073 nfsd4_probe_callback_sync(ses->se_client); 4074 4075 spin_lock(&nn->client_lock); 4076 status = nfs_ok; 4077 out_put_session: 4078 
nfsd4_put_session_locked(ses); 4079 out_client_lock: 4080 spin_unlock(&nn->client_lock); 4081 out: 4082 return status; 4083 } 4084 4085 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 4086 { 4087 struct nfs4_client *clp = ses->se_client; 4088 struct nfsd4_conn *c; 4089 __be32 status = nfs_ok; 4090 int ret; 4091 4092 spin_lock(&clp->cl_lock); 4093 c = __nfsd4_find_conn(new->cn_xprt, ses); 4094 if (c) 4095 goto out_free; 4096 status = nfserr_conn_not_bound_to_session; 4097 if (clp->cl_mach_cred) 4098 goto out_free; 4099 __nfsd4_hash_conn(new, ses); 4100 spin_unlock(&clp->cl_lock); 4101 ret = nfsd4_register_conn(new); 4102 if (ret) 4103 /* oops; xprt is already down: */ 4104 nfsd4_conn_lost(&new->cn_xpt_user); 4105 return nfs_ok; 4106 out_free: 4107 spin_unlock(&clp->cl_lock); 4108 free_conn(new); 4109 return status; 4110 } 4111 4112 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 4113 { 4114 struct nfsd4_compoundargs *args = rqstp->rq_argp; 4115 4116 return args->opcnt > session->se_fchannel.maxops; 4117 } 4118 4119 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 4120 struct nfsd4_session *session) 4121 { 4122 struct xdr_buf *xb = &rqstp->rq_arg; 4123 4124 return xb->len > session->se_fchannel.maxreq_sz; 4125 } 4126 4127 static bool replay_matches_cache(struct svc_rqst *rqstp, 4128 struct nfsd4_sequence *seq, struct nfsd4_slot *slot) 4129 { 4130 struct nfsd4_compoundargs *argp = rqstp->rq_argp; 4131 4132 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != 4133 (bool)seq->cachethis) 4134 return false; 4135 /* 4136 * If there's an error then the reply can have fewer ops than 4137 * the call. 4138 */ 4139 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) 4140 return false; 4141 /* 4142 * But if we cached a reply with *more* ops than the call you're 4143 * sending us now, then this new call is clearly not really a 4144 * replay of the old one: 4145 */ 4146 if (slot->sl_opcnt > argp->opcnt) 4147 return false; 4148 /* This is the only check explicitly called by spec: */ 4149 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) 4150 return false; 4151 /* 4152 * There may be more comparisons we could actually do, but the 4153 * spec doesn't require us to catch every case where the calls 4154 * don't match (that would require caching the call as well as 4155 * the reply), so we don't bother. 4156 */ 4157 return true; 4158 } 4159 4160 __be32 4161 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4162 union nfsd4_op_u *u) 4163 { 4164 struct nfsd4_sequence *seq = &u->sequence; 4165 struct nfsd4_compoundres *resp = rqstp->rq_resp; 4166 struct xdr_stream *xdr = resp->xdr; 4167 struct nfsd4_session *session; 4168 struct nfs4_client *clp; 4169 struct nfsd4_slot *slot; 4170 struct nfsd4_conn *conn; 4171 __be32 status; 4172 int buflen; 4173 struct net *net = SVC_NET(rqstp); 4174 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4175 4176 if (resp->opcnt != 1) 4177 return nfserr_sequence_pos; 4178 4179 /* 4180 * Will be either used or freed by nfsd4_sequence_check_conn 4181 * below. 
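 * nfsd4_sequence_check_conn() either hashes the connection into the
 * session or frees it itself; conn is cleared after that call, so the
 * free_conn() at out_no_session only covers the paths that bail out
 * before it runs.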
4182 */ 4183 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 4184 if (!conn) 4185 return nfserr_jukebox; 4186 4187 spin_lock(&nn->client_lock); 4188 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); 4189 if (!session) 4190 goto out_no_session; 4191 clp = session->se_client; 4192 4193 status = nfserr_too_many_ops; 4194 if (nfsd4_session_too_many_ops(rqstp, session)) 4195 goto out_put_session; 4196 4197 status = nfserr_req_too_big; 4198 if (nfsd4_request_too_big(rqstp, session)) 4199 goto out_put_session; 4200 4201 status = nfserr_badslot; 4202 if (seq->slotid >= session->se_fchannel.maxreqs) 4203 goto out_put_session; 4204 4205 slot = session->se_slots[seq->slotid]; 4206 dprintk("%s: slotid %d\n", __func__, seq->slotid); 4207 4208 /* We do not negotiate the number of slots yet, so set the 4209 * maxslots to the session maxreqs which is used to encode 4210 * sr_highest_slotid and the sr_target_slot id to maxslots */ 4211 seq->maxslots = session->se_fchannel.maxreqs; 4212 4213 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 4214 slot->sl_flags & NFSD4_SLOT_INUSE); 4215 if (status == nfserr_replay_cache) { 4216 status = nfserr_seq_misordered; 4217 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 4218 goto out_put_session; 4219 status = nfserr_seq_false_retry; 4220 if (!replay_matches_cache(rqstp, seq, slot)) 4221 goto out_put_session; 4222 cstate->slot = slot; 4223 cstate->session = session; 4224 cstate->clp = clp; 4225 /* Return the cached reply status and set cstate->status 4226 * for nfsd4_proc_compound processing */ 4227 status = nfsd4_replay_cache_entry(resp, seq); 4228 cstate->status = nfserr_replay_cache; 4229 goto out; 4230 } 4231 if (status) 4232 goto out_put_session; 4233 4234 status = nfsd4_sequence_check_conn(conn, session); 4235 conn = NULL; 4236 if (status) 4237 goto out_put_session; 4238 4239 buflen = (seq->cachethis) ? 4240 session->se_fchannel.maxresp_cached : 4241 session->se_fchannel.maxresp_sz; 4242 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : 4243 nfserr_rep_too_big; 4244 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) 4245 goto out_put_session; 4246 svc_reserve(rqstp, buflen); 4247 4248 status = nfs_ok; 4249 /* Success! 
bump slot seqid */ 4250 slot->sl_seqid = seq->seqid; 4251 slot->sl_flags |= NFSD4_SLOT_INUSE; 4252 if (seq->cachethis) 4253 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 4254 else 4255 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 4256 4257 cstate->slot = slot; 4258 cstate->session = session; 4259 cstate->clp = clp; 4260 4261 out: 4262 switch (clp->cl_cb_state) { 4263 case NFSD4_CB_DOWN: 4264 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 4265 break; 4266 case NFSD4_CB_FAULT: 4267 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 4268 break; 4269 default: 4270 seq->status_flags = 0; 4271 } 4272 if (!list_empty(&clp->cl_revoked)) 4273 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; 4274 if (atomic_read(&clp->cl_admin_revoked)) 4275 seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED; 4276 trace_nfsd_seq4_status(rqstp, seq); 4277 out_no_session: 4278 if (conn) 4279 free_conn(conn); 4280 spin_unlock(&nn->client_lock); 4281 return status; 4282 out_put_session: 4283 nfsd4_put_session_locked(session); 4284 goto out_no_session; 4285 } 4286 4287 void 4288 nfsd4_sequence_done(struct nfsd4_compoundres *resp) 4289 { 4290 struct nfsd4_compound_state *cs = &resp->cstate; 4291 4292 if (nfsd4_has_session(cs)) { 4293 if (cs->status != nfserr_replay_cache) { 4294 nfsd4_store_cache_entry(resp); 4295 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 4296 } 4297 /* Drop session reference that was taken in nfsd4_sequence() */ 4298 nfsd4_put_session(cs->session); 4299 } else if (cs->clp) 4300 put_client_renew(cs->clp); 4301 } 4302 4303 __be32 4304 nfsd4_destroy_clientid(struct svc_rqst *rqstp, 4305 struct nfsd4_compound_state *cstate, 4306 union nfsd4_op_u *u) 4307 { 4308 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; 4309 struct nfs4_client *conf, *unconf; 4310 struct nfs4_client *clp = NULL; 4311 __be32 status = 0; 4312 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4313 4314 spin_lock(&nn->client_lock); 4315 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 4316 conf = find_confirmed_client(&dc->clientid, true, nn); 4317 WARN_ON_ONCE(conf && unconf); 4318 4319 if (conf) { 4320 if (client_has_state(conf)) { 4321 status = nfserr_clientid_busy; 4322 goto out; 4323 } 4324 status = mark_client_expired_locked(conf); 4325 if (status) 4326 goto out; 4327 clp = conf; 4328 } else if (unconf) 4329 clp = unconf; 4330 else { 4331 status = nfserr_stale_clientid; 4332 goto out; 4333 } 4334 if (!nfsd4_mach_creds_match(clp, rqstp)) { 4335 clp = NULL; 4336 status = nfserr_wrong_cred; 4337 goto out; 4338 } 4339 trace_nfsd_clid_destroyed(&clp->cl_clientid); 4340 unhash_client_locked(clp); 4341 out: 4342 spin_unlock(&nn->client_lock); 4343 if (clp) 4344 expire_client(clp); 4345 return status; 4346 } 4347 4348 __be32 4349 nfsd4_reclaim_complete(struct svc_rqst *rqstp, 4350 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 4351 { 4352 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; 4353 struct nfs4_client *clp = cstate->clp; 4354 __be32 status = 0; 4355 4356 if (rc->rca_one_fs) { 4357 if (!cstate->current_fh.fh_dentry) 4358 return nfserr_nofilehandle; 4359 /* 4360 * We don't take advantage of the rca_one_fs case. 4361 * That's OK, it's optional, we can safely ignore it. 4362 */ 4363 return nfs_ok; 4364 } 4365 4366 status = nfserr_complete_already; 4367 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 4368 goto out; 4369 4370 status = nfserr_stale_clientid; 4371 if (is_client_expired(clp)) 4372 /* 4373 * The following error isn't really legal. 
4374 * But we only get here if the client just explicitly 4375 * destroyed the client. Surely it no longer cares what 4376 * error it gets back on an operation for the dead 4377 * client. 4378 */ 4379 goto out; 4380 4381 status = nfs_ok; 4382 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); 4383 nfsd4_client_record_create(clp); 4384 inc_reclaim_complete(clp); 4385 out: 4386 return status; 4387 } 4388 4389 __be32 4390 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4391 union nfsd4_op_u *u) 4392 { 4393 struct nfsd4_setclientid *setclid = &u->setclientid; 4394 struct xdr_netobj clname = setclid->se_name; 4395 nfs4_verifier clverifier = setclid->se_verf; 4396 struct nfs4_client *conf, *new; 4397 struct nfs4_client *unconf = NULL; 4398 __be32 status; 4399 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4400 4401 new = create_client(clname, rqstp, &clverifier); 4402 if (new == NULL) 4403 return nfserr_jukebox; 4404 spin_lock(&nn->client_lock); 4405 conf = find_confirmed_client_by_name(&clname, nn); 4406 if (conf && client_has_state(conf)) { 4407 status = nfserr_clid_inuse; 4408 if (clp_used_exchangeid(conf)) 4409 goto out; 4410 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4411 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4412 goto out; 4413 } 4414 } 4415 unconf = find_unconfirmed_client_by_name(&clname, nn); 4416 if (unconf) 4417 unhash_client_locked(unconf); 4418 if (conf) { 4419 if (same_verf(&conf->cl_verifier, &clverifier)) { 4420 copy_clid(new, conf); 4421 gen_confirm(new, nn); 4422 } else 4423 trace_nfsd_clid_verf_mismatch(conf, rqstp, 4424 &clverifier); 4425 } else 4426 trace_nfsd_clid_fresh(new); 4427 new->cl_minorversion = 0; 4428 gen_callback(new, setclid, rqstp); 4429 add_to_unconfirmed(new); 4430 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 4431 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 4432 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 4433 new = NULL; 4434 status = nfs_ok; 4435 out: 4436 spin_unlock(&nn->client_lock); 4437 if (new) 4438 free_client(new); 4439 if (unconf) { 4440 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 4441 expire_client(unconf); 4442 } 4443 return status; 4444 } 4445 4446 __be32 4447 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 4448 struct nfsd4_compound_state *cstate, 4449 union nfsd4_op_u *u) 4450 { 4451 struct nfsd4_setclientid_confirm *setclientid_confirm = 4452 &u->setclientid_confirm; 4453 struct nfs4_client *conf, *unconf; 4454 struct nfs4_client *old = NULL; 4455 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 4456 clientid_t * clid = &setclientid_confirm->sc_clientid; 4457 __be32 status; 4458 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4459 4460 if (STALE_CLIENTID(clid, nn)) 4461 return nfserr_stale_clientid; 4462 4463 spin_lock(&nn->client_lock); 4464 conf = find_confirmed_client(clid, false, nn); 4465 unconf = find_unconfirmed_client(clid, false, nn); 4466 /* 4467 * We try hard to give out unique clientid's, so if we get an 4468 * attempt to confirm the same clientid with a different cred, 4469 * the client may be buggy; this should never happen. 
4470 * 4471 * Nevertheless, RFC 7530 recommends INUSE for this case: 4472 */ 4473 status = nfserr_clid_inuse; 4474 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { 4475 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 4476 goto out; 4477 } 4478 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4479 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4480 goto out; 4481 } 4482 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 4483 if (conf && same_verf(&confirm, &conf->cl_confirm)) { 4484 status = nfs_ok; 4485 } else 4486 status = nfserr_stale_clientid; 4487 goto out; 4488 } 4489 status = nfs_ok; 4490 if (conf) { 4491 old = unconf; 4492 unhash_client_locked(old); 4493 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 4494 } else { 4495 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 4496 if (old) { 4497 status = nfserr_clid_inuse; 4498 if (client_has_state(old) 4499 && !same_creds(&unconf->cl_cred, 4500 &old->cl_cred)) { 4501 old = NULL; 4502 goto out; 4503 } 4504 status = mark_client_expired_locked(old); 4505 if (status) { 4506 old = NULL; 4507 goto out; 4508 } 4509 trace_nfsd_clid_replaced(&old->cl_clientid); 4510 } 4511 move_to_confirmed(unconf); 4512 conf = unconf; 4513 } 4514 get_client_locked(conf); 4515 spin_unlock(&nn->client_lock); 4516 if (conf == unconf) 4517 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 4518 nfsd4_probe_callback(conf); 4519 spin_lock(&nn->client_lock); 4520 put_client_renew_locked(conf); 4521 out: 4522 spin_unlock(&nn->client_lock); 4523 if (old) 4524 expire_client(old); 4525 return status; 4526 } 4527 4528 static struct nfs4_file *nfsd4_alloc_file(void) 4529 { 4530 return kmem_cache_alloc(file_slab, GFP_KERNEL); 4531 } 4532 4533 /* OPEN Share state helper functions */ 4534 4535 static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp) 4536 { 4537 refcount_set(&fp->fi_ref, 1); 4538 spin_lock_init(&fp->fi_lock); 4539 INIT_LIST_HEAD(&fp->fi_stateids); 4540 INIT_LIST_HEAD(&fp->fi_delegations); 4541 INIT_LIST_HEAD(&fp->fi_clnt_odstate); 4542 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); 4543 fp->fi_deleg_file = NULL; 4544 fp->fi_had_conflict = false; 4545 fp->fi_share_deny = 0; 4546 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 4547 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 4548 fp->fi_aliased = false; 4549 fp->fi_inode = d_inode(fh->fh_dentry); 4550 #ifdef CONFIG_NFSD_PNFS 4551 INIT_LIST_HEAD(&fp->fi_lo_states); 4552 atomic_set(&fp->fi_lo_recalls, 0); 4553 #endif 4554 } 4555 4556 void 4557 nfsd4_free_slabs(void) 4558 { 4559 kmem_cache_destroy(client_slab); 4560 kmem_cache_destroy(openowner_slab); 4561 kmem_cache_destroy(lockowner_slab); 4562 kmem_cache_destroy(file_slab); 4563 kmem_cache_destroy(stateid_slab); 4564 kmem_cache_destroy(deleg_slab); 4565 kmem_cache_destroy(odstate_slab); 4566 } 4567 4568 int 4569 nfsd4_init_slabs(void) 4570 { 4571 client_slab = KMEM_CACHE(nfs4_client, 0); 4572 if (client_slab == NULL) 4573 goto out; 4574 openowner_slab = KMEM_CACHE(nfs4_openowner, 0); 4575 if (openowner_slab == NULL) 4576 goto out_free_client_slab; 4577 lockowner_slab = KMEM_CACHE(nfs4_lockowner, 0); 4578 if (lockowner_slab == NULL) 4579 goto out_free_openowner_slab; 4580 file_slab = KMEM_CACHE(nfs4_file, 0); 4581 if (file_slab == NULL) 4582 goto out_free_lockowner_slab; 4583 stateid_slab = KMEM_CACHE(nfs4_ol_stateid, 0); 4584 if (stateid_slab == NULL) 4585 goto out_free_file_slab; 4586 deleg_slab = KMEM_CACHE(nfs4_delegation, 0); 4587 if (deleg_slab == NULL) 4588 goto out_free_stateid_slab; 4589 
odstate_slab = KMEM_CACHE(nfs4_clnt_odstate, 0); 4590 if (odstate_slab == NULL) 4591 goto out_free_deleg_slab; 4592 return 0; 4593 4594 out_free_deleg_slab: 4595 kmem_cache_destroy(deleg_slab); 4596 out_free_stateid_slab: 4597 kmem_cache_destroy(stateid_slab); 4598 out_free_file_slab: 4599 kmem_cache_destroy(file_slab); 4600 out_free_lockowner_slab: 4601 kmem_cache_destroy(lockowner_slab); 4602 out_free_openowner_slab: 4603 kmem_cache_destroy(openowner_slab); 4604 out_free_client_slab: 4605 kmem_cache_destroy(client_slab); 4606 out: 4607 return -ENOMEM; 4608 } 4609 4610 static unsigned long 4611 nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) 4612 { 4613 int count; 4614 struct nfsd_net *nn = shrink->private_data; 4615 4616 count = atomic_read(&nn->nfsd_courtesy_clients); 4617 if (!count) 4618 count = atomic_long_read(&num_delegations); 4619 if (count) 4620 queue_work(laundry_wq, &nn->nfsd_shrinker_work); 4621 return (unsigned long)count; 4622 } 4623 4624 static unsigned long 4625 nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) 4626 { 4627 return SHRINK_STOP; 4628 } 4629 4630 void 4631 nfsd4_init_leases_net(struct nfsd_net *nn) 4632 { 4633 struct sysinfo si; 4634 u64 max_clients; 4635 4636 nn->nfsd4_lease = 90; /* default lease time */ 4637 nn->nfsd4_grace = 90; 4638 nn->somebody_reclaimed = false; 4639 nn->track_reclaim_completes = false; 4640 nn->clverifier_counter = get_random_u32(); 4641 nn->clientid_base = get_random_u32(); 4642 nn->clientid_counter = nn->clientid_base + 1; 4643 nn->s2s_cp_cl_id = nn->clientid_counter++; 4644 4645 atomic_set(&nn->nfs4_client_count, 0); 4646 si_meminfo(&si); 4647 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024); 4648 max_clients *= NFS4_CLIENTS_PER_GB; 4649 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); 4650 4651 atomic_set(&nn->nfsd_courtesy_clients, 0); 4652 } 4653 4654 static void init_nfs4_replay(struct nfs4_replay *rp) 4655 { 4656 rp->rp_status = nfserr_serverfault; 4657 rp->rp_buflen = 0; 4658 rp->rp_buf = rp->rp_ibuf; 4659 mutex_init(&rp->rp_mutex); 4660 } 4661 4662 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, 4663 struct nfs4_stateowner *so) 4664 { 4665 if (!nfsd4_has_session(cstate)) { 4666 mutex_lock(&so->so_replay.rp_mutex); 4667 cstate->replay_owner = nfs4_get_stateowner(so); 4668 } 4669 } 4670 4671 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) 4672 { 4673 struct nfs4_stateowner *so = cstate->replay_owner; 4674 4675 if (so != NULL) { 4676 cstate->replay_owner = NULL; 4677 mutex_unlock(&so->so_replay.rp_mutex); 4678 nfs4_put_stateowner(so); 4679 } 4680 } 4681 4682 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 4683 { 4684 struct nfs4_stateowner *sop; 4685 4686 sop = kmem_cache_alloc(slab, GFP_KERNEL); 4687 if (!sop) 4688 return NULL; 4689 4690 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); 4691 if (!sop->so_owner.data) { 4692 kmem_cache_free(slab, sop); 4693 return NULL; 4694 } 4695 4696 INIT_LIST_HEAD(&sop->so_stateids); 4697 sop->so_client = clp; 4698 init_nfs4_replay(&sop->so_replay); 4699 atomic_set(&sop->so_count, 1); 4700 return sop; 4701 } 4702 4703 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 4704 { 4705 lockdep_assert_held(&clp->cl_lock); 4706 4707 list_add(&oo->oo_owner.so_strhash, 4708 &clp->cl_ownerstr_hashtbl[strhashval]); 4709 
list_add(&oo->oo_perclient, &clp->cl_openowners); 4710 } 4711 4712 static void nfs4_unhash_openowner(struct nfs4_stateowner *so) 4713 { 4714 unhash_openowner_locked(openowner(so)); 4715 } 4716 4717 static void nfs4_free_openowner(struct nfs4_stateowner *so) 4718 { 4719 struct nfs4_openowner *oo = openowner(so); 4720 4721 kmem_cache_free(openowner_slab, oo); 4722 } 4723 4724 static const struct nfs4_stateowner_operations openowner_ops = { 4725 .so_unhash = nfs4_unhash_openowner, 4726 .so_free = nfs4_free_openowner, 4727 }; 4728 4729 static struct nfs4_ol_stateid * 4730 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4731 { 4732 struct nfs4_ol_stateid *local, *ret = NULL; 4733 struct nfs4_openowner *oo = open->op_openowner; 4734 4735 lockdep_assert_held(&fp->fi_lock); 4736 4737 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 4738 /* ignore lock owners */ 4739 if (local->st_stateowner->so_is_open_owner == 0) 4740 continue; 4741 if (local->st_stateowner != &oo->oo_owner) 4742 continue; 4743 if (local->st_stid.sc_type == SC_TYPE_OPEN && 4744 !local->st_stid.sc_status) { 4745 ret = local; 4746 refcount_inc(&ret->st_stid.sc_count); 4747 break; 4748 } 4749 } 4750 return ret; 4751 } 4752 4753 static void nfsd4_drop_revoked_stid(struct nfs4_stid *s) 4754 __releases(&s->sc_client->cl_lock) 4755 { 4756 struct nfs4_client *cl = s->sc_client; 4757 LIST_HEAD(reaplist); 4758 struct nfs4_ol_stateid *stp; 4759 struct nfs4_delegation *dp; 4760 bool unhashed; 4761 4762 switch (s->sc_type) { 4763 case SC_TYPE_OPEN: 4764 stp = openlockstateid(s); 4765 if (unhash_open_stateid(stp, &reaplist)) 4766 put_ol_stateid_locked(stp, &reaplist); 4767 spin_unlock(&cl->cl_lock); 4768 free_ol_stateid_reaplist(&reaplist); 4769 break; 4770 case SC_TYPE_LOCK: 4771 stp = openlockstateid(s); 4772 unhashed = unhash_lock_stateid(stp); 4773 spin_unlock(&cl->cl_lock); 4774 if (unhashed) 4775 nfs4_put_stid(s); 4776 break; 4777 case SC_TYPE_DELEG: 4778 dp = delegstateid(s); 4779 list_del_init(&dp->dl_recall_lru); 4780 spin_unlock(&cl->cl_lock); 4781 nfs4_put_stid(s); 4782 break; 4783 default: 4784 spin_unlock(&cl->cl_lock); 4785 } 4786 } 4787 4788 static void nfsd40_drop_revoked_stid(struct nfs4_client *cl, 4789 stateid_t *stid) 4790 { 4791 /* NFSv4.0 has no way for the client to tell the server 4792 * that it can forget an admin-revoked stateid. 4793 * So we keep it around until the first time that the 4794 * client uses it, and drop it the first time 4795 * nfserr_admin_revoked is returned. 4796 * For v4.1 and later we wait until explicitly told 4797 * to free the stateid. 
4798 */ 4799 if (cl->cl_minorversion == 0) { 4800 struct nfs4_stid *st; 4801 4802 spin_lock(&cl->cl_lock); 4803 st = find_stateid_locked(cl, stid); 4804 if (st) 4805 nfsd4_drop_revoked_stid(st); 4806 else 4807 spin_unlock(&cl->cl_lock); 4808 } 4809 } 4810 4811 static __be32 4812 nfsd4_verify_open_stid(struct nfs4_stid *s) 4813 { 4814 __be32 ret = nfs_ok; 4815 4816 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) 4817 ret = nfserr_admin_revoked; 4818 else if (s->sc_status & SC_STATUS_REVOKED) 4819 ret = nfserr_deleg_revoked; 4820 else if (s->sc_status & SC_STATUS_CLOSED) 4821 ret = nfserr_bad_stateid; 4822 return ret; 4823 } 4824 4825 /* Lock the stateid st_mutex, and deal with races with CLOSE */ 4826 static __be32 4827 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) 4828 { 4829 __be32 ret; 4830 4831 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); 4832 ret = nfsd4_verify_open_stid(&stp->st_stid); 4833 if (ret == nfserr_admin_revoked) 4834 nfsd40_drop_revoked_stid(stp->st_stid.sc_client, 4835 &stp->st_stid.sc_stateid); 4836 4837 if (ret != nfs_ok) 4838 mutex_unlock(&stp->st_mutex); 4839 return ret; 4840 } 4841 4842 static struct nfs4_ol_stateid * 4843 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4844 { 4845 struct nfs4_ol_stateid *stp; 4846 for (;;) { 4847 spin_lock(&fp->fi_lock); 4848 stp = nfsd4_find_existing_open(fp, open); 4849 spin_unlock(&fp->fi_lock); 4850 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) 4851 break; 4852 nfs4_put_stid(&stp->st_stid); 4853 } 4854 return stp; 4855 } 4856 4857 static struct nfs4_openowner * 4858 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, 4859 struct nfsd4_compound_state *cstate) 4860 { 4861 struct nfs4_client *clp = cstate->clp; 4862 struct nfs4_openowner *oo, *ret; 4863 4864 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 4865 if (!oo) 4866 return NULL; 4867 oo->oo_owner.so_ops = &openowner_ops; 4868 oo->oo_owner.so_is_open_owner = 1; 4869 oo->oo_owner.so_seqid = open->op_seqid; 4870 oo->oo_flags = 0; 4871 if (nfsd4_has_session(cstate)) 4872 oo->oo_flags |= NFS4_OO_CONFIRMED; 4873 oo->oo_time = 0; 4874 oo->oo_last_closed_stid = NULL; 4875 INIT_LIST_HEAD(&oo->oo_close_lru); 4876 spin_lock(&clp->cl_lock); 4877 ret = find_openstateowner_str_locked(strhashval, open, clp); 4878 if (ret == NULL) { 4879 hash_openowner(oo, clp, strhashval); 4880 ret = oo; 4881 } else 4882 nfs4_free_stateowner(&oo->oo_owner); 4883 4884 spin_unlock(&clp->cl_lock); 4885 return ret; 4886 } 4887 4888 static struct nfs4_ol_stateid * 4889 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) 4890 { 4891 4892 struct nfs4_openowner *oo = open->op_openowner; 4893 struct nfs4_ol_stateid *retstp = NULL; 4894 struct nfs4_ol_stateid *stp; 4895 4896 stp = open->op_stp; 4897 /* We are moving these outside of the spinlocks to avoid the warnings */ 4898 mutex_init(&stp->st_mutex); 4899 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 4900 4901 retry: 4902 spin_lock(&oo->oo_owner.so_client->cl_lock); 4903 spin_lock(&fp->fi_lock); 4904 4905 retstp = nfsd4_find_existing_open(fp, open); 4906 if (retstp) 4907 goto out_unlock; 4908 4909 open->op_stp = NULL; 4910 refcount_inc(&stp->st_stid.sc_count); 4911 stp->st_stid.sc_type = SC_TYPE_OPEN; 4912 INIT_LIST_HEAD(&stp->st_locks); 4913 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); 4914 get_nfs4_file(fp); 4915 stp->st_stid.sc_file = fp; 4916 stp->st_access_bmap = 0; 4917 stp->st_deny_bmap = 0; 4918 stp->st_openstp = NULL; 4919 
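/* Publish the new open stateid on the owner's and the file's lists: */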
list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 4920 list_add(&stp->st_perfile, &fp->fi_stateids); 4921 4922 out_unlock: 4923 spin_unlock(&fp->fi_lock); 4924 spin_unlock(&oo->oo_owner.so_client->cl_lock); 4925 if (retstp) { 4926 /* Handle races with CLOSE */ 4927 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 4928 nfs4_put_stid(&retstp->st_stid); 4929 goto retry; 4930 } 4931 /* To keep mutex tracking happy */ 4932 mutex_unlock(&stp->st_mutex); 4933 stp = retstp; 4934 } 4935 return stp; 4936 } 4937 4938 /* 4939 * In the 4.0 case we need to keep the owners around a little while to handle 4940 * CLOSE replay. We still do need to release any file access that is held by 4941 * them before returning however. 4942 */ 4943 static void 4944 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) 4945 { 4946 struct nfs4_ol_stateid *last; 4947 struct nfs4_openowner *oo = openowner(s->st_stateowner); 4948 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, 4949 nfsd_net_id); 4950 4951 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 4952 4953 /* 4954 * We know that we hold one reference via nfsd4_close, and another 4955 * "persistent" reference for the client. If the refcount is higher 4956 * than 2, then there are still calls in progress that are using this 4957 * stateid. We can't put the sc_file reference until they are finished. 4958 * Wait for the refcount to drop to 2. Since it has been unhashed, 4959 * there should be no danger of the refcount going back up again at 4960 * this point. 4961 */ 4962 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); 4963 4964 release_all_access(s); 4965 if (s->st_stid.sc_file) { 4966 put_nfs4_file(s->st_stid.sc_file); 4967 s->st_stid.sc_file = NULL; 4968 } 4969 4970 spin_lock(&nn->client_lock); 4971 last = oo->oo_last_closed_stid; 4972 oo->oo_last_closed_stid = s; 4973 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 4974 oo->oo_time = ktime_get_boottime_seconds(); 4975 spin_unlock(&nn->client_lock); 4976 if (last) 4977 nfs4_put_stid(&last->st_stid); 4978 } 4979 4980 static noinline_for_stack struct nfs4_file * 4981 nfsd4_file_hash_lookup(const struct svc_fh *fhp) 4982 { 4983 struct inode *inode = d_inode(fhp->fh_dentry); 4984 struct rhlist_head *tmp, *list; 4985 struct nfs4_file *fi; 4986 4987 rcu_read_lock(); 4988 list = rhltable_lookup(&nfs4_file_rhltable, &inode, 4989 nfs4_file_rhash_params); 4990 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { 4991 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { 4992 if (refcount_inc_not_zero(&fi->fi_ref)) { 4993 rcu_read_unlock(); 4994 return fi; 4995 } 4996 } 4997 } 4998 rcu_read_unlock(); 4999 return NULL; 5000 } 5001 5002 /* 5003 * On hash insertion, identify entries with the same inode but 5004 * distinct filehandles. They will all be on the list returned 5005 * by rhltable_lookup(). 5006 * 5007 * inode->i_lock prevents racing insertions from adding an entry 5008 * for the same inode/fhp pair twice. 
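 *
 * The rhltable is keyed only on the inode pointer, so a lookup may
 * return aliases (entries for other filehandles on the same inode);
 * fh_match() picks out the exact entry, and fi_aliased records that
 * such aliases exist.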
5009 */ 5010 static noinline_for_stack struct nfs4_file * 5011 nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp) 5012 { 5013 struct inode *inode = d_inode(fhp->fh_dentry); 5014 struct rhlist_head *tmp, *list; 5015 struct nfs4_file *ret = NULL; 5016 bool alias_found = false; 5017 struct nfs4_file *fi; 5018 int err; 5019 5020 rcu_read_lock(); 5021 spin_lock(&inode->i_lock); 5022 5023 list = rhltable_lookup(&nfs4_file_rhltable, &inode, 5024 nfs4_file_rhash_params); 5025 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { 5026 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { 5027 if (refcount_inc_not_zero(&fi->fi_ref)) 5028 ret = fi; 5029 } else 5030 fi->fi_aliased = alias_found = true; 5031 } 5032 if (ret) 5033 goto out_unlock; 5034 5035 nfsd4_file_init(fhp, new); 5036 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist, 5037 nfs4_file_rhash_params); 5038 if (err) 5039 goto out_unlock; 5040 5041 new->fi_aliased = alias_found; 5042 ret = new; 5043 5044 out_unlock: 5045 spin_unlock(&inode->i_lock); 5046 rcu_read_unlock(); 5047 return ret; 5048 } 5049 5050 static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi) 5051 { 5052 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist, 5053 nfs4_file_rhash_params); 5054 } 5055 5056 /* 5057 * Called to check deny when READ with all zero stateid or 5058 * WRITE with all zero or all one stateid 5059 */ 5060 static __be32 5061 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 5062 { 5063 struct nfs4_file *fp; 5064 __be32 ret = nfs_ok; 5065 5066 fp = nfsd4_file_hash_lookup(current_fh); 5067 if (!fp) 5068 return ret; 5069 5070 /* Check for conflicting share reservations */ 5071 spin_lock(&fp->fi_lock); 5072 if (fp->fi_share_deny & deny_type) 5073 ret = nfserr_locked; 5074 spin_unlock(&fp->fi_lock); 5075 put_nfs4_file(fp); 5076 return ret; 5077 } 5078 5079 static bool nfsd4_deleg_present(const struct inode *inode) 5080 { 5081 struct file_lock_context *ctx = locks_inode_context(inode); 5082 5083 return ctx && !list_empty_careful(&ctx->flc_lease); 5084 } 5085 5086 /** 5087 * nfsd_wait_for_delegreturn - wait for delegations to be returned 5088 * @rqstp: the RPC transaction being executed 5089 * @inode: in-core inode of the file being waited for 5090 * 5091 * The timeout prevents deadlock if all nfsd threads happen to be 5092 * tied up waiting for returning delegations. 5093 * 5094 * Return values: 5095 * %true: delegation was returned 5096 * %false: timed out waiting for delegreturn 5097 */ 5098 bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode) 5099 { 5100 long __maybe_unused timeo; 5101 5102 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode), 5103 NFSD_DELEGRETURN_TIMEOUT); 5104 trace_nfsd_delegret_wakeup(rqstp, inode, timeo); 5105 return timeo > 0; 5106 } 5107 5108 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) 5109 { 5110 struct nfs4_delegation *dp = cb_to_delegation(cb); 5111 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, 5112 nfsd_net_id); 5113 5114 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); 5115 5116 /* 5117 * We can't do this in nfsd_break_deleg_cb because it is 5118 * already holding inode->i_lock. 5119 * 5120 * If the dl_time != 0, then we know that it has already been 5121 * queued for a lease break. Don't queue it again. 
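 * (dl_time doubles as the "already queued" flag: it is set below,
 * under state_lock, at the same moment the delegation is added to
 * the per-net del_recall_lru list.)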
5122 */ 5123 spin_lock(&state_lock); 5124 if (delegation_hashed(dp) && dp->dl_time == 0) { 5125 dp->dl_time = ktime_get_boottime_seconds(); 5126 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 5127 } 5128 spin_unlock(&state_lock); 5129 } 5130 5131 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, 5132 struct rpc_task *task) 5133 { 5134 struct nfs4_delegation *dp = cb_to_delegation(cb); 5135 5136 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task); 5137 5138 if (dp->dl_stid.sc_status) 5139 /* CLOSED or REVOKED */ 5140 return 1; 5141 5142 switch (task->tk_status) { 5143 case 0: 5144 return 1; 5145 case -NFS4ERR_DELAY: 5146 rpc_delay(task, 2 * HZ); 5147 return 0; 5148 case -EBADHANDLE: 5149 case -NFS4ERR_BAD_STATEID: 5150 /* 5151 * Race: client probably got cb_recall before open reply 5152 * granting delegation. 5153 */ 5154 if (dp->dl_retries--) { 5155 rpc_delay(task, 2 * HZ); 5156 return 0; 5157 } 5158 fallthrough; 5159 default: 5160 return 1; 5161 } 5162 } 5163 5164 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) 5165 { 5166 struct nfs4_delegation *dp = cb_to_delegation(cb); 5167 5168 nfs4_put_stid(&dp->dl_stid); 5169 } 5170 5171 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { 5172 .prepare = nfsd4_cb_recall_prepare, 5173 .done = nfsd4_cb_recall_done, 5174 .release = nfsd4_cb_recall_release, 5175 }; 5176 5177 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 5178 { 5179 /* 5180 * We're assuming the state code never drops its reference 5181 * without first removing the lease. Since we're in this lease 5182 * callback (and since the lease code is serialized by the 5183 * flc_lock) we know the server hasn't removed the lease yet, and 5184 * we know it's safe to take a reference. 5185 */ 5186 refcount_inc(&dp->dl_stid.sc_count); 5187 WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall)); 5188 } 5189 5190 /* Called from break_lease() with flc_lock held. */ 5191 static bool 5192 nfsd_break_deleg_cb(struct file_lease *fl) 5193 { 5194 struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner; 5195 struct nfs4_file *fp = dp->dl_stid.sc_file; 5196 struct nfs4_client *clp = dp->dl_stid.sc_client; 5197 struct nfsd_net *nn; 5198 5199 trace_nfsd_cb_recall(&dp->dl_stid); 5200 5201 dp->dl_recalled = true; 5202 atomic_inc(&clp->cl_delegs_in_recall); 5203 if (try_to_expire_client(clp)) { 5204 nn = net_generic(clp->net, nfsd_net_id); 5205 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 5206 } 5207 5208 /* 5209 * We don't want the locks code to timeout the lease for us; 5210 * we'll remove it ourself if a delegation isn't returned 5211 * in time: 5212 */ 5213 fl->fl_break_time = 0; 5214 5215 fp->fi_had_conflict = true; 5216 nfsd_break_one_deleg(dp); 5217 return false; 5218 } 5219 5220 /** 5221 * nfsd_breaker_owns_lease - Check if lease conflict was resolved 5222 * @fl: Lock state to check 5223 * 5224 * Return values: 5225 * %true: Lease conflict was resolved 5226 * %false: Lease conflict was not resolved. 
5227 */ 5228 static bool nfsd_breaker_owns_lease(struct file_lease *fl) 5229 { 5230 struct nfs4_delegation *dl = fl->c.flc_owner; 5231 struct svc_rqst *rqst; 5232 struct nfs4_client *clp; 5233 5234 if (!i_am_nfsd()) 5235 return false; 5236 rqst = kthread_data(current); 5237 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */ 5238 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) 5239 return false; 5240 clp = *(rqst->rq_lease_breaker); 5241 return dl->dl_stid.sc_client == clp; 5242 } 5243 5244 static int 5245 nfsd_change_deleg_cb(struct file_lease *onlist, int arg, 5246 struct list_head *dispose) 5247 { 5248 struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner; 5249 struct nfs4_client *clp = dp->dl_stid.sc_client; 5250 5251 if (arg & F_UNLCK) { 5252 if (dp->dl_recalled) 5253 atomic_dec(&clp->cl_delegs_in_recall); 5254 return lease_modify(onlist, arg, dispose); 5255 } else 5256 return -EAGAIN; 5257 } 5258 5259 static const struct lease_manager_operations nfsd_lease_mng_ops = { 5260 .lm_breaker_owns_lease = nfsd_breaker_owns_lease, 5261 .lm_break = nfsd_break_deleg_cb, 5262 .lm_change = nfsd_change_deleg_cb, 5263 }; 5264 5265 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 5266 { 5267 if (nfsd4_has_session(cstate)) 5268 return nfs_ok; 5269 if (seqid == so->so_seqid - 1) 5270 return nfserr_replay_me; 5271 if (seqid == so->so_seqid) 5272 return nfs_ok; 5273 return nfserr_bad_seqid; 5274 } 5275 5276 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions, 5277 struct nfsd_net *nn) 5278 { 5279 struct nfs4_client *found; 5280 5281 spin_lock(&nn->client_lock); 5282 found = find_confirmed_client(clid, sessions, nn); 5283 if (found) 5284 atomic_inc(&found->cl_rpc_users); 5285 spin_unlock(&nn->client_lock); 5286 return found; 5287 } 5288 5289 static __be32 set_client(clientid_t *clid, 5290 struct nfsd4_compound_state *cstate, 5291 struct nfsd_net *nn) 5292 { 5293 if (cstate->clp) { 5294 if (!same_clid(&cstate->clp->cl_clientid, clid)) 5295 return nfserr_stale_clientid; 5296 return nfs_ok; 5297 } 5298 if (STALE_CLIENTID(clid, nn)) 5299 return nfserr_stale_clientid; 5300 /* 5301 * We're in the 4.0 case (otherwise the SEQUENCE op would have 5302 * set cstate->clp), so session = false: 5303 */ 5304 cstate->clp = lookup_clientid(clid, false, nn); 5305 if (!cstate->clp) 5306 return nfserr_expired; 5307 return nfs_ok; 5308 } 5309 5310 __be32 5311 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 5312 struct nfsd4_open *open, struct nfsd_net *nn) 5313 { 5314 clientid_t *clientid = &open->op_clientid; 5315 struct nfs4_client *clp = NULL; 5316 unsigned int strhashval; 5317 struct nfs4_openowner *oo = NULL; 5318 __be32 status; 5319 5320 /* 5321 * In case we need it later, after we've already created the 5322 * file and don't want to risk a further failure: 5323 */ 5324 open->op_file = nfsd4_alloc_file(); 5325 if (open->op_file == NULL) 5326 return nfserr_jukebox; 5327 5328 status = set_client(clientid, cstate, nn); 5329 if (status) 5330 return status; 5331 clp = cstate->clp; 5332 5333 strhashval = ownerstr_hashval(&open->op_owner); 5334 oo = find_openstateowner_str(strhashval, open, clp); 5335 open->op_openowner = oo; 5336 if (!oo) { 5337 goto new_owner; 5338 } 5339 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 5340 /* Replace unconfirmed owners without checking for replay. 
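 * (An owner that was never confirmed should have no reply that the
 * client is still waiting to replay, so it should be safe to release
 * it here and start over with a fresh one.)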
*/ 5341 release_openowner(oo); 5342 open->op_openowner = NULL; 5343 goto new_owner; 5344 } 5345 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 5346 if (status) 5347 return status; 5348 goto alloc_stateid; 5349 new_owner: 5350 oo = alloc_init_open_stateowner(strhashval, open, cstate); 5351 if (oo == NULL) 5352 return nfserr_jukebox; 5353 open->op_openowner = oo; 5354 alloc_stateid: 5355 open->op_stp = nfs4_alloc_open_stateid(clp); 5356 if (!open->op_stp) 5357 return nfserr_jukebox; 5358 5359 if (nfsd4_has_session(cstate) && 5360 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { 5361 open->op_odstate = alloc_clnt_odstate(clp); 5362 if (!open->op_odstate) 5363 return nfserr_jukebox; 5364 } 5365 5366 return nfs_ok; 5367 } 5368 5369 static inline __be32 5370 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 5371 { 5372 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 5373 return nfserr_openmode; 5374 else 5375 return nfs_ok; 5376 } 5377 5378 static int share_access_to_flags(u32 share_access) 5379 { 5380 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 5381 } 5382 5383 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, 5384 stateid_t *s) 5385 { 5386 struct nfs4_stid *ret; 5387 5388 ret = find_stateid_by_type(cl, s, SC_TYPE_DELEG, SC_STATUS_REVOKED); 5389 if (!ret) 5390 return NULL; 5391 return delegstateid(ret); 5392 } 5393 5394 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 5395 { 5396 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 5397 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 5398 } 5399 5400 static __be32 5401 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 5402 struct nfs4_delegation **dp) 5403 { 5404 int flags; 5405 __be32 status = nfserr_bad_stateid; 5406 struct nfs4_delegation *deleg; 5407 5408 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); 5409 if (deleg == NULL) 5410 goto out; 5411 if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) { 5412 nfs4_put_stid(&deleg->dl_stid); 5413 status = nfserr_admin_revoked; 5414 goto out; 5415 } 5416 if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) { 5417 nfs4_put_stid(&deleg->dl_stid); 5418 nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid); 5419 status = nfserr_deleg_revoked; 5420 goto out; 5421 } 5422 flags = share_access_to_flags(open->op_share_access); 5423 status = nfs4_check_delegmode(deleg, flags); 5424 if (status) { 5425 nfs4_put_stid(&deleg->dl_stid); 5426 goto out; 5427 } 5428 *dp = deleg; 5429 out: 5430 if (!nfsd4_is_deleg_cur(open)) 5431 return nfs_ok; 5432 if (status) 5433 return status; 5434 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 5435 return nfs_ok; 5436 } 5437 5438 static inline int nfs4_access_to_access(u32 nfs4_access) 5439 { 5440 int flags = 0; 5441 5442 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 5443 flags |= NFSD_MAY_READ; 5444 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 5445 flags |= NFSD_MAY_WRITE; 5446 return flags; 5447 } 5448 5449 static inline __be32 5450 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 5451 struct nfsd4_open *open) 5452 { 5453 struct iattr iattr = { 5454 .ia_valid = ATTR_SIZE, 5455 .ia_size = 0, 5456 }; 5457 struct nfsd_attrs attrs = { 5458 .na_iattr = &iattr, 5459 }; 5460 if (!open->op_truncate) 5461 return 0; 5462 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 5463 return nfserr_inval; 5464 return nfsd_setattr(rqstp, fh, &attrs, NULL); 5465 } 5466 5467 static __be32 nfs4_get_vfs_file(struct svc_rqst 
*rqstp, struct nfs4_file *fp, 5468 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 5469 struct nfsd4_open *open, bool new_stp) 5470 { 5471 struct nfsd_file *nf = NULL; 5472 __be32 status; 5473 int oflag = nfs4_access_to_omode(open->op_share_access); 5474 int access = nfs4_access_to_access(open->op_share_access); 5475 unsigned char old_access_bmap, old_deny_bmap; 5476 5477 spin_lock(&fp->fi_lock); 5478 5479 /* 5480 * Are we trying to set a deny mode that would conflict with 5481 * current access? 5482 */ 5483 status = nfs4_file_check_deny(fp, open->op_share_deny); 5484 if (status != nfs_ok) { 5485 if (status != nfserr_share_denied) { 5486 spin_unlock(&fp->fi_lock); 5487 goto out; 5488 } 5489 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, 5490 stp, open->op_share_deny, false)) 5491 status = nfserr_jukebox; 5492 spin_unlock(&fp->fi_lock); 5493 goto out; 5494 } 5495 5496 /* set access to the file */ 5497 status = nfs4_file_get_access(fp, open->op_share_access); 5498 if (status != nfs_ok) { 5499 if (status != nfserr_share_denied) { 5500 spin_unlock(&fp->fi_lock); 5501 goto out; 5502 } 5503 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, 5504 stp, open->op_share_access, true)) 5505 status = nfserr_jukebox; 5506 spin_unlock(&fp->fi_lock); 5507 goto out; 5508 } 5509 5510 /* Set access bits in stateid */ 5511 old_access_bmap = stp->st_access_bmap; 5512 set_access(open->op_share_access, stp); 5513 5514 /* Set new deny mask */ 5515 old_deny_bmap = stp->st_deny_bmap; 5516 set_deny(open->op_share_deny, stp); 5517 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 5518 5519 if (!fp->fi_fds[oflag]) { 5520 spin_unlock(&fp->fi_lock); 5521 5522 status = nfsd_file_acquire_opened(rqstp, cur_fh, access, 5523 open->op_filp, &nf); 5524 if (status != nfs_ok) 5525 goto out_put_access; 5526 5527 spin_lock(&fp->fi_lock); 5528 if (!fp->fi_fds[oflag]) { 5529 fp->fi_fds[oflag] = nf; 5530 nf = NULL; 5531 } 5532 } 5533 spin_unlock(&fp->fi_lock); 5534 if (nf) 5535 nfsd_file_put(nf); 5536 5537 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, 5538 access)); 5539 if (status) 5540 goto out_put_access; 5541 5542 status = nfsd4_truncate(rqstp, cur_fh, open); 5543 if (status) 5544 goto out_put_access; 5545 out: 5546 return status; 5547 out_put_access: 5548 stp->st_access_bmap = old_access_bmap; 5549 nfs4_file_put_access(fp, open->op_share_access); 5550 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); 5551 goto out; 5552 } 5553 5554 static __be32 5555 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, 5556 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 5557 struct nfsd4_open *open) 5558 { 5559 __be32 status; 5560 unsigned char old_deny_bmap = stp->st_deny_bmap; 5561 5562 if (!test_access(open->op_share_access, stp)) 5563 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false); 5564 5565 /* test and set deny mode */ 5566 spin_lock(&fp->fi_lock); 5567 status = nfs4_file_check_deny(fp, open->op_share_deny); 5568 switch (status) { 5569 case nfs_ok: 5570 set_deny(open->op_share_deny, stp); 5571 fp->fi_share_deny |= 5572 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 5573 break; 5574 case nfserr_share_denied: 5575 if (nfs4_resolve_deny_conflicts_locked(fp, false, 5576 stp, open->op_share_deny, false)) 5577 status = nfserr_jukebox; 5578 break; 5579 } 5580 spin_unlock(&fp->fi_lock); 5581 5582 if (status != nfs_ok) 5583 return status; 5584 5585 status = nfsd4_truncate(rqstp, cur_fh, open); 5586 if (status != nfs_ok) 5587 
reset_union_bmap_deny(old_deny_bmap, stp); 5588 return status; 5589 } 5590 5591 /* Should we give out recallable state?: */ 5592 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 5593 { 5594 if (clp->cl_cb_state == NFSD4_CB_UP) 5595 return true; 5596 /* 5597 * In the sessions case, since we don't have to establish a 5598 * separate connection for callbacks, we assume it's OK 5599 * until we hear otherwise: 5600 */ 5601 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 5602 } 5603 5604 static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp, 5605 int flag) 5606 { 5607 struct file_lease *fl; 5608 5609 fl = locks_alloc_lease(); 5610 if (!fl) 5611 return NULL; 5612 fl->fl_lmops = &nfsd_lease_mng_ops; 5613 fl->c.flc_flags = FL_DELEG; 5614 fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; 5615 fl->c.flc_owner = (fl_owner_t)dp; 5616 fl->c.flc_pid = current->tgid; 5617 fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; 5618 return fl; 5619 } 5620 5621 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, 5622 struct nfs4_file *fp) 5623 { 5624 struct nfs4_ol_stateid *st; 5625 struct file *f = fp->fi_deleg_file->nf_file; 5626 struct inode *ino = file_inode(f); 5627 int writes; 5628 5629 writes = atomic_read(&ino->i_writecount); 5630 if (!writes) 5631 return 0; 5632 /* 5633 * There could be multiple filehandles (hence multiple 5634 * nfs4_files) referencing this file, but that's not too 5635 * common; let's just give up in that case rather than 5636 * trying to go look up all the clients using that other 5637 * nfs4_file as well: 5638 */ 5639 if (fp->fi_aliased) 5640 return -EAGAIN; 5641 /* 5642 * If there's a close in progress, make sure that we see it 5643 * clear any fi_fds[] entries before we see it decrement 5644 * i_writecount: 5645 */ 5646 smp_mb__after_atomic(); 5647 5648 if (fp->fi_fds[O_WRONLY]) 5649 writes--; 5650 if (fp->fi_fds[O_RDWR]) 5651 writes--; 5652 if (writes > 0) 5653 return -EAGAIN; /* There may be non-NFSv4 writers */ 5654 /* 5655 * It's possible there are non-NFSv4 write opens in progress, 5656 * but if they haven't incremented i_writecount yet then they 5657 * also haven't called break lease yet; so, they'll break this 5658 * lease soon enough. So, all that's left to check for is NFSv4 5659 * opens: 5660 */ 5661 spin_lock(&fp->fi_lock); 5662 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { 5663 if (st->st_openstp == NULL /* it's an open */ && 5664 access_permit_write(st) && 5665 st->st_stid.sc_client != clp) { 5666 spin_unlock(&fp->fi_lock); 5667 return -EAGAIN; 5668 } 5669 } 5670 spin_unlock(&fp->fi_lock); 5671 /* 5672 * There's a small chance that we could be racing with another 5673 * NFSv4 open. However, any open that hasn't added itself to 5674 * the fi_stateids list also hasn't called break_lease yet; so, 5675 * they'll break this lease soon enough. 5676 */ 5677 return 0; 5678 } 5679 5680 /* 5681 * It's possible that between opening the dentry and setting the delegation, 5682 * that it has been renamed or unlinked. Redo the lookup to verify that this 5683 * hasn't happened. 
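 * The lease has already been set by the time this runs, so a rename
 * or unlink that happens after the re-lookup should break the lease
 * and recall the delegation rather than go unnoticed.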
5684 */ 5685 static int 5686 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp, 5687 struct svc_fh *parent) 5688 { 5689 struct svc_export *exp; 5690 struct dentry *child; 5691 __be32 err; 5692 5693 err = nfsd_lookup_dentry(open->op_rqstp, parent, 5694 open->op_fname, open->op_fnamelen, 5695 &exp, &child); 5696 5697 if (err) 5698 return -EAGAIN; 5699 5700 exp_put(exp); 5701 dput(child); 5702 if (child != file_dentry(fp->fi_deleg_file->nf_file)) 5703 return -EAGAIN; 5704 5705 return 0; 5706 } 5707 5708 /* 5709 * We avoid breaking delegations held by a client due to its own activity, but 5710 * clearing setuid/setgid bits on a write is an implicit activity, and the client 5711 * may not notice and continue using the old mode. Avoid giving out a delegation 5712 * on setuid/setgid files when the client is requesting an open for write. 5713 */ 5714 static int 5715 nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf) 5716 { 5717 struct inode *inode = file_inode(nf->nf_file); 5718 5719 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) && 5720 (inode->i_mode & (S_ISUID|S_ISGID))) 5721 return -EAGAIN; 5722 return 0; 5723 } 5724 5725 static struct nfs4_delegation * 5726 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, 5727 struct svc_fh *parent) 5728 { 5729 int status = 0; 5730 struct nfs4_client *clp = stp->st_stid.sc_client; 5731 struct nfs4_file *fp = stp->st_stid.sc_file; 5732 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; 5733 struct nfs4_delegation *dp; 5734 struct nfsd_file *nf = NULL; 5735 struct file_lease *fl; 5736 u32 dl_type; 5737 5738 /* 5739 * The fi_had_conflict and nfs_get_existing_delegation checks 5740 * here are just optimizations; we'll need to recheck them at 5741 * the end: 5742 */ 5743 if (fp->fi_had_conflict) 5744 return ERR_PTR(-EAGAIN); 5745 5746 /* 5747 * Try for a write delegation first. RFC8881 section 10.4 says: 5748 * 5749 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle, 5750 * on its own, all opens." 5751 * 5752 * Furthermore, the client can use a write delegation for most READ 5753 * operations as well, so we require an O_RDWR file here. 5754 * 5755 * Offer a write delegation in the case of a BOTH open, and ensure 5756 * we get the O_RDWR descriptor. 5757 */ 5758 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) { 5759 nf = find_rw_file(fp); 5760 dl_type = NFS4_OPEN_DELEGATE_WRITE; 5761 } 5762 5763 /* 5764 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR 5765 * file for some reason, then try for a read delegation instead.
5766 */ 5767 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) { 5768 nf = find_readable_file(fp); 5769 dl_type = NFS4_OPEN_DELEGATE_READ; 5770 } 5771 5772 if (!nf) 5773 return ERR_PTR(-EAGAIN); 5774 5775 spin_lock(&state_lock); 5776 spin_lock(&fp->fi_lock); 5777 if (nfs4_delegation_exists(clp, fp)) 5778 status = -EAGAIN; 5779 else if (nfsd4_verify_setuid_write(open, nf)) 5780 status = -EAGAIN; 5781 else if (!fp->fi_deleg_file) { 5782 fp->fi_deleg_file = nf; 5783 /* increment early to prevent fi_deleg_file from being 5784 * cleared */ 5785 fp->fi_delegees = 1; 5786 nf = NULL; 5787 } else 5788 fp->fi_delegees++; 5789 spin_unlock(&fp->fi_lock); 5790 spin_unlock(&state_lock); 5791 if (nf) 5792 nfsd_file_put(nf); 5793 if (status) 5794 return ERR_PTR(status); 5795 5796 status = -ENOMEM; 5797 dp = alloc_init_deleg(clp, fp, odstate, dl_type); 5798 if (!dp) 5799 goto out_delegees; 5800 5801 fl = nfs4_alloc_init_lease(dp, dl_type); 5802 if (!fl) 5803 goto out_clnt_odstate; 5804 5805 status = kernel_setlease(fp->fi_deleg_file->nf_file, 5806 fl->c.flc_type, &fl, NULL); 5807 if (fl) 5808 locks_free_lease(fl); 5809 if (status) 5810 goto out_clnt_odstate; 5811 5812 if (parent) { 5813 status = nfsd4_verify_deleg_dentry(open, fp, parent); 5814 if (status) 5815 goto out_unlock; 5816 } 5817 5818 status = nfsd4_check_conflicting_opens(clp, fp); 5819 if (status) 5820 goto out_unlock; 5821 5822 /* 5823 * Now that the deleg is set, check again to ensure that nothing 5824 * raced in and changed the mode while we weren't lookng. 5825 */ 5826 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file); 5827 if (status) 5828 goto out_unlock; 5829 5830 status = -EAGAIN; 5831 if (fp->fi_had_conflict) 5832 goto out_unlock; 5833 5834 spin_lock(&state_lock); 5835 spin_lock(&clp->cl_lock); 5836 spin_lock(&fp->fi_lock); 5837 status = hash_delegation_locked(dp, fp); 5838 spin_unlock(&fp->fi_lock); 5839 spin_unlock(&clp->cl_lock); 5840 spin_unlock(&state_lock); 5841 5842 if (status) 5843 goto out_unlock; 5844 5845 return dp; 5846 out_unlock: 5847 kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); 5848 out_clnt_odstate: 5849 put_clnt_odstate(dp->dl_clnt_odstate); 5850 nfs4_put_stid(&dp->dl_stid); 5851 out_delegees: 5852 put_deleg_file(fp); 5853 return ERR_PTR(status); 5854 } 5855 5856 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 5857 { 5858 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5859 if (status == -EAGAIN) 5860 open->op_why_no_deleg = WND4_CONTENTION; 5861 else { 5862 open->op_why_no_deleg = WND4_RESOURCE; 5863 switch (open->op_deleg_want) { 5864 case NFS4_SHARE_WANT_READ_DELEG: 5865 case NFS4_SHARE_WANT_WRITE_DELEG: 5866 case NFS4_SHARE_WANT_ANY_DELEG: 5867 break; 5868 case NFS4_SHARE_WANT_CANCEL: 5869 open->op_why_no_deleg = WND4_CANCELLED; 5870 break; 5871 case NFS4_SHARE_WANT_NO_DELEG: 5872 WARN_ON_ONCE(1); 5873 } 5874 } 5875 } 5876 5877 /* 5878 * The Linux NFS server does not offer write delegations to NFSv4.0 5879 * clients in order to avoid conflicts between write delegations and 5880 * GETATTRs requesting CHANGE or SIZE attributes. 5881 * 5882 * With NFSv4.1 and later minorversions, the SEQUENCE operation that 5883 * begins each COMPOUND contains a client ID. Delegation recall can 5884 * be avoided when the server recognizes the client sending a 5885 * GETATTR also holds write delegation it conflicts with. 
5886 * 5887 * However, the NFSv4.0 protocol does not enable a server to 5888 * determine that a GETATTR originated from the client holding the 5889 * conflicting delegation versus coming from some other client. Per 5890 * RFC 7530 Section 16.7.5, the server must recall or send a 5891 * CB_GETATTR even when the GETATTR originates from the client that 5892 * holds the conflicting delegation. 5893 * 5894 * An NFSv4.0 client can trigger a pathological situation if it 5895 * always sends a DELEGRETURN preceded by a conflicting GETATTR in 5896 * the same COMPOUND. COMPOUND execution will always stop at the 5897 * GETATTR and the DELEGRETURN will never get executed. The server 5898 * eventually revokes the delegation, which can result in loss of 5899 * open or lock state. 5900 */ 5901 static void 5902 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, 5903 struct svc_fh *currentfh) 5904 { 5905 struct nfs4_delegation *dp; 5906 struct nfs4_openowner *oo = openowner(stp->st_stateowner); 5907 struct nfs4_client *clp = stp->st_stid.sc_client; 5908 struct svc_fh *parent = NULL; 5909 int cb_up; 5910 int status = 0; 5911 struct kstat stat; 5912 struct path path; 5913 5914 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 5915 open->op_recall = false; 5916 switch (open->op_claim_type) { 5917 case NFS4_OPEN_CLAIM_PREVIOUS: 5918 if (!cb_up) 5919 open->op_recall = true; 5920 break; 5921 case NFS4_OPEN_CLAIM_NULL: 5922 parent = currentfh; 5923 fallthrough; 5924 case NFS4_OPEN_CLAIM_FH: 5925 /* 5926 * Let's not give out any delegations till everyone's 5927 * had the chance to reclaim theirs, *and* until 5928 * NLM locks have all been reclaimed: 5929 */ 5930 if (locks_in_grace(clp->net)) 5931 goto out_no_deleg; 5932 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 5933 goto out_no_deleg; 5934 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE && 5935 !clp->cl_minorversion) 5936 goto out_no_deleg; 5937 break; 5938 default: 5939 goto out_no_deleg; 5940 } 5941 dp = nfs4_set_delegation(open, stp, parent); 5942 if (IS_ERR(dp)) 5943 goto out_no_deleg; 5944 5945 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 5946 5947 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { 5948 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE; 5949 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); 5950 path.mnt = currentfh->fh_export->ex_path.mnt; 5951 path.dentry = currentfh->fh_dentry; 5952 if (vfs_getattr(&path, &stat, 5953 (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE), 5954 AT_STATX_SYNC_AS_STAT)) { 5955 nfs4_put_stid(&dp->dl_stid); 5956 destroy_delegation(dp); 5957 goto out_no_deleg; 5958 } 5959 dp->dl_cb_fattr.ncf_cur_fsize = stat.size; 5960 dp->dl_cb_fattr.ncf_initial_cinfo = 5961 nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry)); 5962 } else { 5963 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 5964 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); 5965 } 5966 nfs4_put_stid(&dp->dl_stid); 5967 return; 5968 out_no_deleg: 5969 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; 5970 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 5971 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { 5972 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 5973 open->op_recall = true; 5974 } 5975 5976 /* 4.1 client asking for a delegation? 
*/ 5977 if (open->op_deleg_want) 5978 nfsd4_open_deleg_none_ext(open, status); 5979 return; 5980 } 5981 5982 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 5983 struct nfs4_delegation *dp) 5984 { 5985 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 5986 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 5987 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5988 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 5989 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 5990 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 5991 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 5992 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 5993 } 5994 /* Otherwise the client must be confused wanting a delegation 5995 * it already has, therefore we don't return 5996 * NFS4_OPEN_DELEGATE_NONE_EXT and reason. 5997 */ 5998 } 5999 6000 /** 6001 * nfsd4_process_open2 - finish open processing 6002 * @rqstp: the RPC transaction being executed 6003 * @current_fh: NFSv4 COMPOUND's current filehandle 6004 * @open: OPEN arguments 6005 * 6006 * If successful, (1) truncate the file if open->op_truncate was 6007 * set, (2) set open->op_stateid, (3) set open->op_delegation. 6008 * 6009 * Returns %nfs_ok on success; otherwise an nfs4stat value in 6010 * network byte order is returned. 6011 */ 6012 __be32 6013 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 6014 { 6015 struct nfsd4_compoundres *resp = rqstp->rq_resp; 6016 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 6017 struct nfs4_file *fp = NULL; 6018 struct nfs4_ol_stateid *stp = NULL; 6019 struct nfs4_delegation *dp = NULL; 6020 __be32 status; 6021 bool new_stp = false; 6022 6023 /* 6024 * Lookup file; if found, lookup stateid and check open request, 6025 * and check for delegations in the process of being recalled. 6026 * If not found, create the nfs4_file struct 6027 */ 6028 fp = nfsd4_file_hash_insert(open->op_file, current_fh); 6029 if (unlikely(!fp)) 6030 return nfserr_jukebox; 6031 if (fp != open->op_file) { 6032 status = nfs4_check_deleg(cl, open, &dp); 6033 if (status) 6034 goto out; 6035 stp = nfsd4_find_and_lock_existing_open(fp, open); 6036 } else { 6037 open->op_file = NULL; 6038 status = nfserr_bad_stateid; 6039 if (nfsd4_is_deleg_cur(open)) 6040 goto out; 6041 } 6042 6043 if (!stp) { 6044 stp = init_open_stateid(fp, open); 6045 if (!open->op_stp) 6046 new_stp = true; 6047 } 6048 6049 /* 6050 * OPEN the file, or upgrade an existing OPEN. 6051 * If truncate fails, the OPEN fails. 6052 * 6053 * stp is already locked. 
6054 */ 6055 if (!new_stp) { 6056 /* Stateid was found, this is an OPEN upgrade */ 6057 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 6058 if (status) { 6059 mutex_unlock(&stp->st_mutex); 6060 goto out; 6061 } 6062 } else { 6063 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true); 6064 if (status) { 6065 release_open_stateid(stp); 6066 mutex_unlock(&stp->st_mutex); 6067 goto out; 6068 } 6069 6070 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, 6071 open->op_odstate); 6072 if (stp->st_clnt_odstate == open->op_odstate) 6073 open->op_odstate = NULL; 6074 } 6075 6076 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 6077 mutex_unlock(&stp->st_mutex); 6078 6079 if (nfsd4_has_session(&resp->cstate)) { 6080 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 6081 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 6082 open->op_why_no_deleg = WND4_NOT_WANTED; 6083 goto nodeleg; 6084 } 6085 } 6086 6087 /* 6088 * Attempt to hand out a delegation. No error return, because the 6089 * OPEN succeeds even if we fail. 6090 */ 6091 nfs4_open_delegation(open, stp, &resp->cstate.current_fh); 6092 nodeleg: 6093 status = nfs_ok; 6094 trace_nfsd_open(&stp->st_stid.sc_stateid); 6095 out: 6096 /* 4.1 client trying to upgrade/downgrade delegation? */ 6097 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 6098 open->op_deleg_want) 6099 nfsd4_deleg_xgrade_none_ext(open, dp); 6100 6101 if (fp) 6102 put_nfs4_file(fp); 6103 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 6104 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 6105 /* 6106 * To finish the open response, we just need to set the rflags. 6107 */ 6108 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 6109 if (nfsd4_has_session(&resp->cstate)) 6110 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; 6111 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) 6112 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 6113 6114 if (dp) 6115 nfs4_put_stid(&dp->dl_stid); 6116 if (stp) 6117 nfs4_put_stid(&stp->st_stid); 6118 6119 return status; 6120 } 6121 6122 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, 6123 struct nfsd4_open *open) 6124 { 6125 if (open->op_openowner) { 6126 struct nfs4_stateowner *so = &open->op_openowner->oo_owner; 6127 6128 nfsd4_cstate_assign_replay(cstate, so); 6129 nfs4_put_stateowner(so); 6130 } 6131 if (open->op_file) 6132 kmem_cache_free(file_slab, open->op_file); 6133 if (open->op_stp) 6134 nfs4_put_stid(&open->op_stp->st_stid); 6135 if (open->op_odstate) 6136 kmem_cache_free(odstate_slab, open->op_odstate); 6137 } 6138 6139 __be32 6140 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6141 union nfsd4_op_u *u) 6142 { 6143 clientid_t *clid = &u->renew; 6144 struct nfs4_client *clp; 6145 __be32 status; 6146 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6147 6148 trace_nfsd_clid_renew(clid); 6149 status = set_client(clid, cstate, nn); 6150 if (status) 6151 return status; 6152 clp = cstate->clp; 6153 if (!list_empty(&clp->cl_delegations) 6154 && clp->cl_cb_state != NFSD4_CB_UP) 6155 return nfserr_cb_path_down; 6156 return nfs_ok; 6157 } 6158 6159 void 6160 nfsd4_end_grace(struct nfsd_net *nn) 6161 { 6162 /* do nothing if grace period already ended */ 6163 if (nn->grace_ended) 6164 return; 6165 6166 trace_nfsd_grace_complete(nn); 6167 nn->grace_ended = true; 6168 /* 6169 * If the server goes down again right now, an NFSv4 6170 * client will still be allowed to reclaim after it comes back up, 
6171 * even if it hasn't yet had a chance to reclaim state this time. 6172 * 6173 */ 6174 nfsd4_record_grace_done(nn); 6175 /* 6176 * At this point, NFSv4 clients can still reclaim. But if the 6177 * server crashes, any that have not yet reclaimed will be out 6178 * of luck on the next boot. 6179 * 6180 * (NFSv4.1+ clients are considered to have reclaimed once they 6181 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to 6182 * have reclaimed after their first OPEN.) 6183 */ 6184 locks_end_grace(&nn->nfsd4_manager); 6185 /* 6186 * At this point, and once lockd and/or any other containers 6187 * exit their grace period, further reclaims will fail and 6188 * regular locking can resume. 6189 */ 6190 } 6191 6192 /* 6193 * If we've waited a lease period but there are still clients trying to 6194 * reclaim, wait a little longer to give them a chance to finish. 6195 */ 6196 static bool clients_still_reclaiming(struct nfsd_net *nn) 6197 { 6198 time64_t double_grace_period_end = nn->boot_time + 6199 2 * nn->nfsd4_lease; 6200 6201 if (nn->track_reclaim_completes && 6202 atomic_read(&nn->nr_reclaim_complete) == 6203 nn->reclaim_str_hashtbl_size) 6204 return false; 6205 if (!nn->somebody_reclaimed) 6206 return false; 6207 nn->somebody_reclaimed = false; 6208 /* 6209 * If we've given them *two* lease times to reclaim, and they're 6210 * still not done, give up: 6211 */ 6212 if (ktime_get_boottime_seconds() > double_grace_period_end) 6213 return false; 6214 return true; 6215 } 6216 6217 struct laundry_time { 6218 time64_t cutoff; 6219 time64_t new_timeo; 6220 }; 6221 6222 static bool state_expired(struct laundry_time *lt, time64_t last_refresh) 6223 { 6224 time64_t time_remaining; 6225 6226 if (last_refresh < lt->cutoff) 6227 return true; 6228 time_remaining = last_refresh - lt->cutoff; 6229 lt->new_timeo = min(lt->new_timeo, time_remaining); 6230 return false; 6231 } 6232 6233 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 6234 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn) 6235 { 6236 spin_lock_init(&nn->nfsd_ssc_lock); 6237 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list); 6238 init_waitqueue_head(&nn->nfsd_ssc_waitq); 6239 } 6240 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work); 6241 6242 /* 6243 * This is called when nfsd is being shutdown, after all inter_ssc 6244 * cleanup were done, to destroy the ssc delayed unmount list. 
6245 */ 6246 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn) 6247 { 6248 struct nfsd4_ssc_umount_item *ni = NULL; 6249 struct nfsd4_ssc_umount_item *tmp; 6250 6251 spin_lock(&nn->nfsd_ssc_lock); 6252 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { 6253 list_del(&ni->nsui_list); 6254 spin_unlock(&nn->nfsd_ssc_lock); 6255 mntput(ni->nsui_vfsmount); 6256 kfree(ni); 6257 spin_lock(&nn->nfsd_ssc_lock); 6258 } 6259 spin_unlock(&nn->nfsd_ssc_lock); 6260 } 6261 6262 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn) 6263 { 6264 bool do_wakeup = false; 6265 struct nfsd4_ssc_umount_item *ni = NULL; 6266 struct nfsd4_ssc_umount_item *tmp; 6267 6268 spin_lock(&nn->nfsd_ssc_lock); 6269 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { 6270 if (time_after(jiffies, ni->nsui_expire)) { 6271 if (refcount_read(&ni->nsui_refcnt) > 1) 6272 continue; 6273 6274 /* mark being unmount */ 6275 ni->nsui_busy = true; 6276 spin_unlock(&nn->nfsd_ssc_lock); 6277 mntput(ni->nsui_vfsmount); 6278 spin_lock(&nn->nfsd_ssc_lock); 6279 6280 /* waiters need to start from begin of list */ 6281 list_del(&ni->nsui_list); 6282 kfree(ni); 6283 6284 /* wakeup ssc_connect waiters */ 6285 do_wakeup = true; 6286 continue; 6287 } 6288 break; 6289 } 6290 if (do_wakeup) 6291 wake_up_all(&nn->nfsd_ssc_waitq); 6292 spin_unlock(&nn->nfsd_ssc_lock); 6293 } 6294 #endif 6295 6296 /* Check if any lock belonging to this lockowner has any blockers */ 6297 static bool 6298 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo) 6299 { 6300 struct file_lock_context *ctx; 6301 struct nfs4_ol_stateid *stp; 6302 struct nfs4_file *nf; 6303 6304 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { 6305 nf = stp->st_stid.sc_file; 6306 ctx = locks_inode_context(nf->fi_inode); 6307 if (!ctx) 6308 continue; 6309 if (locks_owner_has_blockers(ctx, lo)) 6310 return true; 6311 } 6312 return false; 6313 } 6314 6315 static bool 6316 nfs4_anylock_blockers(struct nfs4_client *clp) 6317 { 6318 int i; 6319 struct nfs4_stateowner *so; 6320 struct nfs4_lockowner *lo; 6321 6322 if (atomic_read(&clp->cl_delegs_in_recall)) 6323 return true; 6324 spin_lock(&clp->cl_lock); 6325 for (i = 0; i < OWNER_HASH_SIZE; i++) { 6326 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i], 6327 so_strhash) { 6328 if (so->so_is_open_owner) 6329 continue; 6330 lo = lockowner(so); 6331 if (nfs4_lockowner_has_blockers(lo)) { 6332 spin_unlock(&clp->cl_lock); 6333 return true; 6334 } 6335 } 6336 } 6337 spin_unlock(&clp->cl_lock); 6338 return false; 6339 } 6340 6341 static void 6342 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist, 6343 struct laundry_time *lt) 6344 { 6345 unsigned int maxreap, reapcnt = 0; 6346 struct list_head *pos, *next; 6347 struct nfs4_client *clp; 6348 6349 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ? 
6350 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0; 6351 INIT_LIST_HEAD(reaplist); 6352 spin_lock(&nn->client_lock); 6353 list_for_each_safe(pos, next, &nn->client_lru) { 6354 clp = list_entry(pos, struct nfs4_client, cl_lru); 6355 if (clp->cl_state == NFSD4_EXPIRABLE) 6356 goto exp_client; 6357 if (!state_expired(lt, clp->cl_time)) 6358 break; 6359 if (!atomic_read(&clp->cl_rpc_users)) { 6360 if (clp->cl_state == NFSD4_ACTIVE) 6361 atomic_inc(&nn->nfsd_courtesy_clients); 6362 clp->cl_state = NFSD4_COURTESY; 6363 } 6364 if (!client_has_state(clp)) 6365 goto exp_client; 6366 if (!nfs4_anylock_blockers(clp)) 6367 if (reapcnt >= maxreap) 6368 continue; 6369 exp_client: 6370 if (!mark_client_expired_locked(clp)) { 6371 list_add(&clp->cl_lru, reaplist); 6372 reapcnt++; 6373 } 6374 } 6375 spin_unlock(&nn->client_lock); 6376 } 6377 6378 static void 6379 nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn, 6380 struct list_head *reaplist) 6381 { 6382 unsigned int maxreap = 0, reapcnt = 0; 6383 struct list_head *pos, *next; 6384 struct nfs4_client *clp; 6385 6386 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN; 6387 INIT_LIST_HEAD(reaplist); 6388 6389 spin_lock(&nn->client_lock); 6390 list_for_each_safe(pos, next, &nn->client_lru) { 6391 clp = list_entry(pos, struct nfs4_client, cl_lru); 6392 if (clp->cl_state == NFSD4_ACTIVE) 6393 break; 6394 if (reapcnt >= maxreap) 6395 break; 6396 if (!mark_client_expired_locked(clp)) { 6397 list_add(&clp->cl_lru, reaplist); 6398 reapcnt++; 6399 } 6400 } 6401 spin_unlock(&nn->client_lock); 6402 } 6403 6404 static void 6405 nfs4_process_client_reaplist(struct list_head *reaplist) 6406 { 6407 struct list_head *pos, *next; 6408 struct nfs4_client *clp; 6409 6410 list_for_each_safe(pos, next, reaplist) { 6411 clp = list_entry(pos, struct nfs4_client, cl_lru); 6412 trace_nfsd_clid_purged(&clp->cl_clientid); 6413 list_del_init(&clp->cl_lru); 6414 expire_client(clp); 6415 } 6416 } 6417 6418 static void nfs40_clean_admin_revoked(struct nfsd_net *nn, 6419 struct laundry_time *lt) 6420 { 6421 struct nfs4_client *clp; 6422 6423 spin_lock(&nn->client_lock); 6424 if (nn->nfs40_last_revoke == 0 || 6425 nn->nfs40_last_revoke > lt->cutoff) { 6426 spin_unlock(&nn->client_lock); 6427 return; 6428 } 6429 nn->nfs40_last_revoke = 0; 6430 6431 retry: 6432 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6433 unsigned long id, tmp; 6434 struct nfs4_stid *stid; 6435 6436 if (atomic_read(&clp->cl_admin_revoked) == 0) 6437 continue; 6438 6439 spin_lock(&clp->cl_lock); 6440 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id) 6441 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { 6442 refcount_inc(&stid->sc_count); 6443 spin_unlock(&nn->client_lock); 6444 /* this function drops ->cl_lock */ 6445 nfsd4_drop_revoked_stid(stid); 6446 nfs4_put_stid(stid); 6447 spin_lock(&nn->client_lock); 6448 goto retry; 6449 } 6450 spin_unlock(&clp->cl_lock); 6451 } 6452 spin_unlock(&nn->client_lock); 6453 } 6454 6455 static time64_t 6456 nfs4_laundromat(struct nfsd_net *nn) 6457 { 6458 struct nfs4_openowner *oo; 6459 struct nfs4_delegation *dp; 6460 struct nfs4_ol_stateid *stp; 6461 struct nfsd4_blocked_lock *nbl; 6462 struct list_head *pos, *next, reaplist; 6463 struct laundry_time lt = { 6464 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease, 6465 .new_timeo = nn->nfsd4_lease 6466 }; 6467 struct nfs4_cpntf_state *cps; 6468 copy_stateid_t *cps_t; 6469 int i; 6470 6471 if (clients_still_reclaiming(nn)) { 6472 lt.new_timeo = 0; 6473 goto out; 6474 } 6475 nfsd4_end_grace(nn); 6476 6477 spin_lock(&nn->s2s_cp_lock); 
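/*
 * Expire copy-notify stateids: each NFS4_COPYNOTIFY_STID whose cpntf_time
 * has fallen behind the laundromat cutoff is freed here, under s2s_cp_lock.
 */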
6478 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6479 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6480 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6481 state_expired(&lt, cps->cpntf_time))
6482 _free_cpntf_state_locked(nn, cps);
6483 }
6484 spin_unlock(&nn->s2s_cp_lock);
6485 nfs4_get_client_reaplist(nn, &reaplist, &lt);
6486 nfs4_process_client_reaplist(&reaplist);
6487
6488 nfs40_clean_admin_revoked(nn, &lt);
6489
6490 spin_lock(&state_lock);
6491 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6492 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6493 if (!state_expired(&lt, dp->dl_time))
6494 break;
6495 unhash_delegation_locked(dp, SC_STATUS_REVOKED);
6496 list_add(&dp->dl_recall_lru, &reaplist);
6497 }
6498 spin_unlock(&state_lock);
6499 while (!list_empty(&reaplist)) {
6500 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6501 dl_recall_lru);
6502 list_del_init(&dp->dl_recall_lru);
6503 revoke_delegation(dp);
6504 }
6505
6506 spin_lock(&nn->client_lock);
6507 while (!list_empty(&nn->close_lru)) {
6508 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6509 oo_close_lru);
6510 if (!state_expired(&lt, oo->oo_time))
6511 break;
6512 list_del_init(&oo->oo_close_lru);
6513 stp = oo->oo_last_closed_stid;
6514 oo->oo_last_closed_stid = NULL;
6515 spin_unlock(&nn->client_lock);
6516 nfs4_put_stid(&stp->st_stid);
6517 spin_lock(&nn->client_lock);
6518 }
6519 spin_unlock(&nn->client_lock);
6520
6521 /*
6522 * It's possible for a client to try and acquire an already held lock
6523 * that is being held for a long time, and then lose interest in it.
6524 * So, we clean out any un-revisited request after a lease period
6525 * under the assumption that the client is no longer interested.
6526 *
6527 * RFC5661, sec. 9.6 states that the client must not rely on getting
6528 * notifications and must continue to poll for locks, even when the
6529 * server supports them. Thus this shouldn't lead to clients blocking
6530 * indefinitely once the lock does become free.
6531 */
6532 BUG_ON(!list_empty(&reaplist));
6533 spin_lock(&nn->blocked_locks_lock);
6534 while (!list_empty(&nn->blocked_locks_lru)) {
6535 nbl = list_first_entry(&nn->blocked_locks_lru,
6536 struct nfsd4_blocked_lock, nbl_lru);
6537 if (!state_expired(&lt, nbl->nbl_time))
6538 break;
6539 list_move(&nbl->nbl_lru, &reaplist);
6540 list_del_init(&nbl->nbl_list);
6541 }
6542 spin_unlock(&nn->blocked_locks_lock);
6543
6544 while (!list_empty(&reaplist)) {
6545 nbl = list_first_entry(&reaplist,
6546 struct nfsd4_blocked_lock, nbl_lru);
6547 list_del_init(&nbl->nbl_lru);
6548 free_blocked_lock(nbl);
6549 }
6550 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
6551 /* service the server-to-server copy delayed unmount list */
6552 nfsd4_ssc_expire_umount(nn);
6553 #endif
6554 if (atomic_long_read(&num_delegations) >= max_delegations)
6555 deleg_reaper(nn);
6556 out:
6557 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6558 }
6559
6562 static void
6563 laundromat_main(struct work_struct *laundry)
6564 {
6565 time64_t t;
6566 struct delayed_work *dwork = to_delayed_work(laundry);
6567 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6568 laundromat_work);
6569
6570 t = nfs4_laundromat(nn);
6571 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6572 }
6573
6574 static void
6575 courtesy_client_reaper(struct nfsd_net *nn)
6576 {
6577 struct list_head reaplist;
6578
6579 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6580 nfs4_process_client_reaplist(&reaplist);
6581 }
6582
6583 static void
6584 deleg_reaper(struct nfsd_net *nn)
6585 {
6586 struct list_head *pos, *next;
6587 struct nfs4_client *clp;
6588 struct list_head cblist;
6589
6590 INIT_LIST_HEAD(&cblist);
6591 spin_lock(&nn->client_lock);
6592 list_for_each_safe(pos, next, &nn->client_lru) {
6593 clp = list_entry(pos, struct nfs4_client, cl_lru);
6594 if (clp->cl_state != NFSD4_ACTIVE ||
6595 list_empty(&clp->cl_delegations) ||
6596 atomic_read(&clp->cl_delegs_in_recall) ||
6597 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6598 (ktime_get_boottime_seconds() -
6599 clp->cl_ra_time < 5)) {
6600 continue;
6601 }
6602 list_add(&clp->cl_ra_cblist, &cblist);
6603
6604 /* release in nfsd4_cb_recall_any_release */
6605 atomic_inc(&clp->cl_rpc_users);
6606 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6607 clp->cl_ra_time = ktime_get_boottime_seconds();
6608 }
6609 spin_unlock(&nn->client_lock);
6610
6611 while (!list_empty(&cblist)) {
6612 clp = list_first_entry(&cblist, struct nfs4_client,
6613 cl_ra_cblist);
6614 list_del_init(&clp->cl_ra_cblist);
6615 clp->cl_ra->ra_keep = 0;
6617 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
6618 BIT(RCA4_TYPE_MASK_WDATA_DLG);
6619 trace_nfsd_cb_recall_any(clp->cl_ra);
6620 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6621 }
6622 }
6623
6624 static void
6625 nfsd4_state_shrinker_worker(struct work_struct *work)
6626 {
6627 struct nfsd_net *nn = container_of(work, struct nfsd_net,
6628 nfsd_shrinker_work);
6629
6630 courtesy_client_reaper(nn);
6631 deleg_reaper(nn);
6632 }
6633
6634 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6635 {
6636 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6637 return nfserr_bad_stateid;
6638 return nfs_ok;
6639 }
6640
6641 static
6642 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6643 {
6644 __be32 status = nfserr_openmode;
6645
6646 /* For lock stateid's,
we test the parent open, not the lock: */ 6647 if (stp->st_openstp) 6648 stp = stp->st_openstp; 6649 if ((flags & WR_STATE) && !access_permit_write(stp)) 6650 goto out; 6651 if ((flags & RD_STATE) && !access_permit_read(stp)) 6652 goto out; 6653 status = nfs_ok; 6654 out: 6655 return status; 6656 } 6657 6658 static inline __be32 6659 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 6660 { 6661 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 6662 return nfs_ok; 6663 else if (opens_in_grace(net)) { 6664 /* Answer in remaining cases depends on existence of 6665 * conflicting state; so we must wait out the grace period. */ 6666 return nfserr_grace; 6667 } else if (flags & WR_STATE) 6668 return nfs4_share_conflict(current_fh, 6669 NFS4_SHARE_DENY_WRITE); 6670 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 6671 return nfs4_share_conflict(current_fh, 6672 NFS4_SHARE_DENY_READ); 6673 } 6674 6675 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 6676 { 6677 /* 6678 * When sessions are used the stateid generation number is ignored 6679 * when it is zero. 6680 */ 6681 if (has_session && in->si_generation == 0) 6682 return nfs_ok; 6683 6684 if (in->si_generation == ref->si_generation) 6685 return nfs_ok; 6686 6687 /* If the client sends us a stateid from the future, it's buggy: */ 6688 if (nfsd4_stateid_generation_after(in, ref)) 6689 return nfserr_bad_stateid; 6690 /* 6691 * However, we could see a stateid from the past, even from a 6692 * non-buggy client. For example, if the client sends a lock 6693 * while some IO is outstanding, the lock may bump si_generation 6694 * while the IO is still in flight. The client could avoid that 6695 * situation by waiting for responses on all the IO requests, 6696 * but better performance may result in retrying IO that 6697 * receives an old_stateid error if requests are rarely 6698 * reordered in flight: 6699 */ 6700 return nfserr_old_stateid; 6701 } 6702 6703 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session) 6704 { 6705 __be32 ret; 6706 6707 spin_lock(&s->sc_lock); 6708 ret = nfsd4_verify_open_stid(s); 6709 if (ret == nfs_ok) 6710 ret = check_stateid_generation(in, &s->sc_stateid, has_session); 6711 spin_unlock(&s->sc_lock); 6712 if (ret == nfserr_admin_revoked) 6713 nfsd40_drop_revoked_stid(s->sc_client, 6714 &s->sc_stateid); 6715 return ret; 6716 } 6717 6718 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) 6719 { 6720 if (ols->st_stateowner->so_is_open_owner && 6721 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 6722 return nfserr_bad_stateid; 6723 return nfs_ok; 6724 } 6725 6726 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 6727 { 6728 struct nfs4_stid *s; 6729 __be32 status = nfserr_bad_stateid; 6730 6731 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 6732 CLOSE_STATEID(stateid)) 6733 return status; 6734 spin_lock(&cl->cl_lock); 6735 s = find_stateid_locked(cl, stateid); 6736 if (!s) 6737 goto out_unlock; 6738 status = nfsd4_stid_check_stateid_generation(stateid, s, 1); 6739 if (status) 6740 goto out_unlock; 6741 status = nfsd4_verify_open_stid(s); 6742 if (status) 6743 goto out_unlock; 6744 6745 switch (s->sc_type) { 6746 case SC_TYPE_DELEG: 6747 status = nfs_ok; 6748 break; 6749 case SC_TYPE_OPEN: 6750 case SC_TYPE_LOCK: 6751 status = nfsd4_check_openowner_confirmed(openlockstateid(s)); 6752 break; 6753 default: 6754 printk("unknown 
stateid type %x\n", s->sc_type); 6755 status = nfserr_bad_stateid; 6756 } 6757 out_unlock: 6758 spin_unlock(&cl->cl_lock); 6759 if (status == nfserr_admin_revoked) 6760 nfsd40_drop_revoked_stid(cl, stateid); 6761 return status; 6762 } 6763 6764 __be32 6765 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 6766 stateid_t *stateid, 6767 unsigned short typemask, unsigned short statusmask, 6768 struct nfs4_stid **s, struct nfsd_net *nn) 6769 { 6770 __be32 status; 6771 struct nfs4_stid *stid; 6772 bool return_revoked = false; 6773 6774 /* 6775 * only return revoked delegations if explicitly asked. 6776 * otherwise we report revoked or bad_stateid status. 6777 */ 6778 if (statusmask & SC_STATUS_REVOKED) 6779 return_revoked = true; 6780 if (typemask & SC_TYPE_DELEG) 6781 /* Always allow REVOKED for DELEG so we can 6782 * retturn the appropriate error. 6783 */ 6784 statusmask |= SC_STATUS_REVOKED; 6785 6786 statusmask |= SC_STATUS_ADMIN_REVOKED; 6787 6788 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 6789 CLOSE_STATEID(stateid)) 6790 return nfserr_bad_stateid; 6791 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); 6792 if (status == nfserr_stale_clientid) { 6793 if (cstate->session) 6794 return nfserr_bad_stateid; 6795 return nfserr_stale_stateid; 6796 } 6797 if (status) 6798 return status; 6799 stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask); 6800 if (!stid) 6801 return nfserr_bad_stateid; 6802 if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) { 6803 nfs4_put_stid(stid); 6804 return nfserr_deleg_revoked; 6805 } 6806 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { 6807 nfsd40_drop_revoked_stid(cstate->clp, stateid); 6808 nfs4_put_stid(stid); 6809 return nfserr_admin_revoked; 6810 } 6811 *s = stid; 6812 return nfs_ok; 6813 } 6814 6815 static struct nfsd_file * 6816 nfs4_find_file(struct nfs4_stid *s, int flags) 6817 { 6818 struct nfsd_file *ret = NULL; 6819 6820 if (!s || s->sc_status) 6821 return NULL; 6822 6823 switch (s->sc_type) { 6824 case SC_TYPE_DELEG: 6825 spin_lock(&s->sc_file->fi_lock); 6826 ret = nfsd_file_get(s->sc_file->fi_deleg_file); 6827 spin_unlock(&s->sc_file->fi_lock); 6828 break; 6829 case SC_TYPE_OPEN: 6830 case SC_TYPE_LOCK: 6831 if (flags & RD_STATE) 6832 ret = find_readable_file(s->sc_file); 6833 else 6834 ret = find_writeable_file(s->sc_file); 6835 } 6836 6837 return ret; 6838 } 6839 6840 static __be32 6841 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags) 6842 { 6843 __be32 status; 6844 6845 status = nfsd4_check_openowner_confirmed(ols); 6846 if (status) 6847 return status; 6848 return nfs4_check_openmode(ols, flags); 6849 } 6850 6851 static __be32 6852 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, 6853 struct nfsd_file **nfp, int flags) 6854 { 6855 int acc = (flags & RD_STATE) ? 
NFSD_MAY_READ : NFSD_MAY_WRITE; 6856 struct nfsd_file *nf; 6857 __be32 status; 6858 6859 nf = nfs4_find_file(s, flags); 6860 if (nf) { 6861 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, 6862 acc | NFSD_MAY_OWNER_OVERRIDE); 6863 if (status) { 6864 nfsd_file_put(nf); 6865 goto out; 6866 } 6867 } else { 6868 status = nfsd_file_acquire(rqstp, fhp, acc, &nf); 6869 if (status) 6870 return status; 6871 } 6872 *nfp = nf; 6873 out: 6874 return status; 6875 } 6876 static void 6877 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 6878 { 6879 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID); 6880 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count)) 6881 return; 6882 list_del(&cps->cp_list); 6883 idr_remove(&nn->s2s_cp_stateids, 6884 cps->cp_stateid.cs_stid.si_opaque.so_id); 6885 kfree(cps); 6886 } 6887 /* 6888 * A READ from an inter server to server COPY will have a 6889 * copy stateid. Look up the copy notify stateid from the 6890 * idr structure and take a reference on it. 6891 */ 6892 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, 6893 struct nfs4_client *clp, 6894 struct nfs4_cpntf_state **cps) 6895 { 6896 copy_stateid_t *cps_t; 6897 struct nfs4_cpntf_state *state = NULL; 6898 6899 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) 6900 return nfserr_bad_stateid; 6901 spin_lock(&nn->s2s_cp_lock); 6902 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); 6903 if (cps_t) { 6904 state = container_of(cps_t, struct nfs4_cpntf_state, 6905 cp_stateid); 6906 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) { 6907 state = NULL; 6908 goto unlock; 6909 } 6910 if (!clp) 6911 refcount_inc(&state->cp_stateid.cs_count); 6912 else 6913 _free_cpntf_state_locked(nn, state); 6914 } 6915 unlock: 6916 spin_unlock(&nn->s2s_cp_lock); 6917 if (!state) 6918 return nfserr_bad_stateid; 6919 if (!clp) 6920 *cps = state; 6921 return 0; 6922 } 6923 6924 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, 6925 struct nfs4_stid **stid) 6926 { 6927 __be32 status; 6928 struct nfs4_cpntf_state *cps = NULL; 6929 struct nfs4_client *found; 6930 6931 status = manage_cpntf_state(nn, st, NULL, &cps); 6932 if (status) 6933 return status; 6934 6935 cps->cpntf_time = ktime_get_boottime_seconds(); 6936 6937 status = nfserr_expired; 6938 found = lookup_clientid(&cps->cp_p_clid, true, nn); 6939 if (!found) 6940 goto out; 6941 6942 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, 6943 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK, 6944 0); 6945 if (*stid) 6946 status = nfs_ok; 6947 else 6948 status = nfserr_bad_stateid; 6949 6950 put_client_renew(found); 6951 out: 6952 nfs4_put_cpntf_state(nn, cps); 6953 return status; 6954 } 6955 6956 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 6957 { 6958 spin_lock(&nn->s2s_cp_lock); 6959 _free_cpntf_state_locked(nn, cps); 6960 spin_unlock(&nn->s2s_cp_lock); 6961 } 6962 6963 /** 6964 * nfs4_preprocess_stateid_op - find and prep stateid for an operation 6965 * @rqstp: incoming request from client 6966 * @cstate: current compound state 6967 * @fhp: filehandle associated with requested stateid 6968 * @stateid: stateid (provided by client) 6969 * @flags: flags describing type of operation to be done 6970 * @nfp: optional nfsd_file return pointer (may be NULL) 6971 * @cstid: optional returned nfs4_stid pointer (may be NULL) 6972 * 6973 * Given info from the client, look up a nfs4_stid for the operation. 
On 6974 * success, it returns a reference to the nfs4_stid and/or the nfsd_file 6975 * associated with it. 6976 */ 6977 __be32 6978 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 6979 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 6980 stateid_t *stateid, int flags, struct nfsd_file **nfp, 6981 struct nfs4_stid **cstid) 6982 { 6983 struct net *net = SVC_NET(rqstp); 6984 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 6985 struct nfs4_stid *s = NULL; 6986 __be32 status; 6987 6988 if (nfp) 6989 *nfp = NULL; 6990 6991 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 6992 if (cstid) 6993 status = nfserr_bad_stateid; 6994 else 6995 status = check_special_stateids(net, fhp, stateid, 6996 flags); 6997 goto done; 6998 } 6999 7000 status = nfsd4_lookup_stateid(cstate, stateid, 7001 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK, 7002 0, &s, nn); 7003 if (status == nfserr_bad_stateid) 7004 status = find_cpntf_state(nn, stateid, &s); 7005 if (status) 7006 return status; 7007 status = nfsd4_stid_check_stateid_generation(stateid, s, 7008 nfsd4_has_session(cstate)); 7009 if (status) 7010 goto out; 7011 7012 switch (s->sc_type) { 7013 case SC_TYPE_DELEG: 7014 status = nfs4_check_delegmode(delegstateid(s), flags); 7015 break; 7016 case SC_TYPE_OPEN: 7017 case SC_TYPE_LOCK: 7018 status = nfs4_check_olstateid(openlockstateid(s), flags); 7019 break; 7020 } 7021 if (status) 7022 goto out; 7023 status = nfs4_check_fh(fhp, s); 7024 7025 done: 7026 if (status == nfs_ok && nfp) 7027 status = nfs4_check_file(rqstp, fhp, s, nfp, flags); 7028 out: 7029 if (s) { 7030 if (!status && cstid) 7031 *cstid = s; 7032 else 7033 nfs4_put_stid(s); 7034 } 7035 return status; 7036 } 7037 7038 /* 7039 * Test if the stateid is valid 7040 */ 7041 __be32 7042 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7043 union nfsd4_op_u *u) 7044 { 7045 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; 7046 struct nfsd4_test_stateid_id *stateid; 7047 struct nfs4_client *cl = cstate->clp; 7048 7049 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 7050 stateid->ts_id_status = 7051 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 7052 7053 return nfs_ok; 7054 } 7055 7056 static __be32 7057 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) 7058 { 7059 struct nfs4_ol_stateid *stp = openlockstateid(s); 7060 __be32 ret; 7061 7062 ret = nfsd4_lock_ol_stateid(stp); 7063 if (ret) 7064 goto out_put_stid; 7065 7066 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 7067 if (ret) 7068 goto out; 7069 7070 ret = nfserr_locks_held; 7071 if (check_for_locks(stp->st_stid.sc_file, 7072 lockowner(stp->st_stateowner))) 7073 goto out; 7074 7075 release_lock_stateid(stp); 7076 ret = nfs_ok; 7077 7078 out: 7079 mutex_unlock(&stp->st_mutex); 7080 out_put_stid: 7081 nfs4_put_stid(s); 7082 return ret; 7083 } 7084 7085 __be32 7086 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7087 union nfsd4_op_u *u) 7088 { 7089 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; 7090 stateid_t *stateid = &free_stateid->fr_stateid; 7091 struct nfs4_stid *s; 7092 struct nfs4_delegation *dp; 7093 struct nfs4_client *cl = cstate->clp; 7094 __be32 ret = nfserr_bad_stateid; 7095 7096 spin_lock(&cl->cl_lock); 7097 s = find_stateid_locked(cl, stateid); 7098 if (!s || s->sc_status & SC_STATUS_CLOSED) 7099 goto out_unlock; 7100 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) { 7101 nfsd4_drop_revoked_stid(s); 7102 ret = nfs_ok; 7103 goto out; 7104 } 
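/*
 * The remaining cases are type-specific: a revoked delegation is taken off
 * the recall LRU and freed; other delegations and open stateids come back
 * as nfserr_locks_held; lock stateids go to nfsd4_free_lock_stateid(),
 * which frees them only if the lockowner no longer holds any locks.
 */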
7105 spin_lock(&s->sc_lock);
7106 switch (s->sc_type) {
7107 case SC_TYPE_DELEG:
7108 if (s->sc_status & SC_STATUS_REVOKED) {
7109 spin_unlock(&s->sc_lock);
7110 dp = delegstateid(s);
7111 list_del_init(&dp->dl_recall_lru);
7112 spin_unlock(&cl->cl_lock);
7113 nfs4_put_stid(s);
7114 ret = nfs_ok;
7115 goto out;
7116 }
7117 ret = nfserr_locks_held;
7118 break;
7119 case SC_TYPE_OPEN:
7120 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
7121 if (ret)
7122 break;
7123 ret = nfserr_locks_held;
7124 break;
7125 case SC_TYPE_LOCK:
7126 spin_unlock(&s->sc_lock);
7127 refcount_inc(&s->sc_count);
7128 spin_unlock(&cl->cl_lock);
7129 ret = nfsd4_free_lock_stateid(stateid, s);
7130 goto out;
7131 }
7132 spin_unlock(&s->sc_lock);
7133 out_unlock:
7134 spin_unlock(&cl->cl_lock);
7135 out:
7136 return ret;
7137 }
7138
7139 static inline int
7140 setlkflg (int type)
7141 {
7142 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
7143 RD_STATE : WR_STATE;
7144 }
7145
7146 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
7147 {
7148 struct svc_fh *current_fh = &cstate->current_fh;
7149 struct nfs4_stateowner *sop = stp->st_stateowner;
7150 __be32 status;
7151
7152 status = nfsd4_check_seqid(cstate, sop, seqid);
7153 if (status)
7154 return status;
7155 status = nfsd4_lock_ol_stateid(stp);
7156 if (status != nfs_ok)
7157 return status;
7158 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
7159 if (status == nfs_ok)
7160 status = nfs4_check_fh(current_fh, &stp->st_stid);
7161 if (status != nfs_ok)
7162 mutex_unlock(&stp->st_mutex);
7163 return status;
7164 }
7165
7166 /**
7167 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
7168 * @cstate: compound state
7169 * @seqid: seqid (provided by client)
7170 * @stateid: stateid (provided by client)
7171 * @typemask: mask of allowable types for this operation
7172 * @statusmask: mask of allowed states: 0 or SC_STATUS_CLOSED
7173 * @stpp: return pointer for the stateid found
7174 * @nn: net namespace for request
7175 *
7176 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
7177 * return it in @stpp. On an nfs_ok return, the returned stateid will
7178 * have its st_mutex locked.
7179 */ 7180 static __be32 7181 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 7182 stateid_t *stateid, 7183 unsigned short typemask, unsigned short statusmask, 7184 struct nfs4_ol_stateid **stpp, 7185 struct nfsd_net *nn) 7186 { 7187 __be32 status; 7188 struct nfs4_stid *s; 7189 struct nfs4_ol_stateid *stp = NULL; 7190 7191 trace_nfsd_preprocess(seqid, stateid); 7192 7193 *stpp = NULL; 7194 status = nfsd4_lookup_stateid(cstate, stateid, 7195 typemask, statusmask, &s, nn); 7196 if (status) 7197 return status; 7198 stp = openlockstateid(s); 7199 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); 7200 7201 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); 7202 if (!status) 7203 *stpp = stp; 7204 else 7205 nfs4_put_stid(&stp->st_stid); 7206 return status; 7207 } 7208 7209 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 7210 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 7211 { 7212 __be32 status; 7213 struct nfs4_openowner *oo; 7214 struct nfs4_ol_stateid *stp; 7215 7216 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 7217 SC_TYPE_OPEN, 0, &stp, nn); 7218 if (status) 7219 return status; 7220 oo = openowner(stp->st_stateowner); 7221 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 7222 mutex_unlock(&stp->st_mutex); 7223 nfs4_put_stid(&stp->st_stid); 7224 return nfserr_bad_stateid; 7225 } 7226 *stpp = stp; 7227 return nfs_ok; 7228 } 7229 7230 __be32 7231 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7232 union nfsd4_op_u *u) 7233 { 7234 struct nfsd4_open_confirm *oc = &u->open_confirm; 7235 __be32 status; 7236 struct nfs4_openowner *oo; 7237 struct nfs4_ol_stateid *stp; 7238 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7239 7240 dprintk("NFSD: nfsd4_open_confirm on file %pd\n", 7241 cstate->current_fh.fh_dentry); 7242 7243 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 7244 if (status) 7245 return status; 7246 7247 status = nfs4_preprocess_seqid_op(cstate, 7248 oc->oc_seqid, &oc->oc_req_stateid, 7249 SC_TYPE_OPEN, 0, &stp, nn); 7250 if (status) 7251 goto out; 7252 oo = openowner(stp->st_stateowner); 7253 status = nfserr_bad_stateid; 7254 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 7255 mutex_unlock(&stp->st_mutex); 7256 goto put_stateid; 7257 } 7258 oo->oo_flags |= NFS4_OO_CONFIRMED; 7259 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 7260 mutex_unlock(&stp->st_mutex); 7261 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); 7262 nfsd4_client_record_create(oo->oo_owner.so_client); 7263 status = nfs_ok; 7264 put_stateid: 7265 nfs4_put_stid(&stp->st_stid); 7266 out: 7267 nfsd4_bump_seqid(cstate, status); 7268 return status; 7269 } 7270 7271 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 7272 { 7273 if (!test_access(access, stp)) 7274 return; 7275 nfs4_file_put_access(stp->st_stid.sc_file, access); 7276 clear_access(access, stp); 7277 } 7278 7279 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 7280 { 7281 switch (to_access) { 7282 case NFS4_SHARE_ACCESS_READ: 7283 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 7284 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 7285 break; 7286 case NFS4_SHARE_ACCESS_WRITE: 7287 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 7288 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 7289 break; 7290 case NFS4_SHARE_ACCESS_BOTH: 7291 break; 7292 
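/* Any other share access value indicates a bug in the caller. */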
default: 7293 WARN_ON_ONCE(1); 7294 } 7295 } 7296 7297 __be32 7298 nfsd4_open_downgrade(struct svc_rqst *rqstp, 7299 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 7300 { 7301 struct nfsd4_open_downgrade *od = &u->open_downgrade; 7302 __be32 status; 7303 struct nfs4_ol_stateid *stp; 7304 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7305 7306 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 7307 cstate->current_fh.fh_dentry); 7308 7309 /* We don't yet support WANT bits: */ 7310 if (od->od_deleg_want) 7311 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 7312 od->od_deleg_want); 7313 7314 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 7315 &od->od_stateid, &stp, nn); 7316 if (status) 7317 goto out; 7318 status = nfserr_inval; 7319 if (!test_access(od->od_share_access, stp)) { 7320 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", 7321 stp->st_access_bmap, od->od_share_access); 7322 goto put_stateid; 7323 } 7324 if (!test_deny(od->od_share_deny, stp)) { 7325 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", 7326 stp->st_deny_bmap, od->od_share_deny); 7327 goto put_stateid; 7328 } 7329 nfs4_stateid_downgrade(stp, od->od_share_access); 7330 reset_union_bmap_deny(od->od_share_deny, stp); 7331 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 7332 status = nfs_ok; 7333 put_stateid: 7334 mutex_unlock(&stp->st_mutex); 7335 nfs4_put_stid(&stp->st_stid); 7336 out: 7337 nfsd4_bump_seqid(cstate, status); 7338 return status; 7339 } 7340 7341 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 7342 { 7343 struct nfs4_client *clp = s->st_stid.sc_client; 7344 bool unhashed; 7345 LIST_HEAD(reaplist); 7346 struct nfs4_ol_stateid *stp; 7347 7348 spin_lock(&clp->cl_lock); 7349 unhashed = unhash_open_stateid(s, &reaplist); 7350 7351 if (clp->cl_minorversion) { 7352 if (unhashed) 7353 put_ol_stateid_locked(s, &reaplist); 7354 spin_unlock(&clp->cl_lock); 7355 list_for_each_entry(stp, &reaplist, st_locks) 7356 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); 7357 free_ol_stateid_reaplist(&reaplist); 7358 } else { 7359 spin_unlock(&clp->cl_lock); 7360 free_ol_stateid_reaplist(&reaplist); 7361 if (unhashed) 7362 move_to_close_lru(s, clp->net); 7363 } 7364 } 7365 7366 /* 7367 * nfs4_unlock_state() called after encode 7368 */ 7369 __be32 7370 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7371 union nfsd4_op_u *u) 7372 { 7373 struct nfsd4_close *close = &u->close; 7374 __be32 status; 7375 struct nfs4_ol_stateid *stp; 7376 struct net *net = SVC_NET(rqstp); 7377 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7378 7379 dprintk("NFSD: nfsd4_close on file %pd\n", 7380 cstate->current_fh.fh_dentry); 7381 7382 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 7383 &close->cl_stateid, 7384 SC_TYPE_OPEN, SC_STATUS_CLOSED, 7385 &stp, nn); 7386 nfsd4_bump_seqid(cstate, status); 7387 if (status) 7388 goto out; 7389 7390 spin_lock(&stp->st_stid.sc_client->cl_lock); 7391 stp->st_stid.sc_status |= SC_STATUS_CLOSED; 7392 spin_unlock(&stp->st_stid.sc_client->cl_lock); 7393 7394 /* 7395 * Technically we don't _really_ have to increment or copy it, since 7396 * it should just be gone after this operation and we clobber the 7397 * copied value below, but we continue to do so here just to ensure 7398 * that racing ops see that there was a state change. 
7399 */ 7400 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 7401 7402 nfsd4_close_open_stateid(stp); 7403 mutex_unlock(&stp->st_mutex); 7404 7405 /* v4.1+ suggests that we send a special stateid in here, since the 7406 * clients should just ignore this anyway. Since this is not useful 7407 * for v4.0 clients either, we set it to the special close_stateid 7408 * universally. 7409 * 7410 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5 7411 */ 7412 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); 7413 7414 /* put reference from nfs4_preprocess_seqid_op */ 7415 nfs4_put_stid(&stp->st_stid); 7416 out: 7417 return status; 7418 } 7419 7420 __be32 7421 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7422 union nfsd4_op_u *u) 7423 { 7424 struct nfsd4_delegreturn *dr = &u->delegreturn; 7425 struct nfs4_delegation *dp; 7426 stateid_t *stateid = &dr->dr_stateid; 7427 struct nfs4_stid *s; 7428 __be32 status; 7429 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7430 7431 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 7432 return status; 7433 7434 status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, 0, &s, nn); 7435 if (status) 7436 goto out; 7437 dp = delegstateid(s); 7438 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); 7439 if (status) 7440 goto put_stateid; 7441 7442 trace_nfsd_deleg_return(stateid); 7443 wake_up_var(d_inode(cstate->current_fh.fh_dentry)); 7444 destroy_delegation(dp); 7445 put_stateid: 7446 nfs4_put_stid(&dp->dl_stid); 7447 out: 7448 return status; 7449 } 7450 7451 /* last octet in a range */ 7452 static inline u64 7453 last_byte_offset(u64 start, u64 len) 7454 { 7455 u64 end; 7456 7457 WARN_ON_ONCE(!len); 7458 end = start + len; 7459 return end > start ? end - 1: NFS4_MAX_UINT64; 7460 } 7461 7462 /* 7463 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 7464 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 7465 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 7466 * locking, this prevents us from being completely protocol-compliant. The 7467 * real solution to this problem is to start using unsigned file offsets in 7468 * the VFS, but this is a very deep change! 
7469 */ 7470 static inline void 7471 nfs4_transform_lock_offset(struct file_lock *lock) 7472 { 7473 if (lock->fl_start < 0) 7474 lock->fl_start = OFFSET_MAX; 7475 if (lock->fl_end < 0) 7476 lock->fl_end = OFFSET_MAX; 7477 } 7478 7479 static fl_owner_t 7480 nfsd4_lm_get_owner(fl_owner_t owner) 7481 { 7482 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 7483 7484 nfs4_get_stateowner(&lo->lo_owner); 7485 return owner; 7486 } 7487 7488 static void 7489 nfsd4_lm_put_owner(fl_owner_t owner) 7490 { 7491 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 7492 7493 if (lo) 7494 nfs4_put_stateowner(&lo->lo_owner); 7495 } 7496 7497 /* return pointer to struct nfs4_client if client is expirable */ 7498 static bool 7499 nfsd4_lm_lock_expirable(struct file_lock *cfl) 7500 { 7501 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner; 7502 struct nfs4_client *clp = lo->lo_owner.so_client; 7503 struct nfsd_net *nn; 7504 7505 if (try_to_expire_client(clp)) { 7506 nn = net_generic(clp->net, nfsd_net_id); 7507 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 7508 return true; 7509 } 7510 return false; 7511 } 7512 7513 /* schedule laundromat to run immediately and wait for it to complete */ 7514 static void 7515 nfsd4_lm_expire_lock(void) 7516 { 7517 flush_workqueue(laundry_wq); 7518 } 7519 7520 static void 7521 nfsd4_lm_notify(struct file_lock *fl) 7522 { 7523 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner; 7524 struct net *net = lo->lo_owner.so_client->net; 7525 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7526 struct nfsd4_blocked_lock *nbl = container_of(fl, 7527 struct nfsd4_blocked_lock, nbl_lock); 7528 bool queue = false; 7529 7530 /* An empty list means that something else is going to be using it */ 7531 spin_lock(&nn->blocked_locks_lock); 7532 if (!list_empty(&nbl->nbl_list)) { 7533 list_del_init(&nbl->nbl_list); 7534 list_del_init(&nbl->nbl_lru); 7535 queue = true; 7536 } 7537 spin_unlock(&nn->blocked_locks_lock); 7538 7539 if (queue) { 7540 trace_nfsd_cb_notify_lock(lo, nbl); 7541 nfsd4_run_cb(&nbl->nbl_cb); 7542 } 7543 } 7544 7545 static const struct lock_manager_operations nfsd_posix_mng_ops = { 7546 .lm_mod_owner = THIS_MODULE, 7547 .lm_notify = nfsd4_lm_notify, 7548 .lm_get_owner = nfsd4_lm_get_owner, 7549 .lm_put_owner = nfsd4_lm_put_owner, 7550 .lm_lock_expirable = nfsd4_lm_lock_expirable, 7551 .lm_expire_lock = nfsd4_lm_expire_lock, 7552 }; 7553 7554 static inline void 7555 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 7556 { 7557 struct nfs4_lockowner *lo; 7558 7559 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 7560 lo = (struct nfs4_lockowner *) fl->c.flc_owner; 7561 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, 7562 GFP_KERNEL); 7563 if (!deny->ld_owner.data) 7564 /* We just don't care that much */ 7565 goto nevermind; 7566 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 7567 } else { 7568 nevermind: 7569 deny->ld_owner.len = 0; 7570 deny->ld_owner.data = NULL; 7571 deny->ld_clientid.cl_boot = 0; 7572 deny->ld_clientid.cl_id = 0; 7573 } 7574 deny->ld_start = fl->fl_start; 7575 deny->ld_length = NFS4_MAX_UINT64; 7576 if (fl->fl_end != NFS4_MAX_UINT64) 7577 deny->ld_length = fl->fl_end - fl->fl_start + 1; 7578 deny->ld_type = NFS4_READ_LT; 7579 if (fl->c.flc_type != F_RDLCK) 7580 deny->ld_type = NFS4_WRITE_LT; 7581 } 7582 7583 static struct nfs4_lockowner * 7584 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner) 7585 { 7586 unsigned int strhashval 
= ownerstr_hashval(owner); 7587 struct nfs4_stateowner *so; 7588 7589 lockdep_assert_held(&clp->cl_lock); 7590 7591 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], 7592 so_strhash) { 7593 if (so->so_is_open_owner) 7594 continue; 7595 if (same_owner_str(so, owner)) 7596 return lockowner(nfs4_get_stateowner(so)); 7597 } 7598 return NULL; 7599 } 7600 7601 static struct nfs4_lockowner * 7602 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) 7603 { 7604 struct nfs4_lockowner *lo; 7605 7606 spin_lock(&clp->cl_lock); 7607 lo = find_lockowner_str_locked(clp, owner); 7608 spin_unlock(&clp->cl_lock); 7609 return lo; 7610 } 7611 7612 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) 7613 { 7614 unhash_lockowner_locked(lockowner(sop)); 7615 } 7616 7617 static void nfs4_free_lockowner(struct nfs4_stateowner *sop) 7618 { 7619 struct nfs4_lockowner *lo = lockowner(sop); 7620 7621 kmem_cache_free(lockowner_slab, lo); 7622 } 7623 7624 static const struct nfs4_stateowner_operations lockowner_ops = { 7625 .so_unhash = nfs4_unhash_lockowner, 7626 .so_free = nfs4_free_lockowner, 7627 }; 7628 7629 /* 7630 * Alloc a lock owner structure. 7631 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 7632 * occurred. 7633 * 7634 * strhashval = ownerstr_hashval 7635 */ 7636 static struct nfs4_lockowner * 7637 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, 7638 struct nfs4_ol_stateid *open_stp, 7639 struct nfsd4_lock *lock) 7640 { 7641 struct nfs4_lockowner *lo, *ret; 7642 7643 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 7644 if (!lo) 7645 return NULL; 7646 INIT_LIST_HEAD(&lo->lo_blocked); 7647 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 7648 lo->lo_owner.so_is_open_owner = 0; 7649 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; 7650 lo->lo_owner.so_ops = &lockowner_ops; 7651 spin_lock(&clp->cl_lock); 7652 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); 7653 if (ret == NULL) { 7654 list_add(&lo->lo_owner.so_strhash, 7655 &clp->cl_ownerstr_hashtbl[strhashval]); 7656 ret = lo; 7657 } else 7658 nfs4_free_stateowner(&lo->lo_owner); 7659 7660 spin_unlock(&clp->cl_lock); 7661 return ret; 7662 } 7663 7664 static struct nfs4_ol_stateid * 7665 find_lock_stateid(const struct nfs4_lockowner *lo, 7666 const struct nfs4_ol_stateid *ost) 7667 { 7668 struct nfs4_ol_stateid *lst; 7669 7670 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); 7671 7672 /* If ost is not hashed, ost->st_locks will not be valid */ 7673 if (!nfs4_ol_stateid_unhashed(ost)) 7674 list_for_each_entry(lst, &ost->st_locks, st_locks) { 7675 if (lst->st_stateowner == &lo->lo_owner) { 7676 refcount_inc(&lst->st_stid.sc_count); 7677 return lst; 7678 } 7679 } 7680 return NULL; 7681 } 7682 7683 static struct nfs4_ol_stateid * 7684 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, 7685 struct nfs4_file *fp, struct inode *inode, 7686 struct nfs4_ol_stateid *open_stp) 7687 { 7688 struct nfs4_client *clp = lo->lo_owner.so_client; 7689 struct nfs4_ol_stateid *retstp; 7690 7691 mutex_init(&stp->st_mutex); 7692 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 7693 retry: 7694 spin_lock(&clp->cl_lock); 7695 if (nfs4_ol_stateid_unhashed(open_stp)) 7696 goto out_close; 7697 retstp = find_lock_stateid(lo, open_stp); 7698 if (retstp) 7699 goto out_found; 7700 refcount_inc(&stp->st_stid.sc_count); 7701 stp->st_stid.sc_type = SC_TYPE_LOCK; 7702 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); 7703 
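/* Take a reference on the file; it is released when the stateid is freed. */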
get_nfs4_file(fp); 7704 stp->st_stid.sc_file = fp; 7705 stp->st_access_bmap = 0; 7706 stp->st_deny_bmap = open_stp->st_deny_bmap; 7707 stp->st_openstp = open_stp; 7708 spin_lock(&fp->fi_lock); 7709 list_add(&stp->st_locks, &open_stp->st_locks); 7710 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 7711 list_add(&stp->st_perfile, &fp->fi_stateids); 7712 spin_unlock(&fp->fi_lock); 7713 spin_unlock(&clp->cl_lock); 7714 return stp; 7715 out_found: 7716 spin_unlock(&clp->cl_lock); 7717 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 7718 nfs4_put_stid(&retstp->st_stid); 7719 goto retry; 7720 } 7721 /* To keep mutex tracking happy */ 7722 mutex_unlock(&stp->st_mutex); 7723 return retstp; 7724 out_close: 7725 spin_unlock(&clp->cl_lock); 7726 mutex_unlock(&stp->st_mutex); 7727 return NULL; 7728 } 7729 7730 static struct nfs4_ol_stateid * 7731 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, 7732 struct inode *inode, struct nfs4_ol_stateid *ost, 7733 bool *new) 7734 { 7735 struct nfs4_stid *ns = NULL; 7736 struct nfs4_ol_stateid *lst; 7737 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 7738 struct nfs4_client *clp = oo->oo_owner.so_client; 7739 7740 *new = false; 7741 spin_lock(&clp->cl_lock); 7742 lst = find_lock_stateid(lo, ost); 7743 spin_unlock(&clp->cl_lock); 7744 if (lst != NULL) { 7745 if (nfsd4_lock_ol_stateid(lst) == nfs_ok) 7746 goto out; 7747 nfs4_put_stid(&lst->st_stid); 7748 } 7749 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); 7750 if (ns == NULL) 7751 return NULL; 7752 7753 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost); 7754 if (lst == openlockstateid(ns)) 7755 *new = true; 7756 else 7757 nfs4_put_stid(ns); 7758 out: 7759 return lst; 7760 } 7761 7762 static int 7763 check_lock_length(u64 offset, u64 length) 7764 { 7765 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 7766 (length > ~offset))); 7767 } 7768 7769 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 7770 { 7771 struct nfs4_file *fp = lock_stp->st_stid.sc_file; 7772 7773 lockdep_assert_held(&fp->fi_lock); 7774 7775 if (test_access(access, lock_stp)) 7776 return; 7777 __nfs4_file_get_access(fp, access); 7778 set_access(access, lock_stp); 7779 } 7780 7781 static __be32 7782 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 7783 struct nfs4_ol_stateid *ost, 7784 struct nfsd4_lock *lock, 7785 struct nfs4_ol_stateid **plst, bool *new) 7786 { 7787 __be32 status; 7788 struct nfs4_file *fi = ost->st_stid.sc_file; 7789 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 7790 struct nfs4_client *cl = oo->oo_owner.so_client; 7791 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 7792 struct nfs4_lockowner *lo; 7793 struct nfs4_ol_stateid *lst; 7794 unsigned int strhashval; 7795 7796 lo = find_lockowner_str(cl, &lock->lk_new_owner); 7797 if (!lo) { 7798 strhashval = ownerstr_hashval(&lock->lk_new_owner); 7799 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 7800 if (lo == NULL) 7801 return nfserr_jukebox; 7802 } else { 7803 /* with an existing lockowner, seqids must be the same */ 7804 status = nfserr_bad_seqid; 7805 if (!cstate->minorversion && 7806 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) 7807 goto out; 7808 } 7809 7810 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 7811 if (lst == NULL) { 7812 status = nfserr_jukebox; 7813 goto out; 7814 } 7815 7816 status = nfs_ok; 7817 *plst = lst; 7818 out: 7819 nfs4_put_stateowner(&lo->lo_owner); 7820 return 
status; 7821 } 7822 7823 /* 7824 * LOCK operation 7825 */ 7826 __be32 7827 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7828 union nfsd4_op_u *u) 7829 { 7830 struct nfsd4_lock *lock = &u->lock; 7831 struct nfs4_openowner *open_sop = NULL; 7832 struct nfs4_lockowner *lock_sop = NULL; 7833 struct nfs4_ol_stateid *lock_stp = NULL; 7834 struct nfs4_ol_stateid *open_stp = NULL; 7835 struct nfs4_file *fp; 7836 struct nfsd_file *nf = NULL; 7837 struct nfsd4_blocked_lock *nbl = NULL; 7838 struct file_lock *file_lock = NULL; 7839 struct file_lock *conflock = NULL; 7840 struct super_block *sb; 7841 __be32 status = 0; 7842 int lkflg; 7843 int err; 7844 bool new = false; 7845 unsigned char type; 7846 unsigned int flags = FL_POSIX; 7847 struct net *net = SVC_NET(rqstp); 7848 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7849 7850 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 7851 (long long) lock->lk_offset, 7852 (long long) lock->lk_length); 7853 7854 if (check_lock_length(lock->lk_offset, lock->lk_length)) 7855 return nfserr_inval; 7856 7857 if ((status = fh_verify(rqstp, &cstate->current_fh, 7858 S_IFREG, NFSD_MAY_LOCK))) { 7859 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 7860 return status; 7861 } 7862 sb = cstate->current_fh.fh_dentry->d_sb; 7863 7864 if (lock->lk_is_new) { 7865 if (nfsd4_has_session(cstate)) 7866 /* See rfc 5661 18.10.3: given clientid is ignored: */ 7867 memcpy(&lock->lk_new_clientid, 7868 &cstate->clp->cl_clientid, 7869 sizeof(clientid_t)); 7870 7871 /* validate and update open stateid and open seqid */ 7872 status = nfs4_preprocess_confirmed_seqid_op(cstate, 7873 lock->lk_new_open_seqid, 7874 &lock->lk_new_open_stateid, 7875 &open_stp, nn); 7876 if (status) 7877 goto out; 7878 mutex_unlock(&open_stp->st_mutex); 7879 open_sop = openowner(open_stp->st_stateowner); 7880 status = nfserr_bad_stateid; 7881 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 7882 &lock->lk_new_clientid)) 7883 goto out; 7884 status = lookup_or_create_lock_state(cstate, open_stp, lock, 7885 &lock_stp, &new); 7886 } else { 7887 status = nfs4_preprocess_seqid_op(cstate, 7888 lock->lk_old_lock_seqid, 7889 &lock->lk_old_lock_stateid, 7890 SC_TYPE_LOCK, 0, &lock_stp, 7891 nn); 7892 } 7893 if (status) 7894 goto out; 7895 lock_sop = lockowner(lock_stp->st_stateowner); 7896 7897 lkflg = setlkflg(lock->lk_type); 7898 status = nfs4_check_openmode(lock_stp, lkflg); 7899 if (status) 7900 goto out; 7901 7902 status = nfserr_grace; 7903 if (locks_in_grace(net) && !lock->lk_reclaim) 7904 goto out; 7905 status = nfserr_no_grace; 7906 if (!locks_in_grace(net) && lock->lk_reclaim) 7907 goto out; 7908 7909 if (lock->lk_reclaim) 7910 flags |= FL_RECLAIM; 7911 7912 fp = lock_stp->st_stid.sc_file; 7913 switch (lock->lk_type) { 7914 case NFS4_READW_LT: 7915 if (nfsd4_has_session(cstate) || 7916 exportfs_lock_op_is_async(sb->s_export_op)) 7917 flags |= FL_SLEEP; 7918 fallthrough; 7919 case NFS4_READ_LT: 7920 spin_lock(&fp->fi_lock); 7921 nf = find_readable_file_locked(fp); 7922 if (nf) 7923 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 7924 spin_unlock(&fp->fi_lock); 7925 type = F_RDLCK; 7926 break; 7927 case NFS4_WRITEW_LT: 7928 if (nfsd4_has_session(cstate) || 7929 exportfs_lock_op_is_async(sb->s_export_op)) 7930 flags |= FL_SLEEP; 7931 fallthrough; 7932 case NFS4_WRITE_LT: 7933 spin_lock(&fp->fi_lock); 7934 nf = find_writeable_file_locked(fp); 7935 if (nf) 7936 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 7937 spin_unlock(&fp->fi_lock); 7938 type = F_WRLCK; 
7939 break; 7940 default: 7941 status = nfserr_inval; 7942 goto out; 7943 } 7944 7945 if (!nf) { 7946 status = nfserr_openmode; 7947 goto out; 7948 } 7949 7950 /* 7951 * Most filesystems with their own ->lock operations will block 7952 * the nfsd thread waiting to acquire the lock. That leads to 7953 * deadlocks (we don't want every nfsd thread tied up waiting 7954 * for file locks), so don't attempt blocking lock notifications 7955 * on those filesystems: 7956 */ 7957 if (!exportfs_lock_op_is_async(sb->s_export_op)) 7958 flags &= ~FL_SLEEP; 7959 7960 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); 7961 if (!nbl) { 7962 dprintk("NFSD: %s: unable to allocate block!\n", __func__); 7963 status = nfserr_jukebox; 7964 goto out; 7965 } 7966 7967 file_lock = &nbl->nbl_lock; 7968 file_lock->c.flc_type = type; 7969 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); 7970 file_lock->c.flc_pid = current->tgid; 7971 file_lock->c.flc_file = nf->nf_file; 7972 file_lock->c.flc_flags = flags; 7973 file_lock->fl_lmops = &nfsd_posix_mng_ops; 7974 file_lock->fl_start = lock->lk_offset; 7975 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 7976 nfs4_transform_lock_offset(file_lock); 7977 7978 conflock = locks_alloc_lock(); 7979 if (!conflock) { 7980 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 7981 status = nfserr_jukebox; 7982 goto out; 7983 } 7984 7985 if (flags & FL_SLEEP) { 7986 nbl->nbl_time = ktime_get_boottime_seconds(); 7987 spin_lock(&nn->blocked_locks_lock); 7988 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 7989 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 7990 kref_get(&nbl->nbl_kref); 7991 spin_unlock(&nn->blocked_locks_lock); 7992 } 7993 7994 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); 7995 switch (err) { 7996 case 0: /* success! */ 7997 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); 7998 status = 0; 7999 if (lock->lk_reclaim) 8000 nn->somebody_reclaimed = true; 8001 break; 8002 case FILE_LOCK_DEFERRED: 8003 kref_put(&nbl->nbl_kref, free_nbl); 8004 nbl = NULL; 8005 fallthrough; 8006 case -EAGAIN: /* conflock holds conflicting lock */ 8007 status = nfserr_denied; 8008 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 8009 nfs4_set_lock_denied(conflock, &lock->lk_denied); 8010 break; 8011 case -EDEADLK: 8012 status = nfserr_deadlock; 8013 break; 8014 default: 8015 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 8016 status = nfserrno(err); 8017 break; 8018 } 8019 out: 8020 if (nbl) { 8021 /* dequeue it if we queued it before */ 8022 if (flags & FL_SLEEP) { 8023 spin_lock(&nn->blocked_locks_lock); 8024 if (!list_empty(&nbl->nbl_list) && 8025 !list_empty(&nbl->nbl_lru)) { 8026 list_del_init(&nbl->nbl_list); 8027 list_del_init(&nbl->nbl_lru); 8028 kref_put(&nbl->nbl_kref, free_nbl); 8029 } 8030 /* nbl can use one of lists to be linked to reaplist */ 8031 spin_unlock(&nn->blocked_locks_lock); 8032 } 8033 free_blocked_lock(nbl); 8034 } 8035 if (nf) 8036 nfsd_file_put(nf); 8037 if (lock_stp) { 8038 /* Bump seqid manually if the 4.0 replay owner is openowner */ 8039 if (cstate->replay_owner && 8040 cstate->replay_owner != &lock_sop->lo_owner && 8041 seqid_mutating_err(ntohl(status))) 8042 lock_sop->lo_owner.so_seqid++; 8043 8044 /* 8045 * If this is a new, never-before-used stateid, and we are 8046 * returning an error, then just go ahead and release it. 
8047 */ 8048 if (status && new) 8049 release_lock_stateid(lock_stp); 8050 8051 mutex_unlock(&lock_stp->st_mutex); 8052 8053 nfs4_put_stid(&lock_stp->st_stid); 8054 } 8055 if (open_stp) 8056 nfs4_put_stid(&open_stp->st_stid); 8057 nfsd4_bump_seqid(cstate, status); 8058 if (conflock) 8059 locks_free_lock(conflock); 8060 return status; 8061 } 8062 8063 void nfsd4_lock_release(union nfsd4_op_u *u) 8064 { 8065 struct nfsd4_lock *lock = &u->lock; 8066 struct nfsd4_lock_denied *deny = &lock->lk_denied; 8067 8068 kfree(deny->ld_owner.data); 8069 } 8070 8071 /* 8072 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 8073 * so we do a temporary open here just to get an open file to pass to 8074 * vfs_test_lock. 8075 */ 8076 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 8077 { 8078 struct nfsd_file *nf; 8079 struct inode *inode; 8080 __be32 err; 8081 8082 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); 8083 if (err) 8084 return err; 8085 inode = fhp->fh_dentry->d_inode; 8086 inode_lock(inode); /* to block new leases till after test_lock: */ 8087 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 8088 if (err) 8089 goto out; 8090 lock->c.flc_file = nf->nf_file; 8091 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); 8092 lock->c.flc_file = NULL; 8093 out: 8094 inode_unlock(inode); 8095 nfsd_file_put(nf); 8096 return err; 8097 } 8098 8099 /* 8100 * LOCKT operation 8101 */ 8102 __be32 8103 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 8104 union nfsd4_op_u *u) 8105 { 8106 struct nfsd4_lockt *lockt = &u->lockt; 8107 struct file_lock *file_lock = NULL; 8108 struct nfs4_lockowner *lo = NULL; 8109 __be32 status; 8110 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8111 8112 if (locks_in_grace(SVC_NET(rqstp))) 8113 return nfserr_grace; 8114 8115 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 8116 return nfserr_inval; 8117 8118 if (!nfsd4_has_session(cstate)) { 8119 status = set_client(&lockt->lt_clientid, cstate, nn); 8120 if (status) 8121 goto out; 8122 } 8123 8124 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 8125 goto out; 8126 8127 file_lock = locks_alloc_lock(); 8128 if (!file_lock) { 8129 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 8130 status = nfserr_jukebox; 8131 goto out; 8132 } 8133 8134 switch (lockt->lt_type) { 8135 case NFS4_READ_LT: 8136 case NFS4_READW_LT: 8137 file_lock->c.flc_type = F_RDLCK; 8138 break; 8139 case NFS4_WRITE_LT: 8140 case NFS4_WRITEW_LT: 8141 file_lock->c.flc_type = F_WRLCK; 8142 break; 8143 default: 8144 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 8145 status = nfserr_inval; 8146 goto out; 8147 } 8148 8149 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); 8150 if (lo) 8151 file_lock->c.flc_owner = (fl_owner_t)lo; 8152 file_lock->c.flc_pid = current->tgid; 8153 file_lock->c.flc_flags = FL_POSIX; 8154 8155 file_lock->fl_start = lockt->lt_offset; 8156 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 8157 8158 nfs4_transform_lock_offset(file_lock); 8159 8160 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 8161 if (status) 8162 goto out; 8163 8164 if (file_lock->c.flc_type != F_UNLCK) { 8165 status = nfserr_denied; 8166 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 8167 } 8168 out: 8169 if (lo) 8170 nfs4_put_stateowner(&lo->lo_owner); 8171 if (file_lock) 8172 locks_free_lock(file_lock); 8173 return status; 8174 } 8175 8176 void 
nfsd4_lockt_release(union nfsd4_op_u *u) 8177 { 8178 struct nfsd4_lockt *lockt = &u->lockt; 8179 struct nfsd4_lock_denied *deny = &lockt->lt_denied; 8180 8181 kfree(deny->ld_owner.data); 8182 } 8183 8184 __be32 8185 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 8186 union nfsd4_op_u *u) 8187 { 8188 struct nfsd4_locku *locku = &u->locku; 8189 struct nfs4_ol_stateid *stp; 8190 struct nfsd_file *nf = NULL; 8191 struct file_lock *file_lock = NULL; 8192 __be32 status; 8193 int err; 8194 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8195 8196 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 8197 (long long) locku->lu_offset, 8198 (long long) locku->lu_length); 8199 8200 if (check_lock_length(locku->lu_offset, locku->lu_length)) 8201 return nfserr_inval; 8202 8203 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 8204 &locku->lu_stateid, SC_TYPE_LOCK, 0, 8205 &stp, nn); 8206 if (status) 8207 goto out; 8208 nf = find_any_file(stp->st_stid.sc_file); 8209 if (!nf) { 8210 status = nfserr_lock_range; 8211 goto put_stateid; 8212 } 8213 file_lock = locks_alloc_lock(); 8214 if (!file_lock) { 8215 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 8216 status = nfserr_jukebox; 8217 goto put_file; 8218 } 8219 8220 file_lock->c.flc_type = F_UNLCK; 8221 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); 8222 file_lock->c.flc_pid = current->tgid; 8223 file_lock->c.flc_file = nf->nf_file; 8224 file_lock->c.flc_flags = FL_POSIX; 8225 file_lock->fl_lmops = &nfsd_posix_mng_ops; 8226 file_lock->fl_start = locku->lu_offset; 8227 8228 file_lock->fl_end = last_byte_offset(locku->lu_offset, 8229 locku->lu_length); 8230 nfs4_transform_lock_offset(file_lock); 8231 8232 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); 8233 if (err) { 8234 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 8235 goto out_nfserr; 8236 } 8237 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); 8238 put_file: 8239 nfsd_file_put(nf); 8240 put_stateid: 8241 mutex_unlock(&stp->st_mutex); 8242 nfs4_put_stid(&stp->st_stid); 8243 out: 8244 nfsd4_bump_seqid(cstate, status); 8245 if (file_lock) 8246 locks_free_lock(file_lock); 8247 return status; 8248 8249 out_nfserr: 8250 status = nfserrno(err); 8251 goto put_file; 8252 } 8253 8254 /* 8255 * returns 8256 * true: locks held by lockowner 8257 * false: no locks held by lockowner 8258 */ 8259 static bool 8260 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 8261 { 8262 struct file_lock *fl; 8263 int status = false; 8264 struct nfsd_file *nf; 8265 struct inode *inode; 8266 struct file_lock_context *flctx; 8267 8268 spin_lock(&fp->fi_lock); 8269 nf = find_any_file_locked(fp); 8270 if (!nf) { 8271 /* Any valid lock stateid should have some sort of access */ 8272 WARN_ON_ONCE(1); 8273 goto out; 8274 } 8275 8276 inode = file_inode(nf->nf_file); 8277 flctx = locks_inode_context(inode); 8278 8279 if (flctx && !list_empty_careful(&flctx->flc_posix)) { 8280 spin_lock(&flctx->flc_lock); 8281 for_each_file_lock(fl, &flctx->flc_posix) { 8282 if (fl->c.flc_owner == (fl_owner_t)lowner) { 8283 status = true; 8284 break; 8285 } 8286 } 8287 spin_unlock(&flctx->flc_lock); 8288 } 8289 out: 8290 spin_unlock(&fp->fi_lock); 8291 return status; 8292 } 8293 8294 /** 8295 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations 8296 * @rqstp: RPC transaction 8297 * @cstate: NFSv4 COMPOUND state 8298 * @u: RELEASE_LOCKOWNER arguments 8299 * 8300 * Check if there are
any locks still held and if not - free the lockowner 8301 * and any lock state that is owned. 8302 * 8303 * Return values: 8304 * %nfs_ok: lockowner released or not found 8305 * %nfserr_locks_held: lockowner still in use 8306 * %nfserr_stale_clientid: clientid no longer active 8307 * %nfserr_expired: clientid not recognized 8308 */ 8309 __be32 8310 nfsd4_release_lockowner(struct svc_rqst *rqstp, 8311 struct nfsd4_compound_state *cstate, 8312 union nfsd4_op_u *u) 8313 { 8314 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; 8315 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8316 clientid_t *clid = &rlockowner->rl_clientid; 8317 struct nfs4_ol_stateid *stp; 8318 struct nfs4_lockowner *lo; 8319 struct nfs4_client *clp; 8320 LIST_HEAD(reaplist); 8321 __be32 status; 8322 8323 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 8324 clid->cl_boot, clid->cl_id); 8325 8326 status = set_client(clid, cstate, nn); 8327 if (status) 8328 return status; 8329 clp = cstate->clp; 8330 8331 spin_lock(&clp->cl_lock); 8332 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner); 8333 if (!lo) { 8334 spin_unlock(&clp->cl_lock); 8335 return nfs_ok; 8336 } 8337 8338 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { 8339 if (check_for_locks(stp->st_stid.sc_file, lo)) { 8340 spin_unlock(&clp->cl_lock); 8341 nfs4_put_stateowner(&lo->lo_owner); 8342 return nfserr_locks_held; 8343 } 8344 } 8345 unhash_lockowner_locked(lo); 8346 while (!list_empty(&lo->lo_owner.so_stateids)) { 8347 stp = list_first_entry(&lo->lo_owner.so_stateids, 8348 struct nfs4_ol_stateid, 8349 st_perstateowner); 8350 unhash_lock_stateid(stp); 8351 put_ol_stateid_locked(stp, &reaplist); 8352 } 8353 spin_unlock(&clp->cl_lock); 8354 8355 free_ol_stateid_reaplist(&reaplist); 8356 remove_blocked_locks(lo); 8357 nfs4_put_stateowner(&lo->lo_owner); 8358 return nfs_ok; 8359 } 8360 8361 static inline struct nfs4_client_reclaim * 8362 alloc_reclaim(void) 8363 { 8364 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 8365 } 8366 8367 bool 8368 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn) 8369 { 8370 struct nfs4_client_reclaim *crp; 8371 8372 crp = nfsd4_find_reclaim_client(name, nn); 8373 return (crp && crp->cr_clp); 8374 } 8375 8376 /* 8377 * failure => all reset bets are off, nfserr_no_grace... 8378 * 8379 * The caller is responsible for freeing name.data if NULL is returned (it 8380 * will be freed in nfs4_remove_reclaim_record in the normal case). 
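 * On success (non-NULL return) both name.data and princhash.data are
 * owned by the new record and are later freed by
 * nfs4_remove_reclaim_record().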
8381 */ 8382 struct nfs4_client_reclaim * 8383 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, 8384 struct nfsd_net *nn) 8385 { 8386 unsigned int strhashval; 8387 struct nfs4_client_reclaim *crp; 8388 8389 crp = alloc_reclaim(); 8390 if (crp) { 8391 strhashval = clientstr_hashval(name); 8392 INIT_LIST_HEAD(&crp->cr_strhash); 8393 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 8394 crp->cr_name.data = name.data; 8395 crp->cr_name.len = name.len; 8396 crp->cr_princhash.data = princhash.data; 8397 crp->cr_princhash.len = princhash.len; 8398 crp->cr_clp = NULL; 8399 nn->reclaim_str_hashtbl_size++; 8400 } 8401 return crp; 8402 } 8403 8404 void 8405 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 8406 { 8407 list_del(&crp->cr_strhash); 8408 kfree(crp->cr_name.data); 8409 kfree(crp->cr_princhash.data); 8410 kfree(crp); 8411 nn->reclaim_str_hashtbl_size--; 8412 } 8413 8414 void 8415 nfs4_release_reclaim(struct nfsd_net *nn) 8416 { 8417 struct nfs4_client_reclaim *crp = NULL; 8418 int i; 8419 8420 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8421 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 8422 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 8423 struct nfs4_client_reclaim, cr_strhash); 8424 nfs4_remove_reclaim_record(crp, nn); 8425 } 8426 } 8427 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 8428 } 8429 8430 /* 8431 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 8432 struct nfs4_client_reclaim * 8433 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn) 8434 { 8435 unsigned int strhashval; 8436 struct nfs4_client_reclaim *crp = NULL; 8437 8438 strhashval = clientstr_hashval(name); 8439 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { 8440 if (compare_blob(&crp->cr_name, &name) == 0) { 8441 return crp; 8442 } 8443 } 8444 return NULL; 8445 } 8446 8447 __be32 8448 nfs4_check_open_reclaim(struct nfs4_client *clp) 8449 { 8450 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 8451 return nfserr_no_grace; 8452 8453 if (nfsd4_client_record_check(clp)) 8454 return nfserr_reclaim_bad; 8455 8456 return nfs_ok; 8457 } 8458 8459 /* 8460 * Since the lifetime of a delegation isn't limited to that of an open, a 8461 * client may quite reasonably hang on to a delegation as long as it has 8462 * the inode cached. This becomes an obvious problem the first time a 8463 * client's inode cache approaches the size of the server's total memory. 8464 * 8465 * For now we avoid this problem by imposing a hard limit on the number 8466 * of delegations, which varies according to the server's memory size. 8467 */ 8468 static void 8469 set_max_delegations(void) 8470 { 8471 /* 8472 * Allow at most 4 delegations per megabyte of RAM. Quick 8473 * estimates suggest that in the worst case (where every delegation 8474 * is for a different inode), a delegation could take about 1.5K, 8475 * giving a worst case usage of about 6% of memory. 
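 *
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) the shift below is
 * 20 - 2 - 12 = 6: one delegation is allowed per 64 free buffer pages
 * (256 KiB), which is 4 delegations per megabyte of RAM.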
8476 */ 8477 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 8478 } 8479 8480 static int nfs4_state_create_net(struct net *net) 8481 { 8482 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8483 int i; 8484 8485 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 8486 sizeof(struct list_head), 8487 GFP_KERNEL); 8488 if (!nn->conf_id_hashtbl) 8489 goto err; 8490 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 8491 sizeof(struct list_head), 8492 GFP_KERNEL); 8493 if (!nn->unconf_id_hashtbl) 8494 goto err_unconf_id; 8495 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, 8496 sizeof(struct list_head), 8497 GFP_KERNEL); 8498 if (!nn->sessionid_hashtbl) 8499 goto err_sessionid; 8500 8501 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8502 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 8503 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 8504 } 8505 for (i = 0; i < SESSION_HASH_SIZE; i++) 8506 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 8507 nn->conf_name_tree = RB_ROOT; 8508 nn->unconf_name_tree = RB_ROOT; 8509 nn->boot_time = ktime_get_real_seconds(); 8510 nn->grace_ended = false; 8511 nn->nfsd4_manager.block_opens = true; 8512 INIT_LIST_HEAD(&nn->nfsd4_manager.list); 8513 INIT_LIST_HEAD(&nn->client_lru); 8514 INIT_LIST_HEAD(&nn->close_lru); 8515 INIT_LIST_HEAD(&nn->del_recall_lru); 8516 spin_lock_init(&nn->client_lock); 8517 spin_lock_init(&nn->s2s_cp_lock); 8518 idr_init(&nn->s2s_cp_stateids); 8519 8520 spin_lock_init(&nn->blocked_locks_lock); 8521 INIT_LIST_HEAD(&nn->blocked_locks_lru); 8522 8523 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 8524 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); 8525 get_net(net); 8526 8527 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client"); 8528 if (!nn->nfsd_client_shrinker) 8529 goto err_shrinker; 8530 8531 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan; 8532 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count; 8533 nn->nfsd_client_shrinker->private_data = nn; 8534 8535 shrinker_register(nn->nfsd_client_shrinker); 8536 8537 return 0; 8538 8539 err_shrinker: 8540 put_net(net); 8541 kfree(nn->sessionid_hashtbl); 8542 err_sessionid: 8543 kfree(nn->unconf_id_hashtbl); 8544 err_unconf_id: 8545 kfree(nn->conf_id_hashtbl); 8546 err: 8547 return -ENOMEM; 8548 } 8549 8550 static void 8551 nfs4_state_destroy_net(struct net *net) 8552 { 8553 int i; 8554 struct nfs4_client *clp = NULL; 8555 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8556 8557 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8558 while (!list_empty(&nn->conf_id_hashtbl[i])) { 8559 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 8560 destroy_client(clp); 8561 } 8562 } 8563 8564 WARN_ON(!list_empty(&nn->blocked_locks_lru)); 8565 8566 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8567 while (!list_empty(&nn->unconf_id_hashtbl[i])) { 8568 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 8569 destroy_client(clp); 8570 } 8571 } 8572 8573 kfree(nn->sessionid_hashtbl); 8574 kfree(nn->unconf_id_hashtbl); 8575 kfree(nn->conf_id_hashtbl); 8576 put_net(net); 8577 } 8578 8579 int 8580 nfs4_state_start_net(struct net *net) 8581 { 8582 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8583 int ret; 8584 8585 ret = nfs4_state_create_net(net); 8586 if (ret) 8587 return ret; 8588 locks_start_grace(net, &nn->nfsd4_manager); 8589 nfsd4_client_tracking_init(net); 8590 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) 8591 goto skip_grace; 8592 
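	/*
	 * Either reclaim-complete tracking is disabled or there are
	 * clients with recovery records: run a full grace period and let
	 * the laundromat end it after nfsd4_grace seconds.
	 */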
printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", 8593 nn->nfsd4_grace, net->ns.inum); 8594 trace_nfsd_grace_start(nn); 8595 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 8596 return 0; 8597 8598 skip_grace: 8599 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n", 8600 net->ns.inum); 8601 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); 8602 nfsd4_end_grace(nn); 8603 return 0; 8604 } 8605 8606 /* initialization to perform when the nfsd service is started: */ 8607 8608 int 8609 nfs4_state_start(void) 8610 { 8611 int ret; 8612 8613 ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params); 8614 if (ret) 8615 return ret; 8616 8617 ret = nfsd4_create_callback_queue(); 8618 if (ret) { 8619 rhltable_destroy(&nfs4_file_rhltable); 8620 return ret; 8621 } 8622 8623 set_max_delegations(); 8624 return 0; 8625 } 8626 8627 void 8628 nfs4_state_shutdown_net(struct net *net) 8629 { 8630 struct nfs4_delegation *dp = NULL; 8631 struct list_head *pos, *next, reaplist; 8632 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8633 8634 shrinker_free(nn->nfsd_client_shrinker); 8635 cancel_work(&nn->nfsd_shrinker_work); 8636 cancel_delayed_work_sync(&nn->laundromat_work); 8637 locks_end_grace(&nn->nfsd4_manager); 8638 8639 INIT_LIST_HEAD(&reaplist); 8640 spin_lock(&state_lock); 8641 list_for_each_safe(pos, next, &nn->del_recall_lru) { 8642 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 8643 unhash_delegation_locked(dp, SC_STATUS_CLOSED); 8644 list_add(&dp->dl_recall_lru, &reaplist); 8645 } 8646 spin_unlock(&state_lock); 8647 list_for_each_safe(pos, next, &reaplist) { 8648 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 8649 list_del_init(&dp->dl_recall_lru); 8650 destroy_unhashed_deleg(dp); 8651 } 8652 8653 nfsd4_client_tracking_exit(net); 8654 nfs4_state_destroy_net(net); 8655 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 8656 nfsd4_ssc_shutdown_umount(nn); 8657 #endif 8658 } 8659 8660 void 8661 nfs4_state_shutdown(void) 8662 { 8663 nfsd4_destroy_callback_queue(); 8664 rhltable_destroy(&nfs4_file_rhltable); 8665 } 8666 8667 static void 8668 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 8669 { 8670 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) && 8671 CURRENT_STATEID(stateid)) 8672 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 8673 } 8674 8675 static void 8676 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 8677 { 8678 if (cstate->minorversion) { 8679 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 8680 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 8681 } 8682 } 8683 8684 void 8685 clear_current_stateid(struct nfsd4_compound_state *cstate) 8686 { 8687 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 8688 } 8689 8690 /* 8691 * functions to set current state id 8692 */ 8693 void 8694 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, 8695 union nfsd4_op_u *u) 8696 { 8697 put_stateid(cstate, &u->open_downgrade.od_stateid); 8698 } 8699 8700 void 8701 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, 8702 union nfsd4_op_u *u) 8703 { 8704 put_stateid(cstate, &u->open.op_stateid); 8705 } 8706 8707 void 8708 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, 8709 union nfsd4_op_u *u) 8710 { 8711 put_stateid(cstate, &u->close.cl_stateid); 8712 } 8713 8714 void 8715 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, 8716 union nfsd4_op_u *u) 8717 { 8718 
put_stateid(cstate, &u->lock.lk_resp_stateid); 8719 } 8720 8721 /* 8722 * functions to consume current state id 8723 */ 8724 8725 void 8726 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, 8727 union nfsd4_op_u *u) 8728 { 8729 get_stateid(cstate, &u->open_downgrade.od_stateid); 8730 } 8731 8732 void 8733 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, 8734 union nfsd4_op_u *u) 8735 { 8736 get_stateid(cstate, &u->delegreturn.dr_stateid); 8737 } 8738 8739 void 8740 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, 8741 union nfsd4_op_u *u) 8742 { 8743 get_stateid(cstate, &u->free_stateid.fr_stateid); 8744 } 8745 8746 void 8747 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, 8748 union nfsd4_op_u *u) 8749 { 8750 get_stateid(cstate, &u->setattr.sa_stateid); 8751 } 8752 8753 void 8754 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, 8755 union nfsd4_op_u *u) 8756 { 8757 get_stateid(cstate, &u->close.cl_stateid); 8758 } 8759 8760 void 8761 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, 8762 union nfsd4_op_u *u) 8763 { 8764 get_stateid(cstate, &u->locku.lu_stateid); 8765 } 8766 8767 void 8768 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, 8769 union nfsd4_op_u *u) 8770 { 8771 get_stateid(cstate, &u->read.rd_stateid); 8772 } 8773 8774 void 8775 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, 8776 union nfsd4_op_u *u) 8777 { 8778 get_stateid(cstate, &u->write.wr_stateid); 8779 } 8780 8781 /** 8782 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict 8783 * @rqstp: RPC transaction context 8784 * @inode: file to be checked for a conflict 8785 * @modified: return true if file was modified 8786 * @size: new size of file if modified is true 8787 * 8788 * This function is called when there is a conflict between a write 8789 * delegation and a change/size GETATTR from another client. The server 8790 * must either use the CB_GETATTR to get the current values of the 8791 * attributes from the client that holds the delegation or recall the 8792 * delegation before replying to the GETATTR. See RFC 8881 section 8793 * 18.7.4. 8794 * 8795 * Returns 0 if there is no conflict; otherwise an nfs_stat 8796 * code is returned. 
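 *
 * The client's CB_GETATTR reply is cached in the delegation's
 * nfs4_cb_fattr; if the change attribute or size it reports differs
 * from the server's recorded values, the file is treated as modified
 * and @size is set to the client's reported size.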
8797 */ 8798 __be32 8799 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode, 8800 bool *modified, u64 *size) 8801 { 8802 __be32 status; 8803 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8804 struct file_lock_context *ctx; 8805 struct file_lease *fl; 8806 struct nfs4_delegation *dp; 8807 struct iattr attrs; 8808 struct nfs4_cb_fattr *ncf; 8809 8810 *modified = false; 8811 ctx = locks_inode_context(inode); 8812 if (!ctx) 8813 return 0; 8814 spin_lock(&ctx->flc_lock); 8815 for_each_file_lock(fl, &ctx->flc_lease) { 8816 unsigned char type = fl->c.flc_type; 8817 8818 if (fl->c.flc_flags == FL_LAYOUT) 8819 continue; 8820 if (fl->fl_lmops != &nfsd_lease_mng_ops) { 8821 /* 8822 * non-nfs lease, if it's a lease with F_RDLCK then 8823 * we are done; there isn't any write delegation 8824 * on this inode 8825 */ 8826 if (type == F_RDLCK) 8827 break; 8828 goto break_lease; 8829 } 8830 if (type == F_WRLCK) { 8831 dp = fl->c.flc_owner; 8832 if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { 8833 spin_unlock(&ctx->flc_lock); 8834 return 0; 8835 } 8836 break_lease: 8837 nfsd_stats_wdeleg_getattr_inc(nn); 8838 dp = fl->c.flc_owner; 8839 ncf = &dp->dl_cb_fattr; 8840 nfs4_cb_getattr(&dp->dl_cb_fattr); 8841 spin_unlock(&ctx->flc_lock); 8842 wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, 8843 TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT); 8844 if (ncf->ncf_cb_status) { 8845 /* Recall delegation only if client didn't respond */ 8846 status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 8847 if (status != nfserr_jukebox || 8848 !nfsd_wait_for_delegreturn(rqstp, inode)) 8849 return status; 8850 } 8851 if (!ncf->ncf_file_modified && 8852 (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || 8853 ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) 8854 ncf->ncf_file_modified = true; 8855 if (ncf->ncf_file_modified) { 8856 /* 8857 * Per section 10.4.3 of RFC 8881, the server would 8858 * not update the file's metadata with the client's 8859 * modified size 8860 */ 8861 attrs.ia_mtime = attrs.ia_ctime = current_time(inode); 8862 attrs.ia_valid = ATTR_MTIME | ATTR_CTIME; 8863 setattr_copy(&nop_mnt_idmap, inode, &attrs); 8864 mark_inode_dirty(inode); 8865 ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; 8866 *size = ncf->ncf_cur_fsize; 8867 *modified = true; 8868 } 8869 return 0; 8870 } 8871 break; 8872 } 8873 spin_unlock(&ctx->flc_lock); 8874 return 0; 8875 } 8876
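/*
 * Illustrative sketch (not part of nfsd, and not kernel code): how an
 * NFSv4 (offset, length) lock range is validated and mapped onto a
 * POSIX [fl_start, fl_end] byte range, mirroring the way nfsd4_lock()
 * and nfsd4_locku() above use check_lock_length(), last_byte_offset()
 * (defined earlier in this file) and nfs4_transform_lock_offset().
 * All "ex_" names below are stand-ins invented for this example; it is
 * plain userspace C and only demonstrates the arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_NFS4_MAX_UINT64	(~(uint64_t)0)	/* "length runs to EOF" */
#define EX_OFFSET_MAX		INT64_MAX	/* largest loff_t value */

/* Reject zero-length ranges and ranges that would overflow 64 bits. */
static int ex_check_lock_length(uint64_t offset, uint64_t length)
{
	return length == 0 ||
	       (length != EX_NFS4_MAX_UINT64 && length > ~offset);
}

/* Last byte covered by the range; a to-EOF range maps to the maximum. */
static uint64_t ex_last_byte_offset(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;

	return end > start ? end - 1 : EX_NFS4_MAX_UINT64;
}

int main(void)
{
	uint64_t offset = 100, length = EX_NFS4_MAX_UINT64;	/* lock to EOF */
	int64_t fl_start, fl_end;

	if (ex_check_lock_length(offset, length))
		return 1;

	fl_start = (int64_t)offset;
	fl_end = (int64_t)ex_last_byte_offset(offset, length);
	/* Same clamping as nfs4_transform_lock_offset(): loff_t is signed. */
	if (fl_start < 0)
		fl_start = EX_OFFSET_MAX;
	if (fl_end < 0)
		fl_end = EX_OFFSET_MAX;

	printf("fl_start=%lld fl_end=%lld\n",
	       (long long)fl_start, (long long)fl_end);
	return 0;
}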