/*
 *  linux/fs/nfsd/nfs4state.c
 *
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/param.h>
#include <linux/major.h>
#include <linux/slab.h>

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/mount.h>
#include <linux/workqueue.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <linux/nfs4.h>
#include <linux/nfsd/state.h>
#include <linux/nfsd/xdr4.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/mutex.h>
#include <linux/lockd/bind.h>
#include <linux/module.h>

#define NFSDDBG_FACILITY	NFSDDBG_PROC

/* Globals */
static time_t lease_time = 90;     /* default lease time */
static time_t user_lease_time = 90;
static time_t boot_time;
static int in_grace = 1;
static u32 current_clientid = 1;
static u32 current_ownerid = 1;
static u32 current_fileid = 1;
static u32 current_delegid = 1;
static u32 nfs4_init;
static stateid_t zerostateid;             /* bits all 0 */
static stateid_t onestateid;              /* bits all 1 */

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &onestateid, sizeof(stateid_t)))

/* forward declarations */
static struct nfs4_stateid * find_stateid(stateid_t *stid, int flags);
static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, stateid_t *stid);
static void release_stateid_lockowners(struct nfs4_stateid *open_stp);
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
static void nfs4_set_recdir(char *recdir);

/* Locking:
 *
 * client_mutex:
 * 	protects conf_id_hashtbl[], conf_str_hashtbl[],
 * 	unconf_str_hashtbl[], unconf_id_hashtbl[].
 */
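/*
 * Usage sketch (illustrative only, not taken verbatim from any caller in
 * this file): the nfsv4 state-changing entry points (setclientid, open,
 * renew, the laundromat, ...) bracket their work with this mutex, roughly:
 *
 *	nfs4_lock_state();
 *	... look up / modify clients, stateowners and stateids ...
 *	nfs4_unlock_state();
 *
 * nfs4_lock_state() and nfs4_unlock_state() are defined just below.
 */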
static DEFINE_MUTEX(client_mutex);

static struct kmem_cache *stateowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;

void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}

void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

/* forward declarations */
static void release_stateowner(struct nfs4_stateowner *sop);
static void release_stateid(struct nfs4_stateid *stp, int flags);

/*
 * Delegation state
 */

/* recall_lock protects the del_recall_lru */
static DEFINE_SPINLOCK(recall_lock);
static struct list_head del_recall_lru;

static void
free_nfs4_file(struct kref *kref)
{
	struct nfs4_file *fp = container_of(kref, struct nfs4_file, fi_ref);
	list_del(&fp->fi_hash);
	iput(fp->fi_inode);
	kmem_cache_free(file_slab, fp);
}

static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	kref_put(&fi->fi_ref, free_nfs4_file);
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	kref_get(&fi->fi_ref);
}

static int num_delegations;
unsigned int max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for nfs4_stateowner */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

#define ownerid_hashval(id) \
	((id) & OWNER_HASH_MASK)
#define ownerstr_hashval(clientid, ownername) \
	(((clientid) + opaque_hashval((ownername.data), (ownername.len))) & OWNER_HASH_MASK)

static struct list_head	ownerid_hashtbl[OWNER_HASH_SIZE];
static struct list_head	ownerstr_hashtbl[OWNER_HASH_SIZE];

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)
#define FILE_HASH_MASK		(FILE_HASH_SIZE - 1)
/* hash table for (open)nfs4_stateid */
#define STATEID_HASH_BITS	10
#define STATEID_HASH_SIZE	(1 << STATEID_HASH_BITS)
#define STATEID_HASH_MASK	(STATEID_HASH_SIZE - 1)

#define file_hashval(x) \
	hash_ptr(x, FILE_HASH_BITS)
#define stateid_hashval(owner_id, file_id)  \
	(((owner_id) + (file_id)) & STATEID_HASH_MASK)

static struct list_head file_hashtbl[FILE_HASH_SIZE];
static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];
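/*
 * Worked example (illustrative only, not part of the original code):
 * opaque_hashval() is a simple multiply-by-37 byte hash, so a two-byte
 * owner name { 0x01, 0x02 } hashes to (0 * 37 + 0x01) * 37 + 0x02 = 39.
 * ownerstr_hashval() adds the client id and masks with OWNER_HASH_MASK
 * to pick one of the OWNER_HASH_SIZE ownerstr_hashtbl[] chains;
 * stateid_hashval() does the same with the owner and file ids over the
 * STATEID_HASH_SIZE stateid_hashtbl[] chains.
 */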
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_fh *current_fh, u32 type)
{
	struct nfs4_delegation *dp;
	struct nfs4_file *fp = stp->st_file;
	struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;

	dprintk("NFSD alloc_init_deleg\n");
	if (fp->fi_had_conflict)
		return NULL;
	if (num_delegations > max_delegations)
		return NULL;
	dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
	if (dp == NULL)
		return dp;
	num_delegations++;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_client = clp;
	get_nfs4_file(fp);
	dp->dl_file = fp;
	dp->dl_flock = NULL;
	get_file(stp->st_vfs_file);
	dp->dl_vfs_file = stp->st_vfs_file;
	dp->dl_type = type;
	dp->dl_recall.cbr_dp = NULL;
	dp->dl_recall.cbr_ident = cb->cb_ident;
	dp->dl_recall.cbr_trunc = 0;
	dp->dl_stateid.si_boot = boot_time;
	dp->dl_stateid.si_stateownerid = current_delegid++;
	dp->dl_stateid.si_fileid = 0;
	dp->dl_stateid.si_generation = 0;
	dp->dl_fhlen = current_fh->fh_handle.fh_size;
	memcpy(dp->dl_fhval, &current_fh->fh_handle.fh_base,
			current_fh->fh_handle.fh_size);
	dp->dl_time = 0;
	atomic_set(&dp->dl_count, 1);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return dp;
}

void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
	if (atomic_dec_and_test(&dp->dl_count)) {
		dprintk("NFSD: freeing dp %p\n",dp);
		put_nfs4_file(dp->dl_file);
		kmem_cache_free(deleg_slab, dp);
		num_delegations--;
	}
}

/* Remove the associated file_lock first, then remove the delegation.
 * lease_modify() is called to remove the FS_LEASE file_lock from
 * the i_flock list, eventually calling nfsd's lock_manager
 * fl_release_callback.
 */
static void
nfs4_close_delegation(struct nfs4_delegation *dp)
{
	struct file *filp = dp->dl_vfs_file;

	dprintk("NFSD: close_delegation dp %p\n",dp);
	dp->dl_vfs_file = NULL;
	/* The following nfsd_close may not actually close the file,
	 * but we want to remove the lease in any case. */
	if (dp->dl_flock)
		vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
	nfsd_close(filp);
}

/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_perclnt);
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_close_delegation(dp);
	nfs4_put_delegation(dp);
}

/*
 * SETCLIENTID state
 */

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS	4
#define CLIENT_HASH_SIZE	(1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK	(CLIENT_HASH_SIZE - 1)

#define clientid_hashval(id) \
	((id) & CLIENT_HASH_MASK)
#define clientstr_hashval(name) \
	(opaque_hashval((name), 8) & CLIENT_HASH_MASK)
/*
 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
 * used in reboot/reset lease grace period processing
 *
 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
 * setclientid_confirmed info.
 *
 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed
 * setclientid info.
 *
 * client_lru holds client queue ordered by nfs4_client.cl_time
 * for lease renewal.
 *
 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
 * for last close replay.
 */
static struct list_head	reclaim_str_hashtbl[CLIENT_HASH_SIZE];
static int reclaim_str_hashtbl_size = 0;
static struct list_head	conf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head	conf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head	unconf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head	unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;

static inline void
renew_client(struct nfs4_client *clp)
{
	/*
	 * Move client to the end of the LRU list.
319 */ 320 dprintk("renewing client (clientid %08x/%08x)\n", 321 clp->cl_clientid.cl_boot, 322 clp->cl_clientid.cl_id); 323 list_move_tail(&clp->cl_lru, &client_lru); 324 clp->cl_time = get_seconds(); 325 } 326 327 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 328 static int 329 STALE_CLIENTID(clientid_t *clid) 330 { 331 if (clid->cl_boot == boot_time) 332 return 0; 333 dprintk("NFSD stale clientid (%08x/%08x)\n", 334 clid->cl_boot, clid->cl_id); 335 return 1; 336 } 337 338 /* 339 * XXX Should we use a slab cache ? 340 * This type of memory management is somewhat inefficient, but we use it 341 * anyway since SETCLIENTID is not a common operation. 342 */ 343 static inline struct nfs4_client * 344 alloc_client(struct xdr_netobj name) 345 { 346 struct nfs4_client *clp; 347 348 if ((clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL))!= NULL) { 349 if ((clp->cl_name.data = kmalloc(name.len, GFP_KERNEL)) != NULL) { 350 memcpy(clp->cl_name.data, name.data, name.len); 351 clp->cl_name.len = name.len; 352 } 353 else { 354 kfree(clp); 355 clp = NULL; 356 } 357 } 358 return clp; 359 } 360 361 static inline void 362 free_client(struct nfs4_client *clp) 363 { 364 if (clp->cl_cred.cr_group_info) 365 put_group_info(clp->cl_cred.cr_group_info); 366 kfree(clp->cl_name.data); 367 kfree(clp); 368 } 369 370 void 371 put_nfs4_client(struct nfs4_client *clp) 372 { 373 if (atomic_dec_and_test(&clp->cl_count)) 374 free_client(clp); 375 } 376 377 static void 378 shutdown_callback_client(struct nfs4_client *clp) 379 { 380 struct rpc_clnt *clnt = clp->cl_callback.cb_client; 381 382 /* shutdown rpc client, ending any outstanding recall rpcs */ 383 if (clnt) { 384 clp->cl_callback.cb_client = NULL; 385 rpc_shutdown_client(clnt); 386 } 387 } 388 389 static void 390 expire_client(struct nfs4_client *clp) 391 { 392 struct nfs4_stateowner *sop; 393 struct nfs4_delegation *dp; 394 struct list_head reaplist; 395 396 dprintk("NFSD: expire_client cl_count %d\n", 397 atomic_read(&clp->cl_count)); 398 399 shutdown_callback_client(clp); 400 401 INIT_LIST_HEAD(&reaplist); 402 spin_lock(&recall_lock); 403 while (!list_empty(&clp->cl_delegations)) { 404 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 405 dprintk("NFSD: expire client. 
dp %p, fp %p\n", dp, 406 dp->dl_flock); 407 list_del_init(&dp->dl_perclnt); 408 list_move(&dp->dl_recall_lru, &reaplist); 409 } 410 spin_unlock(&recall_lock); 411 while (!list_empty(&reaplist)) { 412 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 413 list_del_init(&dp->dl_recall_lru); 414 unhash_delegation(dp); 415 } 416 list_del(&clp->cl_idhash); 417 list_del(&clp->cl_strhash); 418 list_del(&clp->cl_lru); 419 while (!list_empty(&clp->cl_openowners)) { 420 sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient); 421 release_stateowner(sop); 422 } 423 put_nfs4_client(clp); 424 } 425 426 static struct nfs4_client * 427 create_client(struct xdr_netobj name, char *recdir) { 428 struct nfs4_client *clp; 429 430 if (!(clp = alloc_client(name))) 431 goto out; 432 memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); 433 atomic_set(&clp->cl_count, 1); 434 atomic_set(&clp->cl_callback.cb_set, 0); 435 INIT_LIST_HEAD(&clp->cl_idhash); 436 INIT_LIST_HEAD(&clp->cl_strhash); 437 INIT_LIST_HEAD(&clp->cl_openowners); 438 INIT_LIST_HEAD(&clp->cl_delegations); 439 INIT_LIST_HEAD(&clp->cl_lru); 440 out: 441 return clp; 442 } 443 444 static void 445 copy_verf(struct nfs4_client *target, nfs4_verifier *source) { 446 memcpy(target->cl_verifier.data, source->data, sizeof(target->cl_verifier.data)); 447 } 448 449 static void 450 copy_clid(struct nfs4_client *target, struct nfs4_client *source) { 451 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 452 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 453 } 454 455 static void 456 copy_cred(struct svc_cred *target, struct svc_cred *source) { 457 458 target->cr_uid = source->cr_uid; 459 target->cr_gid = source->cr_gid; 460 target->cr_group_info = source->cr_group_info; 461 get_group_info(target->cr_group_info); 462 } 463 464 static inline int 465 same_name(const char *n1, const char *n2) { 466 return 0 == memcmp(n1, n2, HEXDIR_LEN); 467 } 468 469 static int 470 cmp_verf(nfs4_verifier *v1, nfs4_verifier *v2) { 471 return(!memcmp(v1->data,v2->data,sizeof(v1->data))); 472 } 473 474 static int 475 cmp_clid(clientid_t * cl1, clientid_t * cl2) { 476 return((cl1->cl_boot == cl2->cl_boot) && 477 (cl1->cl_id == cl2->cl_id)); 478 } 479 480 /* XXX what about NGROUP */ 481 static int 482 cmp_creds(struct svc_cred *cr1, struct svc_cred *cr2){ 483 return(cr1->cr_uid == cr2->cr_uid); 484 485 } 486 487 static void 488 gen_clid(struct nfs4_client *clp) { 489 clp->cl_clientid.cl_boot = boot_time; 490 clp->cl_clientid.cl_id = current_clientid++; 491 } 492 493 static void 494 gen_confirm(struct nfs4_client *clp) { 495 struct timespec tv; 496 u32 * p; 497 498 tv = CURRENT_TIME; 499 p = (u32 *)clp->cl_confirm.data; 500 *p++ = tv.tv_sec; 501 *p++ = tv.tv_nsec; 502 } 503 504 static int 505 check_name(struct xdr_netobj name) { 506 507 if (name.len == 0) 508 return 0; 509 if (name.len > NFS4_OPAQUE_LIMIT) { 510 printk("NFSD: check_name: name too long(%d)!\n", name.len); 511 return 0; 512 } 513 return 1; 514 } 515 516 static void 517 add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval) 518 { 519 unsigned int idhashval; 520 521 list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]); 522 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 523 list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]); 524 list_add_tail(&clp->cl_lru, &client_lru); 525 clp->cl_time = get_seconds(); 526 } 527 528 static void 529 move_to_confirmed(struct nfs4_client *clp) 530 { 531 unsigned int idhashval = 
clientid_hashval(clp->cl_clientid.cl_id); 532 unsigned int strhashval; 533 534 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 535 list_del_init(&clp->cl_strhash); 536 list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]); 537 strhashval = clientstr_hashval(clp->cl_recdir); 538 list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]); 539 renew_client(clp); 540 } 541 542 static struct nfs4_client * 543 find_confirmed_client(clientid_t *clid) 544 { 545 struct nfs4_client *clp; 546 unsigned int idhashval = clientid_hashval(clid->cl_id); 547 548 list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) { 549 if (cmp_clid(&clp->cl_clientid, clid)) 550 return clp; 551 } 552 return NULL; 553 } 554 555 static struct nfs4_client * 556 find_unconfirmed_client(clientid_t *clid) 557 { 558 struct nfs4_client *clp; 559 unsigned int idhashval = clientid_hashval(clid->cl_id); 560 561 list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) { 562 if (cmp_clid(&clp->cl_clientid, clid)) 563 return clp; 564 } 565 return NULL; 566 } 567 568 static struct nfs4_client * 569 find_confirmed_client_by_str(const char *dname, unsigned int hashval) 570 { 571 struct nfs4_client *clp; 572 573 list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { 574 if (same_name(clp->cl_recdir, dname)) 575 return clp; 576 } 577 return NULL; 578 } 579 580 static struct nfs4_client * 581 find_unconfirmed_client_by_str(const char *dname, unsigned int hashval) 582 { 583 struct nfs4_client *clp; 584 585 list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { 586 if (same_name(clp->cl_recdir, dname)) 587 return clp; 588 } 589 return NULL; 590 } 591 592 /* a helper function for parse_callback */ 593 static int 594 parse_octet(unsigned int *lenp, char **addrp) 595 { 596 unsigned int len = *lenp; 597 char *p = *addrp; 598 int n = -1; 599 char c; 600 601 for (;;) { 602 if (!len) 603 break; 604 len--; 605 c = *p++; 606 if (c == '.') 607 break; 608 if ((c < '0') || (c > '9')) { 609 n = -1; 610 break; 611 } 612 if (n < 0) 613 n = 0; 614 n = (n * 10) + (c - '0'); 615 if (n > 255) { 616 n = -1; 617 break; 618 } 619 } 620 *lenp = len; 621 *addrp = p; 622 return n; 623 } 624 625 /* parse and set the setclientid ipv4 callback address */ 626 static int 627 parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigned short *cbportp) 628 { 629 int temp = 0; 630 u32 cbaddr = 0; 631 u16 cbport = 0; 632 u32 addrlen = addr_len; 633 char *addr = addr_val; 634 int i, shift; 635 636 /* ipaddress */ 637 shift = 24; 638 for(i = 4; i > 0 ; i--) { 639 if ((temp = parse_octet(&addrlen, &addr)) < 0) { 640 return 0; 641 } 642 cbaddr |= (temp << shift); 643 if (shift > 0) 644 shift -= 8; 645 } 646 *cbaddrp = cbaddr; 647 648 /* port */ 649 shift = 8; 650 for(i = 2; i > 0 ; i--) { 651 if ((temp = parse_octet(&addrlen, &addr)) < 0) { 652 return 0; 653 } 654 cbport |= (temp << shift); 655 if (shift > 0) 656 shift -= 8; 657 } 658 *cbportp = cbport; 659 return 1; 660 } 661 662 static void 663 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se) 664 { 665 struct nfs4_callback *cb = &clp->cl_callback; 666 667 /* Currently, we only support tcp for the callback channel */ 668 if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3)) 669 goto out_err; 670 671 if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val, 672 &cb->cb_addr, &cb->cb_port))) 673 goto out_err; 674 cb->cb_prog = se->se_callback_prog; 675 cb->cb_ident = 
se->se_callback_ident; 676 return; 677 out_err: 678 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " 679 "will not receive delegations\n", 680 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); 681 682 return; 683 } 684 685 /* 686 * RFC 3010 has a complex implmentation description of processing a 687 * SETCLIENTID request consisting of 5 bullets, labeled as 688 * CASE0 - CASE4 below. 689 * 690 * NOTES: 691 * callback information will be processed in a future patch 692 * 693 * an unconfirmed record is added when: 694 * NORMAL (part of CASE 4): there is no confirmed nor unconfirmed record. 695 * CASE 1: confirmed record found with matching name, principal, 696 * verifier, and clientid. 697 * CASE 2: confirmed record found with matching name, principal, 698 * and there is no unconfirmed record with matching 699 * name and principal 700 * 701 * an unconfirmed record is replaced when: 702 * CASE 3: confirmed record found with matching name, principal, 703 * and an unconfirmed record is found with matching 704 * name, principal, and with clientid and 705 * confirm that does not match the confirmed record. 706 * CASE 4: there is no confirmed record with matching name and 707 * principal. there is an unconfirmed record with 708 * matching name, principal. 709 * 710 * an unconfirmed record is deleted when: 711 * CASE 1: an unconfirmed record that matches input name, verifier, 712 * and confirmed clientid. 713 * CASE 4: any unconfirmed records with matching name and principal 714 * that exist after an unconfirmed record has been replaced 715 * as described above. 716 * 717 */ 718 __be32 719 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 720 struct nfsd4_setclientid *setclid) 721 { 722 struct sockaddr_in *sin = svc_addr_in(rqstp); 723 struct xdr_netobj clname = { 724 .len = setclid->se_namelen, 725 .data = setclid->se_name, 726 }; 727 nfs4_verifier clverifier = setclid->se_verf; 728 unsigned int strhashval; 729 struct nfs4_client *conf, *unconf, *new; 730 __be32 status; 731 char dname[HEXDIR_LEN]; 732 733 if (!check_name(clname)) 734 return nfserr_inval; 735 736 status = nfs4_make_rec_clidname(dname, &clname); 737 if (status) 738 return status; 739 740 /* 741 * XXX The Duplicate Request Cache (DRC) has been checked (??) 742 * We get here on a DRC miss. 743 */ 744 745 strhashval = clientstr_hashval(dname); 746 747 nfs4_lock_state(); 748 conf = find_confirmed_client_by_str(dname, strhashval); 749 if (conf) { 750 /* 751 * CASE 0: 752 * clname match, confirmed, different principal 753 * or different ip_address 754 */ 755 status = nfserr_clid_inuse; 756 if (!cmp_creds(&conf->cl_cred, &rqstp->rq_cred) 757 || conf->cl_addr != sin->sin_addr.s_addr) { 758 dprintk("NFSD: setclientid: string in use by client" 759 "at %u.%u.%u.%u\n", NIPQUAD(conf->cl_addr)); 760 goto out; 761 } 762 } 763 unconf = find_unconfirmed_client_by_str(dname, strhashval); 764 status = nfserr_resource; 765 if (!conf) { 766 /* 767 * CASE 4: 768 * placed first, because it is the normal case. 
769 */ 770 if (unconf) 771 expire_client(unconf); 772 new = create_client(clname, dname); 773 if (new == NULL) 774 goto out; 775 copy_verf(new, &clverifier); 776 new->cl_addr = sin->sin_addr.s_addr; 777 copy_cred(&new->cl_cred,&rqstp->rq_cred); 778 gen_clid(new); 779 gen_confirm(new); 780 gen_callback(new, setclid); 781 add_to_unconfirmed(new, strhashval); 782 } else if (cmp_verf(&conf->cl_verifier, &clverifier)) { 783 /* 784 * CASE 1: 785 * cl_name match, confirmed, principal match 786 * verifier match: probable callback update 787 * 788 * remove any unconfirmed nfs4_client with 789 * matching cl_name, cl_verifier, and cl_clientid 790 * 791 * create and insert an unconfirmed nfs4_client with same 792 * cl_name, cl_verifier, and cl_clientid as existing 793 * nfs4_client, but with the new callback info and a 794 * new cl_confirm 795 */ 796 if (unconf) { 797 /* Note this is removing unconfirmed {*x***}, 798 * which is stronger than RFC recommended {vxc**}. 799 * This has the advantage that there is at most 800 * one {*x***} in either list at any time. 801 */ 802 expire_client(unconf); 803 } 804 new = create_client(clname, dname); 805 if (new == NULL) 806 goto out; 807 copy_verf(new,&conf->cl_verifier); 808 new->cl_addr = sin->sin_addr.s_addr; 809 copy_cred(&new->cl_cred,&rqstp->rq_cred); 810 copy_clid(new, conf); 811 gen_confirm(new); 812 gen_callback(new, setclid); 813 add_to_unconfirmed(new,strhashval); 814 } else if (!unconf) { 815 /* 816 * CASE 2: 817 * clname match, confirmed, principal match 818 * verfier does not match 819 * no unconfirmed. create a new unconfirmed nfs4_client 820 * using input clverifier, clname, and callback info 821 * and generate a new cl_clientid and cl_confirm. 822 */ 823 new = create_client(clname, dname); 824 if (new == NULL) 825 goto out; 826 copy_verf(new,&clverifier); 827 new->cl_addr = sin->sin_addr.s_addr; 828 copy_cred(&new->cl_cred,&rqstp->rq_cred); 829 gen_clid(new); 830 gen_confirm(new); 831 gen_callback(new, setclid); 832 add_to_unconfirmed(new, strhashval); 833 } else if (!cmp_verf(&conf->cl_confirm, &unconf->cl_confirm)) { 834 /* 835 * CASE3: 836 * confirmed found (name, principal match) 837 * confirmed verifier does not match input clverifier 838 * 839 * unconfirmed found (name match) 840 * confirmed->cl_confirm != unconfirmed->cl_confirm 841 * 842 * remove unconfirmed. 843 * 844 * create an unconfirmed nfs4_client 845 * with same cl_name as existing confirmed nfs4_client, 846 * but with new callback info, new cl_clientid, 847 * new cl_verifier and a new cl_confirm 848 */ 849 expire_client(unconf); 850 new = create_client(clname, dname); 851 if (new == NULL) 852 goto out; 853 copy_verf(new,&clverifier); 854 new->cl_addr = sin->sin_addr.s_addr; 855 copy_cred(&new->cl_cred,&rqstp->rq_cred); 856 gen_clid(new); 857 gen_confirm(new); 858 gen_callback(new, setclid); 859 add_to_unconfirmed(new, strhashval); 860 } else { 861 /* No cases hit !!! */ 862 status = nfserr_inval; 863 goto out; 864 865 } 866 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 867 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 868 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 869 status = nfs_ok; 870 out: 871 nfs4_unlock_state(); 872 return status; 873 } 874 875 876 /* 877 * RFC 3010 has a complex implmentation description of processing a 878 * SETCLIENTID_CONFIRM request consisting of 4 bullets describing 879 * processing on a DRC miss, labeled as CASE1 - CASE4 below. 
880 * 881 * NOTE: callback information will be processed here in a future patch 882 */ 883 __be32 884 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 885 struct nfsd4_compound_state *cstate, 886 struct nfsd4_setclientid_confirm *setclientid_confirm) 887 { 888 struct sockaddr_in *sin = svc_addr_in(rqstp); 889 struct nfs4_client *conf, *unconf; 890 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 891 clientid_t * clid = &setclientid_confirm->sc_clientid; 892 __be32 status; 893 894 if (STALE_CLIENTID(clid)) 895 return nfserr_stale_clientid; 896 /* 897 * XXX The Duplicate Request Cache (DRC) has been checked (??) 898 * We get here on a DRC miss. 899 */ 900 901 nfs4_lock_state(); 902 903 conf = find_confirmed_client(clid); 904 unconf = find_unconfirmed_client(clid); 905 906 status = nfserr_clid_inuse; 907 if (conf && conf->cl_addr != sin->sin_addr.s_addr) 908 goto out; 909 if (unconf && unconf->cl_addr != sin->sin_addr.s_addr) 910 goto out; 911 912 if ((conf && unconf) && 913 (cmp_verf(&unconf->cl_confirm, &confirm)) && 914 (cmp_verf(&conf->cl_verifier, &unconf->cl_verifier)) && 915 (same_name(conf->cl_recdir,unconf->cl_recdir)) && 916 (!cmp_verf(&conf->cl_confirm, &unconf->cl_confirm))) { 917 /* CASE 1: 918 * unconf record that matches input clientid and input confirm. 919 * conf record that matches input clientid. 920 * conf and unconf records match names, verifiers 921 */ 922 if (!cmp_creds(&conf->cl_cred, &unconf->cl_cred)) 923 status = nfserr_clid_inuse; 924 else { 925 /* XXX: We just turn off callbacks until we can handle 926 * change request correctly. */ 927 atomic_set(&conf->cl_callback.cb_set, 0); 928 gen_confirm(conf); 929 nfsd4_remove_clid_dir(unconf); 930 expire_client(unconf); 931 status = nfs_ok; 932 933 } 934 } else if ((conf && !unconf) || 935 ((conf && unconf) && 936 (!cmp_verf(&conf->cl_verifier, &unconf->cl_verifier) || 937 !same_name(conf->cl_recdir, unconf->cl_recdir)))) { 938 /* CASE 2: 939 * conf record that matches input clientid. 940 * if unconf record matches input clientid, then 941 * unconf->cl_name or unconf->cl_verifier don't match the 942 * conf record. 943 */ 944 if (!cmp_creds(&conf->cl_cred,&rqstp->rq_cred)) 945 status = nfserr_clid_inuse; 946 else 947 status = nfs_ok; 948 } else if (!conf && unconf 949 && cmp_verf(&unconf->cl_confirm, &confirm)) { 950 /* CASE 3: 951 * conf record not found. 952 * unconf record found. 953 * unconf->cl_confirm matches input confirm 954 */ 955 if (!cmp_creds(&unconf->cl_cred, &rqstp->rq_cred)) { 956 status = nfserr_clid_inuse; 957 } else { 958 unsigned int hash = 959 clientstr_hashval(unconf->cl_recdir); 960 conf = find_confirmed_client_by_str(unconf->cl_recdir, 961 hash); 962 if (conf) { 963 nfsd4_remove_clid_dir(conf); 964 expire_client(conf); 965 } 966 move_to_confirmed(unconf); 967 conf = unconf; 968 status = nfs_ok; 969 } 970 } else if ((!conf || (conf && !cmp_verf(&conf->cl_confirm, &confirm))) 971 && (!unconf || (unconf && !cmp_verf(&unconf->cl_confirm, 972 &confirm)))) { 973 /* CASE 4: 974 * conf record not found, or if conf, conf->cl_confirm does not 975 * match input confirm. 976 * unconf record not found, or if unconf, unconf->cl_confirm 977 * does not match input confirm. 
978 */ 979 status = nfserr_stale_clientid; 980 } else { 981 /* check that we have hit one of the cases...*/ 982 status = nfserr_clid_inuse; 983 } 984 out: 985 if (!status) 986 nfsd4_probe_callback(conf); 987 nfs4_unlock_state(); 988 return status; 989 } 990 991 /* OPEN Share state helper functions */ 992 static inline struct nfs4_file * 993 alloc_init_file(struct inode *ino) 994 { 995 struct nfs4_file *fp; 996 unsigned int hashval = file_hashval(ino); 997 998 fp = kmem_cache_alloc(file_slab, GFP_KERNEL); 999 if (fp) { 1000 kref_init(&fp->fi_ref); 1001 INIT_LIST_HEAD(&fp->fi_hash); 1002 INIT_LIST_HEAD(&fp->fi_stateids); 1003 INIT_LIST_HEAD(&fp->fi_delegations); 1004 list_add(&fp->fi_hash, &file_hashtbl[hashval]); 1005 fp->fi_inode = igrab(ino); 1006 fp->fi_id = current_fileid++; 1007 fp->fi_had_conflict = false; 1008 return fp; 1009 } 1010 return NULL; 1011 } 1012 1013 static void 1014 nfsd4_free_slab(struct kmem_cache **slab) 1015 { 1016 if (*slab == NULL) 1017 return; 1018 kmem_cache_destroy(*slab); 1019 *slab = NULL; 1020 } 1021 1022 static void 1023 nfsd4_free_slabs(void) 1024 { 1025 nfsd4_free_slab(&stateowner_slab); 1026 nfsd4_free_slab(&file_slab); 1027 nfsd4_free_slab(&stateid_slab); 1028 nfsd4_free_slab(&deleg_slab); 1029 } 1030 1031 static int 1032 nfsd4_init_slabs(void) 1033 { 1034 stateowner_slab = kmem_cache_create("nfsd4_stateowners", 1035 sizeof(struct nfs4_stateowner), 0, 0, NULL); 1036 if (stateowner_slab == NULL) 1037 goto out_nomem; 1038 file_slab = kmem_cache_create("nfsd4_files", 1039 sizeof(struct nfs4_file), 0, 0, NULL); 1040 if (file_slab == NULL) 1041 goto out_nomem; 1042 stateid_slab = kmem_cache_create("nfsd4_stateids", 1043 sizeof(struct nfs4_stateid), 0, 0, NULL); 1044 if (stateid_slab == NULL) 1045 goto out_nomem; 1046 deleg_slab = kmem_cache_create("nfsd4_delegations", 1047 sizeof(struct nfs4_delegation), 0, 0, NULL); 1048 if (deleg_slab == NULL) 1049 goto out_nomem; 1050 return 0; 1051 out_nomem: 1052 nfsd4_free_slabs(); 1053 dprintk("nfsd4: out of memory while initializing nfsv4\n"); 1054 return -ENOMEM; 1055 } 1056 1057 void 1058 nfs4_free_stateowner(struct kref *kref) 1059 { 1060 struct nfs4_stateowner *sop = 1061 container_of(kref, struct nfs4_stateowner, so_ref); 1062 kfree(sop->so_owner.data); 1063 kmem_cache_free(stateowner_slab, sop); 1064 } 1065 1066 static inline struct nfs4_stateowner * 1067 alloc_stateowner(struct xdr_netobj *owner) 1068 { 1069 struct nfs4_stateowner *sop; 1070 1071 if ((sop = kmem_cache_alloc(stateowner_slab, GFP_KERNEL))) { 1072 if ((sop->so_owner.data = kmalloc(owner->len, GFP_KERNEL))) { 1073 memcpy(sop->so_owner.data, owner->data, owner->len); 1074 sop->so_owner.len = owner->len; 1075 kref_init(&sop->so_ref); 1076 return sop; 1077 } 1078 kmem_cache_free(stateowner_slab, sop); 1079 } 1080 return NULL; 1081 } 1082 1083 static struct nfs4_stateowner * 1084 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { 1085 struct nfs4_stateowner *sop; 1086 struct nfs4_replay *rp; 1087 unsigned int idhashval; 1088 1089 if (!(sop = alloc_stateowner(&open->op_owner))) 1090 return NULL; 1091 idhashval = ownerid_hashval(current_ownerid); 1092 INIT_LIST_HEAD(&sop->so_idhash); 1093 INIT_LIST_HEAD(&sop->so_strhash); 1094 INIT_LIST_HEAD(&sop->so_perclient); 1095 INIT_LIST_HEAD(&sop->so_stateids); 1096 INIT_LIST_HEAD(&sop->so_perstateid); /* not used */ 1097 INIT_LIST_HEAD(&sop->so_close_lru); 1098 sop->so_time = 0; 1099 list_add(&sop->so_idhash, &ownerid_hashtbl[idhashval]); 1100 
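	/* An open stateowner is threaded onto three lists: the ownerid hash
	 * chain above, plus the ownerstr hash chain and the owning client's
	 * cl_openowners list below. */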
list_add(&sop->so_strhash, &ownerstr_hashtbl[strhashval]); 1101 list_add(&sop->so_perclient, &clp->cl_openowners); 1102 sop->so_is_open_owner = 1; 1103 sop->so_id = current_ownerid++; 1104 sop->so_client = clp; 1105 sop->so_seqid = open->op_seqid; 1106 sop->so_confirmed = 0; 1107 rp = &sop->so_replay; 1108 rp->rp_status = nfserr_serverfault; 1109 rp->rp_buflen = 0; 1110 rp->rp_buf = rp->rp_ibuf; 1111 return sop; 1112 } 1113 1114 static void 1115 release_stateid_lockowners(struct nfs4_stateid *open_stp) 1116 { 1117 struct nfs4_stateowner *lock_sop; 1118 1119 while (!list_empty(&open_stp->st_lockowners)) { 1120 lock_sop = list_entry(open_stp->st_lockowners.next, 1121 struct nfs4_stateowner, so_perstateid); 1122 /* list_del(&open_stp->st_lockowners); */ 1123 BUG_ON(lock_sop->so_is_open_owner); 1124 release_stateowner(lock_sop); 1125 } 1126 } 1127 1128 static void 1129 unhash_stateowner(struct nfs4_stateowner *sop) 1130 { 1131 struct nfs4_stateid *stp; 1132 1133 list_del(&sop->so_idhash); 1134 list_del(&sop->so_strhash); 1135 if (sop->so_is_open_owner) 1136 list_del(&sop->so_perclient); 1137 list_del(&sop->so_perstateid); 1138 while (!list_empty(&sop->so_stateids)) { 1139 stp = list_entry(sop->so_stateids.next, 1140 struct nfs4_stateid, st_perstateowner); 1141 if (sop->so_is_open_owner) 1142 release_stateid(stp, OPEN_STATE); 1143 else 1144 release_stateid(stp, LOCK_STATE); 1145 } 1146 } 1147 1148 static void 1149 release_stateowner(struct nfs4_stateowner *sop) 1150 { 1151 unhash_stateowner(sop); 1152 list_del(&sop->so_close_lru); 1153 nfs4_put_stateowner(sop); 1154 } 1155 1156 static inline void 1157 init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { 1158 struct nfs4_stateowner *sop = open->op_stateowner; 1159 unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id); 1160 1161 INIT_LIST_HEAD(&stp->st_hash); 1162 INIT_LIST_HEAD(&stp->st_perstateowner); 1163 INIT_LIST_HEAD(&stp->st_lockowners); 1164 INIT_LIST_HEAD(&stp->st_perfile); 1165 list_add(&stp->st_hash, &stateid_hashtbl[hashval]); 1166 list_add(&stp->st_perstateowner, &sop->so_stateids); 1167 list_add(&stp->st_perfile, &fp->fi_stateids); 1168 stp->st_stateowner = sop; 1169 get_nfs4_file(fp); 1170 stp->st_file = fp; 1171 stp->st_stateid.si_boot = boot_time; 1172 stp->st_stateid.si_stateownerid = sop->so_id; 1173 stp->st_stateid.si_fileid = fp->fi_id; 1174 stp->st_stateid.si_generation = 0; 1175 stp->st_access_bmap = 0; 1176 stp->st_deny_bmap = 0; 1177 __set_bit(open->op_share_access, &stp->st_access_bmap); 1178 __set_bit(open->op_share_deny, &stp->st_deny_bmap); 1179 stp->st_openstp = NULL; 1180 } 1181 1182 static void 1183 release_stateid(struct nfs4_stateid *stp, int flags) 1184 { 1185 struct file *filp = stp->st_vfs_file; 1186 1187 list_del(&stp->st_hash); 1188 list_del(&stp->st_perfile); 1189 list_del(&stp->st_perstateowner); 1190 if (flags & OPEN_STATE) { 1191 release_stateid_lockowners(stp); 1192 stp->st_vfs_file = NULL; 1193 nfsd_close(filp); 1194 } else if (flags & LOCK_STATE) 1195 locks_remove_posix(filp, (fl_owner_t) stp->st_stateowner); 1196 put_nfs4_file(stp->st_file); 1197 kmem_cache_free(stateid_slab, stp); 1198 } 1199 1200 static void 1201 move_to_close_lru(struct nfs4_stateowner *sop) 1202 { 1203 dprintk("NFSD: move_to_close_lru nfs4_stateowner %p\n", sop); 1204 1205 list_move_tail(&sop->so_close_lru, &close_lru); 1206 sop->so_time = get_seconds(); 1207 } 1208 1209 static int 1210 cmp_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, clientid_t *clid) { 1211 return 
((sop->so_owner.len == owner->len) && 1212 !memcmp(sop->so_owner.data, owner->data, owner->len) && 1213 (sop->so_client->cl_clientid.cl_id == clid->cl_id)); 1214 } 1215 1216 static struct nfs4_stateowner * 1217 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open) 1218 { 1219 struct nfs4_stateowner *so = NULL; 1220 1221 list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) { 1222 if (cmp_owner_str(so, &open->op_owner, &open->op_clientid)) 1223 return so; 1224 } 1225 return NULL; 1226 } 1227 1228 /* search file_hashtbl[] for file */ 1229 static struct nfs4_file * 1230 find_file(struct inode *ino) 1231 { 1232 unsigned int hashval = file_hashval(ino); 1233 struct nfs4_file *fp; 1234 1235 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { 1236 if (fp->fi_inode == ino) { 1237 get_nfs4_file(fp); 1238 return fp; 1239 } 1240 } 1241 return NULL; 1242 } 1243 1244 static int access_valid(u32 x) 1245 { 1246 return (x > 0 && x < 4); 1247 } 1248 1249 static int deny_valid(u32 x) 1250 { 1251 return (x >= 0 && x < 5); 1252 } 1253 1254 static void 1255 set_access(unsigned int *access, unsigned long bmap) { 1256 int i; 1257 1258 *access = 0; 1259 for (i = 1; i < 4; i++) { 1260 if (test_bit(i, &bmap)) 1261 *access |= i; 1262 } 1263 } 1264 1265 static void 1266 set_deny(unsigned int *deny, unsigned long bmap) { 1267 int i; 1268 1269 *deny = 0; 1270 for (i = 0; i < 4; i++) { 1271 if (test_bit(i, &bmap)) 1272 *deny |= i ; 1273 } 1274 } 1275 1276 static int 1277 test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) { 1278 unsigned int access, deny; 1279 1280 set_access(&access, stp->st_access_bmap); 1281 set_deny(&deny, stp->st_deny_bmap); 1282 if ((access & open->op_share_deny) || (deny & open->op_share_access)) 1283 return 0; 1284 return 1; 1285 } 1286 1287 /* 1288 * Called to check deny when READ with all zero stateid or 1289 * WRITE with all zero or all one stateid 1290 */ 1291 static __be32 1292 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 1293 { 1294 struct inode *ino = current_fh->fh_dentry->d_inode; 1295 struct nfs4_file *fp; 1296 struct nfs4_stateid *stp; 1297 __be32 ret; 1298 1299 dprintk("NFSD: nfs4_share_conflict\n"); 1300 1301 fp = find_file(ino); 1302 if (!fp) 1303 return nfs_ok; 1304 ret = nfserr_locked; 1305 /* Search for conflicting share reservations */ 1306 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 1307 if (test_bit(deny_type, &stp->st_deny_bmap) || 1308 test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap)) 1309 goto out; 1310 } 1311 ret = nfs_ok; 1312 out: 1313 put_nfs4_file(fp); 1314 return ret; 1315 } 1316 1317 static inline void 1318 nfs4_file_downgrade(struct file *filp, unsigned int share_access) 1319 { 1320 if (share_access & NFS4_SHARE_ACCESS_WRITE) { 1321 put_write_access(filp->f_path.dentry->d_inode); 1322 filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE; 1323 } 1324 } 1325 1326 /* 1327 * Recall a delegation 1328 */ 1329 static int 1330 do_recall(void *__dp) 1331 { 1332 struct nfs4_delegation *dp = __dp; 1333 1334 dp->dl_file->fi_had_conflict = true; 1335 nfsd4_cb_recall(dp); 1336 return 0; 1337 } 1338 1339 /* 1340 * Spawn a thread to perform a recall on the delegation represented 1341 * by the lease (file_lock) 1342 * 1343 * Called from break_lease() with lock_kernel() held. 1344 * Note: we assume break_lease will only call this *once* for any given 1345 * lease. 
 */
static
void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct task_struct *t;

	dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
	if (!dp)
		return;

	/* We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	spin_lock(&recall_lock);
	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
	spin_unlock(&recall_lock);

	/* only place dl_time is set. protected by lock_kernel*/
	dp->dl_time = get_seconds();

	/* XXX need to merge NFSD_LEASE_TIME with fs/locks.c:lease_break_time */
	fl->fl_break_time = jiffies + NFSD_LEASE_TIME * HZ;

	t = kthread_run(do_recall, dp, "%s", "nfs4_cb_recall");
	if (IS_ERR(t)) {
		struct nfs4_client *clp = dp->dl_client;

		printk(KERN_INFO "NFSD: Callback thread failed "
			"for client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
		nfs4_put_delegation(dp);
	}
}

/*
 * The file_lock is being reaped.
 *
 * Called by locks_free_lock() with lock_kernel() held.
 */
static
void nfsd_release_deleg_cb(struct file_lock *fl)
{
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;

	dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count));

	if (!(fl->fl_flags & FL_LEASE) || !dp)
		return;
	dp->dl_flock = NULL;
}

/*
 * Set the delegation file_lock back pointer.
 *
 * Called from setlease() with lock_kernel() held.
1406 */ 1407 static 1408 void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl) 1409 { 1410 struct nfs4_delegation *dp = (struct nfs4_delegation *)new->fl_owner; 1411 1412 dprintk("NFSD: nfsd_copy_lock_deleg_cb: new fl %p dp %p\n", new, dp); 1413 if (!dp) 1414 return; 1415 dp->dl_flock = new; 1416 } 1417 1418 /* 1419 * Called from setlease() with lock_kernel() held 1420 */ 1421 static 1422 int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try) 1423 { 1424 struct nfs4_delegation *onlistd = 1425 (struct nfs4_delegation *)onlist->fl_owner; 1426 struct nfs4_delegation *tryd = 1427 (struct nfs4_delegation *)try->fl_owner; 1428 1429 if (onlist->fl_lmops != try->fl_lmops) 1430 return 0; 1431 1432 return onlistd->dl_client == tryd->dl_client; 1433 } 1434 1435 1436 static 1437 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) 1438 { 1439 if (arg & F_UNLCK) 1440 return lease_modify(onlist, arg); 1441 else 1442 return -EAGAIN; 1443 } 1444 1445 static struct lock_manager_operations nfsd_lease_mng_ops = { 1446 .fl_break = nfsd_break_deleg_cb, 1447 .fl_release_private = nfsd_release_deleg_cb, 1448 .fl_copy_lock = nfsd_copy_lock_deleg_cb, 1449 .fl_mylease = nfsd_same_client_deleg_cb, 1450 .fl_change = nfsd_change_deleg_cb, 1451 }; 1452 1453 1454 __be32 1455 nfsd4_process_open1(struct nfsd4_open *open) 1456 { 1457 clientid_t *clientid = &open->op_clientid; 1458 struct nfs4_client *clp = NULL; 1459 unsigned int strhashval; 1460 struct nfs4_stateowner *sop = NULL; 1461 1462 if (!check_name(open->op_owner)) 1463 return nfserr_inval; 1464 1465 if (STALE_CLIENTID(&open->op_clientid)) 1466 return nfserr_stale_clientid; 1467 1468 strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner); 1469 sop = find_openstateowner_str(strhashval, open); 1470 open->op_stateowner = sop; 1471 if (!sop) { 1472 /* Make sure the client's lease hasn't expired. */ 1473 clp = find_confirmed_client(clientid); 1474 if (clp == NULL) 1475 return nfserr_expired; 1476 goto renew; 1477 } 1478 if (!sop->so_confirmed) { 1479 /* Replace unconfirmed owners without checking for replay. */ 1480 clp = sop->so_client; 1481 release_stateowner(sop); 1482 open->op_stateowner = NULL; 1483 goto renew; 1484 } 1485 if (open->op_seqid == sop->so_seqid - 1) { 1486 if (sop->so_replay.rp_buflen) 1487 return nfserr_replay_me; 1488 /* The original OPEN failed so spectacularly 1489 * that we don't even have replay data saved! 1490 * Therefore, we have no choice but to continue 1491 * processing this OPEN; presumably, we'll 1492 * fail again for the same reason. 
1493 */ 1494 dprintk("nfsd4_process_open1: replay with no replay cache\n"); 1495 goto renew; 1496 } 1497 if (open->op_seqid != sop->so_seqid) 1498 return nfserr_bad_seqid; 1499 renew: 1500 if (open->op_stateowner == NULL) { 1501 sop = alloc_init_open_stateowner(strhashval, clp, open); 1502 if (sop == NULL) 1503 return nfserr_resource; 1504 open->op_stateowner = sop; 1505 } 1506 list_del_init(&sop->so_close_lru); 1507 renew_client(sop->so_client); 1508 return nfs_ok; 1509 } 1510 1511 static inline __be32 1512 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 1513 { 1514 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 1515 return nfserr_openmode; 1516 else 1517 return nfs_ok; 1518 } 1519 1520 static struct nfs4_delegation * 1521 find_delegation_file(struct nfs4_file *fp, stateid_t *stid) 1522 { 1523 struct nfs4_delegation *dp; 1524 1525 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { 1526 if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) 1527 return dp; 1528 } 1529 return NULL; 1530 } 1531 1532 static __be32 1533 nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open, 1534 struct nfs4_delegation **dp) 1535 { 1536 int flags; 1537 __be32 status = nfserr_bad_stateid; 1538 1539 *dp = find_delegation_file(fp, &open->op_delegate_stateid); 1540 if (*dp == NULL) 1541 goto out; 1542 flags = open->op_share_access == NFS4_SHARE_ACCESS_READ ? 1543 RD_STATE : WR_STATE; 1544 status = nfs4_check_delegmode(*dp, flags); 1545 if (status) 1546 *dp = NULL; 1547 out: 1548 if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR) 1549 return nfs_ok; 1550 if (status) 1551 return status; 1552 open->op_stateowner->so_confirmed = 1; 1553 return nfs_ok; 1554 } 1555 1556 static __be32 1557 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_stateid **stpp) 1558 { 1559 struct nfs4_stateid *local; 1560 __be32 status = nfserr_share_denied; 1561 struct nfs4_stateowner *sop = open->op_stateowner; 1562 1563 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 1564 /* ignore lock owners */ 1565 if (local->st_stateowner->so_is_open_owner == 0) 1566 continue; 1567 /* remember if we have seen this open owner */ 1568 if (local->st_stateowner == sop) 1569 *stpp = local; 1570 /* check for conflicting share reservations */ 1571 if (!test_share(local, open)) 1572 goto out; 1573 } 1574 status = 0; 1575 out: 1576 return status; 1577 } 1578 1579 static inline struct nfs4_stateid * 1580 nfs4_alloc_stateid(void) 1581 { 1582 return kmem_cache_alloc(stateid_slab, GFP_KERNEL); 1583 } 1584 1585 static __be32 1586 nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp, 1587 struct nfs4_delegation *dp, 1588 struct svc_fh *cur_fh, int flags) 1589 { 1590 struct nfs4_stateid *stp; 1591 1592 stp = nfs4_alloc_stateid(); 1593 if (stp == NULL) 1594 return nfserr_resource; 1595 1596 if (dp) { 1597 get_file(dp->dl_vfs_file); 1598 stp->st_vfs_file = dp->dl_vfs_file; 1599 } else { 1600 __be32 status; 1601 status = nfsd_open(rqstp, cur_fh, S_IFREG, flags, 1602 &stp->st_vfs_file); 1603 if (status) { 1604 if (status == nfserr_dropit) 1605 status = nfserr_jukebox; 1606 kmem_cache_free(stateid_slab, stp); 1607 return status; 1608 } 1609 } 1610 *stpp = stp; 1611 return 0; 1612 } 1613 1614 static inline __be32 1615 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 1616 struct nfsd4_open *open) 1617 { 1618 struct iattr iattr = { 1619 .ia_valid = ATTR_SIZE, 1620 .ia_size = 0, 1621 }; 1622 if (!open->op_truncate) 1623 return 0; 1624 if (!(open->op_share_access 
& NFS4_SHARE_ACCESS_WRITE)) 1625 return nfserr_inval; 1626 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 1627 } 1628 1629 static __be32 1630 nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open) 1631 { 1632 struct file *filp = stp->st_vfs_file; 1633 struct inode *inode = filp->f_path.dentry->d_inode; 1634 unsigned int share_access, new_writer; 1635 __be32 status; 1636 1637 set_access(&share_access, stp->st_access_bmap); 1638 new_writer = (~share_access) & open->op_share_access 1639 & NFS4_SHARE_ACCESS_WRITE; 1640 1641 if (new_writer) { 1642 int err = get_write_access(inode); 1643 if (err) 1644 return nfserrno(err); 1645 } 1646 status = nfsd4_truncate(rqstp, cur_fh, open); 1647 if (status) { 1648 if (new_writer) 1649 put_write_access(inode); 1650 return status; 1651 } 1652 /* remember the open */ 1653 filp->f_mode |= open->op_share_access; 1654 set_bit(open->op_share_access, &stp->st_access_bmap); 1655 set_bit(open->op_share_deny, &stp->st_deny_bmap); 1656 1657 return nfs_ok; 1658 } 1659 1660 1661 static void 1662 nfs4_set_claim_prev(struct nfsd4_open *open) 1663 { 1664 open->op_stateowner->so_confirmed = 1; 1665 open->op_stateowner->so_client->cl_firststate = 1; 1666 } 1667 1668 /* 1669 * Attempt to hand out a delegation. 1670 */ 1671 static void 1672 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_stateid *stp) 1673 { 1674 struct nfs4_delegation *dp; 1675 struct nfs4_stateowner *sop = stp->st_stateowner; 1676 struct nfs4_callback *cb = &sop->so_client->cl_callback; 1677 struct file_lock fl, *flp = &fl; 1678 int status, flag = 0; 1679 1680 flag = NFS4_OPEN_DELEGATE_NONE; 1681 open->op_recall = 0; 1682 switch (open->op_claim_type) { 1683 case NFS4_OPEN_CLAIM_PREVIOUS: 1684 if (!atomic_read(&cb->cb_set)) 1685 open->op_recall = 1; 1686 flag = open->op_delegate_type; 1687 if (flag == NFS4_OPEN_DELEGATE_NONE) 1688 goto out; 1689 break; 1690 case NFS4_OPEN_CLAIM_NULL: 1691 /* Let's not give out any delegations till everyone's 1692 * had the chance to reclaim theirs.... */ 1693 if (nfs4_in_grace()) 1694 goto out; 1695 if (!atomic_read(&cb->cb_set) || !sop->so_confirmed) 1696 goto out; 1697 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 1698 flag = NFS4_OPEN_DELEGATE_WRITE; 1699 else 1700 flag = NFS4_OPEN_DELEGATE_READ; 1701 break; 1702 default: 1703 goto out; 1704 } 1705 1706 dp = alloc_init_deleg(sop->so_client, stp, fh, flag); 1707 if (dp == NULL) { 1708 flag = NFS4_OPEN_DELEGATE_NONE; 1709 goto out; 1710 } 1711 locks_init_lock(&fl); 1712 fl.fl_lmops = &nfsd_lease_mng_ops; 1713 fl.fl_flags = FL_LEASE; 1714 fl.fl_end = OFFSET_MAX; 1715 fl.fl_owner = (fl_owner_t)dp; 1716 fl.fl_file = stp->st_vfs_file; 1717 fl.fl_pid = current->tgid; 1718 1719 /* vfs_setlease checks to see if delegation should be handed out. 1720 * the lock_manager callbacks fl_mylease and fl_change are used 1721 */ 1722 if ((status = vfs_setlease(stp->st_vfs_file, 1723 flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK, &flp))) { 1724 dprintk("NFSD: setlease failed [%d], no delegation\n", status); 1725 unhash_delegation(dp); 1726 flag = NFS4_OPEN_DELEGATE_NONE; 1727 goto out; 1728 } 1729 1730 memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); 1731 1732 dprintk("NFSD: delegation stateid=(%08x/%08x/%08x/%08x)\n\n", 1733 dp->dl_stateid.si_boot, 1734 dp->dl_stateid.si_stateownerid, 1735 dp->dl_stateid.si_fileid, 1736 dp->dl_stateid.si_generation); 1737 out: 1738 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS 1739 && flag == NFS4_OPEN_DELEGATE_NONE 1740 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) 1741 printk("NFSD: WARNING: refusing delegation reclaim\n"); 1742 open->op_delegate_type = flag; 1743 } 1744 1745 /* 1746 * called with nfs4_lock_state() held. 1747 */ 1748 __be32 1749 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 1750 { 1751 struct nfs4_file *fp = NULL; 1752 struct inode *ino = current_fh->fh_dentry->d_inode; 1753 struct nfs4_stateid *stp = NULL; 1754 struct nfs4_delegation *dp = NULL; 1755 __be32 status; 1756 1757 status = nfserr_inval; 1758 if (!access_valid(open->op_share_access) 1759 || !deny_valid(open->op_share_deny)) 1760 goto out; 1761 /* 1762 * Lookup file; if found, lookup stateid and check open request, 1763 * and check for delegations in the process of being recalled. 1764 * If not found, create the nfs4_file struct 1765 */ 1766 fp = find_file(ino); 1767 if (fp) { 1768 if ((status = nfs4_check_open(fp, open, &stp))) 1769 goto out; 1770 status = nfs4_check_deleg(fp, open, &dp); 1771 if (status) 1772 goto out; 1773 } else { 1774 status = nfserr_bad_stateid; 1775 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR) 1776 goto out; 1777 status = nfserr_resource; 1778 fp = alloc_init_file(ino); 1779 if (fp == NULL) 1780 goto out; 1781 } 1782 1783 /* 1784 * OPEN the file, or upgrade an existing OPEN. 1785 * If truncate fails, the OPEN fails. 1786 */ 1787 if (stp) { 1788 /* Stateid was found, this is an OPEN upgrade */ 1789 status = nfs4_upgrade_open(rqstp, current_fh, stp, open); 1790 if (status) 1791 goto out; 1792 update_stateid(&stp->st_stateid); 1793 } else { 1794 /* Stateid was not found, this is a new OPEN */ 1795 int flags = 0; 1796 if (open->op_share_access & NFS4_SHARE_ACCESS_READ) 1797 flags |= MAY_READ; 1798 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 1799 flags |= MAY_WRITE; 1800 status = nfs4_new_open(rqstp, &stp, dp, current_fh, flags); 1801 if (status) 1802 goto out; 1803 init_stateid(stp, fp, open); 1804 status = nfsd4_truncate(rqstp, current_fh, open); 1805 if (status) { 1806 release_stateid(stp, OPEN_STATE); 1807 goto out; 1808 } 1809 } 1810 memcpy(&open->op_stateid, &stp->st_stateid, sizeof(stateid_t)); 1811 1812 /* 1813 * Attempt to hand out a delegation. No error return, because the 1814 * OPEN succeeds even if we fail. 1815 */ 1816 nfs4_open_delegation(current_fh, open, stp); 1817 1818 status = nfs_ok; 1819 1820 dprintk("nfs4_process_open2: stateid=(%08x/%08x/%08x/%08x)\n", 1821 stp->st_stateid.si_boot, stp->st_stateid.si_stateownerid, 1822 stp->st_stateid.si_fileid, stp->st_stateid.si_generation); 1823 out: 1824 if (fp) 1825 put_nfs4_file(fp); 1826 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 1827 nfs4_set_claim_prev(open); 1828 /* 1829 * To finish the open response, we just need to set the rflags. 
1830 */ 1831 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 1832 if (!open->op_stateowner->so_confirmed) 1833 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 1834 1835 return status; 1836 } 1837 1838 static struct workqueue_struct *laundry_wq; 1839 static void laundromat_main(struct work_struct *); 1840 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); 1841 1842 __be32 1843 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 1844 clientid_t *clid) 1845 { 1846 struct nfs4_client *clp; 1847 __be32 status; 1848 1849 nfs4_lock_state(); 1850 dprintk("process_renew(%08x/%08x): starting\n", 1851 clid->cl_boot, clid->cl_id); 1852 status = nfserr_stale_clientid; 1853 if (STALE_CLIENTID(clid)) 1854 goto out; 1855 clp = find_confirmed_client(clid); 1856 status = nfserr_expired; 1857 if (clp == NULL) { 1858 /* We assume the client took too long to RENEW. */ 1859 dprintk("nfsd4_renew: clientid not found!\n"); 1860 goto out; 1861 } 1862 renew_client(clp); 1863 status = nfserr_cb_path_down; 1864 if (!list_empty(&clp->cl_delegations) 1865 && !atomic_read(&clp->cl_callback.cb_set)) 1866 goto out; 1867 status = nfs_ok; 1868 out: 1869 nfs4_unlock_state(); 1870 return status; 1871 } 1872 1873 static void 1874 end_grace(void) 1875 { 1876 dprintk("NFSD: end of grace period\n"); 1877 nfsd4_recdir_purge_old(); 1878 in_grace = 0; 1879 } 1880 1881 static time_t 1882 nfs4_laundromat(void) 1883 { 1884 struct nfs4_client *clp; 1885 struct nfs4_stateowner *sop; 1886 struct nfs4_delegation *dp; 1887 struct list_head *pos, *next, reaplist; 1888 time_t cutoff = get_seconds() - NFSD_LEASE_TIME; 1889 time_t t, clientid_val = NFSD_LEASE_TIME; 1890 time_t u, test_val = NFSD_LEASE_TIME; 1891 1892 nfs4_lock_state(); 1893 1894 dprintk("NFSD: laundromat service - starting\n"); 1895 if (in_grace) 1896 end_grace(); 1897 list_for_each_safe(pos, next, &client_lru) { 1898 clp = list_entry(pos, struct nfs4_client, cl_lru); 1899 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { 1900 t = clp->cl_time - cutoff; 1901 if (clientid_val > t) 1902 clientid_val = t; 1903 break; 1904 } 1905 dprintk("NFSD: purging unused client (clientid %08x)\n", 1906 clp->cl_clientid.cl_id); 1907 nfsd4_remove_clid_dir(clp); 1908 expire_client(clp); 1909 } 1910 INIT_LIST_HEAD(&reaplist); 1911 spin_lock(&recall_lock); 1912 list_for_each_safe(pos, next, &del_recall_lru) { 1913 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 1914 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { 1915 u = dp->dl_time - cutoff; 1916 if (test_val > u) 1917 test_val = u; 1918 break; 1919 } 1920 dprintk("NFSD: purging unused delegation dp %p, fp %p\n", 1921 dp, dp->dl_flock); 1922 list_move(&dp->dl_recall_lru, &reaplist); 1923 } 1924 spin_unlock(&recall_lock); 1925 list_for_each_safe(pos, next, &reaplist) { 1926 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 1927 list_del_init(&dp->dl_recall_lru); 1928 unhash_delegation(dp); 1929 } 1930 test_val = NFSD_LEASE_TIME; 1931 list_for_each_safe(pos, next, &close_lru) { 1932 sop = list_entry(pos, struct nfs4_stateowner, so_close_lru); 1933 if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) { 1934 u = sop->so_time - cutoff; 1935 if (test_val > u) 1936 test_val = u; 1937 break; 1938 } 1939 dprintk("NFSD: purging unused open stateowner (so_id %d)\n", 1940 sop->so_id); 1941 release_stateowner(sop); 1942 } 1943 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT) 1944 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT; 1945 nfs4_unlock_state(); 
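	/* clientid_val is the number of seconds until the laundromat needs
	 * to run again (clamped to NFSD_LAUNDROMAT_MINTIMEOUT just above);
	 * laundromat_main() uses it to requeue the delayed work. */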
1946 return clientid_val; 1947 } 1948 1949 static void 1950 laundromat_main(struct work_struct *not_used) 1951 { 1952 time_t t; 1953 1954 t = nfs4_laundromat(); 1955 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); 1956 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ); 1957 } 1958 1959 static struct nfs4_stateowner * 1960 search_close_lru(u32 st_id, int flags) 1961 { 1962 struct nfs4_stateowner *local = NULL; 1963 1964 if (flags & CLOSE_STATE) { 1965 list_for_each_entry(local, &close_lru, so_close_lru) { 1966 if (local->so_id == st_id) 1967 return local; 1968 } 1969 } 1970 return NULL; 1971 } 1972 1973 static inline int 1974 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp) 1975 { 1976 return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode; 1977 } 1978 1979 static int 1980 STALE_STATEID(stateid_t *stateid) 1981 { 1982 if (stateid->si_boot == boot_time) 1983 return 0; 1984 dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n", 1985 stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid, 1986 stateid->si_generation); 1987 return 1; 1988 } 1989 /* Note: reads are also allowed on a stateid whose open granted only write access. */ 1990 static inline int 1991 access_permit_read(unsigned long access_bmap) 1992 { 1993 return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) || 1994 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) || 1995 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap); 1996 } 1997 1998 static inline int 1999 access_permit_write(unsigned long access_bmap) 2000 { 2001 return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) || 2002 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap); 2003 } 2004 2005 static 2006 __be32 nfs4_check_openmode(struct nfs4_stateid *stp, int flags) 2007 { 2008 __be32 status = nfserr_openmode; 2009 2010 if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap))) 2011 goto out; 2012 if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap))) 2013 goto out; 2014 status = nfs_ok; 2015 out: 2016 return status; 2017 } 2018 2019 static inline __be32 2020 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags) 2021 { 2022 /* Trying to call delegreturn with a special stateid? Yuch: */ 2023 if (!(flags & (RD_STATE | WR_STATE))) 2024 return nfserr_bad_stateid; 2025 else if (ONE_STATEID(stateid) && (flags & RD_STATE)) 2026 return nfs_ok; 2027 else if (nfs4_in_grace()) { 2028 /* Answer in remaining cases depends on existence of 2029 * conflicting state; so we must wait out the grace period. */ 2030 return nfserr_grace; 2031 } else if (flags & WR_STATE) 2032 return nfs4_share_conflict(current_fh, 2033 NFS4_SHARE_DENY_WRITE); 2034 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 2035 return nfs4_share_conflict(current_fh, 2036 NFS4_SHARE_DENY_READ); 2037 } 2038 2039 /* 2040 * Allow READ/WRITE during grace period on recovered state only for files 2041 * that are not able to provide mandatory locking.
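 * (I/O on a file with mandatory locking enabled is rejected with nfserr_grace
 * until the grace period ends.)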
2042 */ 2043 static inline int 2044 io_during_grace_disallowed(struct inode *inode, int flags) 2045 { 2046 return nfs4_in_grace() && (flags & (RD_STATE | WR_STATE)) 2047 && MANDATORY_LOCK(inode); 2048 } 2049 2050 /* 2051 * Checks for stateid operations 2052 */ 2053 __be32 2054 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, stateid_t *stateid, int flags, struct file **filpp) 2055 { 2056 struct nfs4_stateid *stp = NULL; 2057 struct nfs4_delegation *dp = NULL; 2058 stateid_t *stidp; 2059 struct inode *ino = current_fh->fh_dentry->d_inode; 2060 __be32 status; 2061 2062 dprintk("NFSD: preprocess_stateid_op: stateid = (%08x/%08x/%08x/%08x)\n", 2063 stateid->si_boot, stateid->si_stateownerid, 2064 stateid->si_fileid, stateid->si_generation); 2065 if (filpp) 2066 *filpp = NULL; 2067 2068 if (io_during_grace_disallowed(ino, flags)) 2069 return nfserr_grace; 2070 2071 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 2072 return check_special_stateids(current_fh, stateid, flags); 2073 2074 /* STALE STATEID */ 2075 status = nfserr_stale_stateid; 2076 if (STALE_STATEID(stateid)) 2077 goto out; 2078 2079 /* BAD STATEID */ 2080 status = nfserr_bad_stateid; 2081 if (!stateid->si_fileid) { /* delegation stateid */ 2082 if(!(dp = find_delegation_stateid(ino, stateid))) { 2083 dprintk("NFSD: delegation stateid not found\n"); 2084 goto out; 2085 } 2086 stidp = &dp->dl_stateid; 2087 } else { /* open or lock stateid */ 2088 if (!(stp = find_stateid(stateid, flags))) { 2089 dprintk("NFSD: open or lock stateid not found\n"); 2090 goto out; 2091 } 2092 if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp)) 2093 goto out; 2094 if (!stp->st_stateowner->so_confirmed) 2095 goto out; 2096 stidp = &stp->st_stateid; 2097 } 2098 if (stateid->si_generation > stidp->si_generation) 2099 goto out; 2100 2101 /* OLD STATEID */ 2102 status = nfserr_old_stateid; 2103 if (stateid->si_generation < stidp->si_generation) 2104 goto out; 2105 if (stp) { 2106 if ((status = nfs4_check_openmode(stp,flags))) 2107 goto out; 2108 renew_client(stp->st_stateowner->so_client); 2109 if (filpp) 2110 *filpp = stp->st_vfs_file; 2111 } else if (dp) { 2112 if ((status = nfs4_check_delegmode(dp, flags))) 2113 goto out; 2114 renew_client(dp->dl_client); 2115 if (flags & DELEG_RET) 2116 unhash_delegation(dp); 2117 if (filpp) 2118 *filpp = dp->dl_vfs_file; 2119 } 2120 status = nfs_ok; 2121 out: 2122 return status; 2123 } 2124 2125 static inline int 2126 setlkflg (int type) 2127 { 2128 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 2129 RD_STATE : WR_STATE; 2130 } 2131 2132 /* 2133 * Checks for sequence id mutating operations. 
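 * (OPEN_CONFIRM, OPEN_DOWNGRADE, CLOSE, LOCK and LOCKU all come through here;
 * on success *sopp and *stpp return the stateowner and stateid to operate on.)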
2134 */ 2135 static __be32 2136 nfs4_preprocess_seqid_op(struct svc_fh *current_fh, u32 seqid, stateid_t *stateid, int flags, struct nfs4_stateowner **sopp, struct nfs4_stateid **stpp, struct nfsd4_lock *lock) 2137 { 2138 struct nfs4_stateid *stp; 2139 struct nfs4_stateowner *sop; 2140 2141 dprintk("NFSD: preprocess_seqid_op: seqid=%d " 2142 "stateid = (%08x/%08x/%08x/%08x)\n", seqid, 2143 stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid, 2144 stateid->si_generation); 2145 2146 *stpp = NULL; 2147 *sopp = NULL; 2148 2149 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 2150 printk("NFSD: preprocess_seqid_op: magic stateid!\n"); 2151 return nfserr_bad_stateid; 2152 } 2153 2154 if (STALE_STATEID(stateid)) 2155 return nfserr_stale_stateid; 2156 /* 2157 * We return BAD_STATEID if the filehandle doesn't match the stateid, 2158 * the confirmed flag is incorrectly set, or the generation 2159 * number is incorrect. 2160 */ 2161 stp = find_stateid(stateid, flags); 2162 if (stp == NULL) { 2163 /* 2164 * Also, we should make sure this isn't just the result of 2165 * a replayed close: 2166 */ 2167 sop = search_close_lru(stateid->si_stateownerid, flags); 2168 if (sop == NULL) 2169 return nfserr_bad_stateid; 2170 *sopp = sop; 2171 goto check_replay; 2172 } 2173 2174 if (lock) { 2175 struct nfs4_stateowner *sop = stp->st_stateowner; 2176 clientid_t *lockclid = &lock->v.new.clientid; 2177 struct nfs4_client *clp = sop->so_client; 2178 int lkflg = 0; 2179 __be32 status; 2180 2181 lkflg = setlkflg(lock->lk_type); 2182 2183 if (lock->lk_is_new) { 2184 if (!sop->so_is_open_owner) 2185 return nfserr_bad_stateid; 2186 if (!cmp_clid(&clp->cl_clientid, lockclid)) 2187 return nfserr_bad_stateid; 2188 /* stp is the open stateid */ 2189 status = nfs4_check_openmode(stp, lkflg); 2190 if (status) 2191 return status; 2192 } else { 2193 /* stp is the lock stateid */ 2194 status = nfs4_check_openmode(stp->st_openstp, lkflg); 2195 if (status) 2196 return status; 2197 } 2198 2199 } 2200 2201 if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp)) { 2202 printk("NFSD: preprocess_seqid_op: fh-stateid mismatch!\n"); 2203 return nfserr_bad_stateid; 2204 } 2205 2206 *stpp = stp; 2207 *sopp = sop = stp->st_stateowner; 2208 2209 /* 2210 * We now validate the seqid and stateid generation numbers. 2211 * For the moment, we ignore the possibility of 2212 * generation number wraparound.
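 * A seqid exactly one less than so_seqid is treated as a retransmission; see
 * check_replay below.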
2213 */ 2214 if (seqid != sop->so_seqid) 2215 goto check_replay; 2216 2217 if (sop->so_confirmed && flags & CONFIRM) { 2218 printk("NFSD: preprocess_seqid_op: expected" 2219 " unconfirmed stateowner!\n"); 2220 return nfserr_bad_stateid; 2221 } 2222 if (!sop->so_confirmed && !(flags & CONFIRM)) { 2223 printk("NFSD: preprocess_seqid_op: stateowner not" 2224 " confirmed yet!\n"); 2225 return nfserr_bad_stateid; 2226 } 2227 if (stateid->si_generation > stp->st_stateid.si_generation) { 2228 printk("NFSD: preprocess_seqid_op: future stateid?!\n"); 2229 return nfserr_bad_stateid; 2230 } 2231 2232 if (stateid->si_generation < stp->st_stateid.si_generation) { 2233 printk("NFSD: preprocess_seqid_op: old stateid!\n"); 2234 return nfserr_old_stateid; 2235 } 2236 renew_client(sop->so_client); 2237 return nfs_ok; 2238 2239 check_replay: 2240 if (seqid == sop->so_seqid - 1) { 2241 dprintk("NFSD: preprocess_seqid_op: retransmission?\n"); 2242 /* indicate replay to calling function */ 2243 return nfserr_replay_me; 2244 } 2245 printk("NFSD: preprocess_seqid_op: bad seqid (expected %d, got %d)\n", 2246 sop->so_seqid, seqid); 2247 *sopp = NULL; 2248 return nfserr_bad_seqid; 2249 } 2250 2251 __be32 2252 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2253 struct nfsd4_open_confirm *oc) 2254 { 2255 __be32 status; 2256 struct nfs4_stateowner *sop; 2257 struct nfs4_stateid *stp; 2258 2259 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n", 2260 (int)cstate->current_fh.fh_dentry->d_name.len, 2261 cstate->current_fh.fh_dentry->d_name.name); 2262 2263 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 2264 if (status) 2265 return status; 2266 2267 nfs4_lock_state(); 2268 2269 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh, 2270 oc->oc_seqid, &oc->oc_req_stateid, 2271 CHECK_FH | CONFIRM | OPEN_STATE, 2272 &oc->oc_stateowner, &stp, NULL))) 2273 goto out; 2274 2275 sop = oc->oc_stateowner; 2276 sop->so_confirmed = 1; 2277 update_stateid(&stp->st_stateid); 2278 memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t)); 2279 dprintk("NFSD: nfsd4_open_confirm: success, seqid=%d " 2280 "stateid=(%08x/%08x/%08x/%08x)\n", oc->oc_seqid, 2281 stp->st_stateid.si_boot, 2282 stp->st_stateid.si_stateownerid, 2283 stp->st_stateid.si_fileid, 2284 stp->st_stateid.si_generation); 2285 2286 nfsd4_create_clid_dir(sop->so_client); 2287 out: 2288 if (oc->oc_stateowner) { 2289 nfs4_get_stateowner(oc->oc_stateowner); 2290 cstate->replay_owner = oc->oc_stateowner; 2291 } 2292 nfs4_unlock_state(); 2293 return status; 2294 } 2295 2296 2297 /* 2298 * unset all bits in union bitmap (bmap) that 2299 * do not exist in share (from successful OPEN_DOWNGRADE) 2300 */ 2301 static void 2302 reset_union_bmap_access(unsigned long access, unsigned long *bmap) 2303 { 2304 int i; 2305 for (i = 1; i < 4; i++) { 2306 if ((i & access) != i) 2307 __clear_bit(i, bmap); 2308 } 2309 } 2310 2311 static void 2312 reset_union_bmap_deny(unsigned long deny, unsigned long *bmap) 2313 { 2314 int i; 2315 for (i = 0; i < 4; i++) { 2316 if ((i & deny) != i) 2317 __clear_bit(i, bmap); 2318 } 2319 } 2320 2321 __be32 2322 nfsd4_open_downgrade(struct svc_rqst *rqstp, 2323 struct nfsd4_compound_state *cstate, 2324 struct nfsd4_open_downgrade *od) 2325 { 2326 __be32 status; 2327 struct nfs4_stateid *stp; 2328 unsigned int share_access; 2329 2330 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 2331 (int)cstate->current_fh.fh_dentry->d_name.len, 2332 cstate->current_fh.fh_dentry->d_name.name); 2333 2334 if 
(!access_valid(od->od_share_access) 2335 || !deny_valid(od->od_share_deny)) 2336 return nfserr_inval; 2337 2338 nfs4_lock_state(); 2339 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh, 2340 od->od_seqid, 2341 &od->od_stateid, 2342 CHECK_FH | OPEN_STATE, 2343 &od->od_stateowner, &stp, NULL))) 2344 goto out; 2345 2346 status = nfserr_inval; 2347 if (!test_bit(od->od_share_access, &stp->st_access_bmap)) { 2348 dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n", 2349 stp->st_access_bmap, od->od_share_access); 2350 goto out; 2351 } 2352 if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) { 2353 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", 2354 stp->st_deny_bmap, od->od_share_deny); 2355 goto out; 2356 } 2357 set_access(&share_access, stp->st_access_bmap); 2358 nfs4_file_downgrade(stp->st_vfs_file, 2359 share_access & ~od->od_share_access); 2360 2361 reset_union_bmap_access(od->od_share_access, &stp->st_access_bmap); 2362 reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap); 2363 2364 update_stateid(&stp->st_stateid); 2365 memcpy(&od->od_stateid, &stp->st_stateid, sizeof(stateid_t)); 2366 status = nfs_ok; 2367 out: 2368 if (od->od_stateowner) { 2369 nfs4_get_stateowner(od->od_stateowner); 2370 cstate->replay_owner = od->od_stateowner; 2371 } 2372 nfs4_unlock_state(); 2373 return status; 2374 } 2375 2376 /* 2377 * nfs4_unlock_state() called after encode 2378 */ 2379 __be32 2380 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2381 struct nfsd4_close *close) 2382 { 2383 __be32 status; 2384 struct nfs4_stateid *stp; 2385 2386 dprintk("NFSD: nfsd4_close on file %.*s\n", 2387 (int)cstate->current_fh.fh_dentry->d_name.len, 2388 cstate->current_fh.fh_dentry->d_name.name); 2389 2390 nfs4_lock_state(); 2391 /* check close_lru for replay */ 2392 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh, 2393 close->cl_seqid, 2394 &close->cl_stateid, 2395 CHECK_FH | OPEN_STATE | CLOSE_STATE, 2396 &close->cl_stateowner, &stp, NULL))) 2397 goto out; 2398 status = nfs_ok; 2399 update_stateid(&stp->st_stateid); 2400 memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t)); 2401 2402 /* release_stateid() calls nfsd_close() if needed */ 2403 release_stateid(stp, OPEN_STATE); 2404 2405 /* place unused nfs4_stateowners on so_close_lru list to be 2406 * released by the laundromat service after the lease period 2407 * to enable us to handle CLOSE replay 2408 */ 2409 if (list_empty(&close->cl_stateowner->so_stateids)) 2410 move_to_close_lru(close->cl_stateowner); 2411 out: 2412 if (close->cl_stateowner) { 2413 nfs4_get_stateowner(close->cl_stateowner); 2414 cstate->replay_owner = close->cl_stateowner; 2415 } 2416 nfs4_unlock_state(); 2417 return status; 2418 } 2419 2420 __be32 2421 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2422 struct nfsd4_delegreturn *dr) 2423 { 2424 __be32 status; 2425 2426 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 2427 goto out; 2428 2429 nfs4_lock_state(); 2430 status = nfs4_preprocess_stateid_op(&cstate->current_fh, 2431 &dr->dr_stateid, DELEG_RET, NULL); 2432 nfs4_unlock_state(); 2433 out: 2434 return status; 2435 } 2436 2437 2438 /* 2439 * Lock owner state (byte-range locks) 2440 */ 2441 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) 2442 #define LOCK_HASH_BITS 8 2443 #define LOCK_HASH_SIZE (1 << LOCK_HASH_BITS) 2444 #define LOCK_HASH_MASK (LOCK_HASH_SIZE - 1) 2445 2446 #define lockownerid_hashval(id) \ 2447 
((id) & LOCK_HASH_MASK) 2448 2449 static inline unsigned int 2450 lock_ownerstr_hashval(struct inode *inode, u32 cl_id, 2451 struct xdr_netobj *ownername) 2452 { 2453 return (file_hashval(inode) + cl_id 2454 + opaque_hashval(ownername->data, ownername->len)) 2455 & LOCK_HASH_MASK; 2456 } 2457 2458 static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE]; 2459 static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE]; 2460 static struct list_head lockstateid_hashtbl[STATEID_HASH_SIZE]; 2461 2462 static struct nfs4_stateid * 2463 find_stateid(stateid_t *stid, int flags) 2464 { 2465 struct nfs4_stateid *local = NULL; 2466 u32 st_id = stid->si_stateownerid; 2467 u32 f_id = stid->si_fileid; 2468 unsigned int hashval; 2469 2470 dprintk("NFSD: find_stateid flags 0x%x\n",flags); 2471 if ((flags & LOCK_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) { 2472 hashval = stateid_hashval(st_id, f_id); 2473 list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) { 2474 if ((local->st_stateid.si_stateownerid == st_id) && 2475 (local->st_stateid.si_fileid == f_id)) 2476 return local; 2477 } 2478 } 2479 if ((flags & OPEN_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) { 2480 hashval = stateid_hashval(st_id, f_id); 2481 list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) { 2482 if ((local->st_stateid.si_stateownerid == st_id) && 2483 (local->st_stateid.si_fileid == f_id)) 2484 return local; 2485 } 2486 } 2487 return NULL; 2488 } 2489 2490 static struct nfs4_delegation * 2491 find_delegation_stateid(struct inode *ino, stateid_t *stid) 2492 { 2493 struct nfs4_file *fp; 2494 struct nfs4_delegation *dl; 2495 2496 dprintk("NFSD:find_delegation_stateid stateid=(%08x/%08x/%08x/%08x)\n", 2497 stid->si_boot, stid->si_stateownerid, 2498 stid->si_fileid, stid->si_generation); 2499 2500 fp = find_file(ino); 2501 if (!fp) 2502 return NULL; 2503 dl = find_delegation_file(fp, stid); 2504 put_nfs4_file(fp); 2505 return dl; 2506 } 2507 2508 /* 2509 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 2510 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 2511 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 2512 * locking, this prevents us from being completely protocol-compliant. The 2513 * real solution to this problem is to start using unsigned file offsets in 2514 * the VFS, but this is a very deep change! 2515 */ 2516 static inline void 2517 nfs4_transform_lock_offset(struct file_lock *lock) 2518 { 2519 if (lock->fl_start < 0) 2520 lock->fl_start = OFFSET_MAX; 2521 if (lock->fl_end < 0) 2522 lock->fl_end = OFFSET_MAX; 2523 } 2524 2525 /* Hack!: For now, we're defining this just so we can use a pointer to it 2526 * as a unique cookie to identify our (NFSv4's) posix locks. 
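 * (nfs4_set_lock_denied below relies on fl_lmops == &nfsd_posix_mng_ops to
 * recognize locks that belong to nfsd.)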
*/ 2527 static struct lock_manager_operations nfsd_posix_mng_ops = { 2528 }; 2529 2530 static inline void 2531 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 2532 { 2533 struct nfs4_stateowner *sop; 2534 unsigned int hval; 2535 2536 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 2537 sop = (struct nfs4_stateowner *) fl->fl_owner; 2538 hval = lockownerid_hashval(sop->so_id); 2539 kref_get(&sop->so_ref); 2540 deny->ld_sop = sop; 2541 deny->ld_clientid = sop->so_client->cl_clientid; 2542 } else { 2543 deny->ld_sop = NULL; 2544 deny->ld_clientid.cl_boot = 0; 2545 deny->ld_clientid.cl_id = 0; 2546 } 2547 deny->ld_start = fl->fl_start; 2548 deny->ld_length = ~(u64)0; 2549 if (fl->fl_end != ~(u64)0) 2550 deny->ld_length = fl->fl_end - fl->fl_start + 1; 2551 deny->ld_type = NFS4_READ_LT; 2552 if (fl->fl_type != F_RDLCK) 2553 deny->ld_type = NFS4_WRITE_LT; 2554 } 2555 2556 static struct nfs4_stateowner * 2557 find_lockstateowner_str(struct inode *inode, clientid_t *clid, 2558 struct xdr_netobj *owner) 2559 { 2560 unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner); 2561 struct nfs4_stateowner *op; 2562 2563 list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) { 2564 if (cmp_owner_str(op, owner, clid)) 2565 return op; 2566 } 2567 return NULL; 2568 } 2569 2570 /* 2571 * Alloc a lock owner structure. 2572 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have 2573 * occurred. 2574 * 2575 * strhashval = lock_ownerstr_hashval 2576 */ 2577 2578 static struct nfs4_stateowner * 2579 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_stateid *open_stp, struct nfsd4_lock *lock) { 2580 struct nfs4_stateowner *sop; 2581 struct nfs4_replay *rp; 2582 unsigned int idhashval; 2583 2584 if (!(sop = alloc_stateowner(&lock->lk_new_owner))) 2585 return NULL; 2586 idhashval = lockownerid_hashval(current_ownerid); 2587 INIT_LIST_HEAD(&sop->so_idhash); 2588 INIT_LIST_HEAD(&sop->so_strhash); 2589 INIT_LIST_HEAD(&sop->so_perclient); 2590 INIT_LIST_HEAD(&sop->so_stateids); 2591 INIT_LIST_HEAD(&sop->so_perstateid); 2592 INIT_LIST_HEAD(&sop->so_close_lru); /* not used */ 2593 sop->so_time = 0; 2594 list_add(&sop->so_idhash, &lock_ownerid_hashtbl[idhashval]); 2595 list_add(&sop->so_strhash, &lock_ownerstr_hashtbl[strhashval]); 2596 list_add(&sop->so_perstateid, &open_stp->st_lockowners); 2597 sop->so_is_open_owner = 0; 2598 sop->so_id = current_ownerid++; 2599 sop->so_client = clp; 2600 /* It is the openowner seqid that will be incremented in encode in the 2601 * case of new lockowners; so increment the lock seqid manually: */ 2602 sop->so_seqid = lock->lk_new_lock_seqid + 1; 2603 sop->so_confirmed = 1; 2604 rp = &sop->so_replay; 2605 rp->rp_status = nfserr_serverfault; 2606 rp->rp_buflen = 0; 2607 rp->rp_buf = rp->rp_ibuf; 2608 return sop; 2609 } 2610 2611 static struct nfs4_stateid * 2612 alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struct nfs4_stateid *open_stp) 2613 { 2614 struct nfs4_stateid *stp; 2615 unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id); 2616 2617 stp = nfs4_alloc_stateid(); 2618 if (stp == NULL) 2619 goto out; 2620 INIT_LIST_HEAD(&stp->st_hash); 2621 INIT_LIST_HEAD(&stp->st_perfile); 2622 INIT_LIST_HEAD(&stp->st_perstateowner); 2623 INIT_LIST_HEAD(&stp->st_lockowners); /* not used */ 2624 list_add(&stp->st_hash, &lockstateid_hashtbl[hashval]); 2625 list_add(&stp->st_perfile, &fp->fi_stateids); 2626 list_add(&stp->st_perstateowner, &sop->so_stateids);
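/* the lock stateid shares the open stateid's struct file and share reservation bitmaps, copied below */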
2627 stp->st_stateowner = sop; 2628 get_nfs4_file(fp); 2629 stp->st_file = fp; 2630 stp->st_stateid.si_boot = boot_time; 2631 stp->st_stateid.si_stateownerid = sop->so_id; 2632 stp->st_stateid.si_fileid = fp->fi_id; 2633 stp->st_stateid.si_generation = 0; 2634 stp->st_vfs_file = open_stp->st_vfs_file; /* FIXME refcount?? */ 2635 stp->st_access_bmap = open_stp->st_access_bmap; 2636 stp->st_deny_bmap = open_stp->st_deny_bmap; 2637 stp->st_openstp = open_stp; 2638 2639 out: 2640 return stp; 2641 } 2642 2643 static int 2644 check_lock_length(u64 offset, u64 length) 2645 { 2646 return ((length == 0) || ((length != ~(u64)0) && 2647 LOFF_OVERFLOW(offset, length))); 2648 } 2649 2650 /* 2651 * LOCK operation 2652 */ 2653 __be32 2654 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2655 struct nfsd4_lock *lock) 2656 { 2657 struct nfs4_stateowner *open_sop = NULL; 2658 struct nfs4_stateowner *lock_sop = NULL; 2659 struct nfs4_stateid *lock_stp; 2660 struct file *filp; 2661 struct file_lock file_lock; 2662 struct file_lock conflock; 2663 __be32 status = 0; 2664 unsigned int strhashval; 2665 unsigned int cmd; 2666 int err; 2667 2668 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 2669 (long long) lock->lk_offset, 2670 (long long) lock->lk_length); 2671 2672 if (check_lock_length(lock->lk_offset, lock->lk_length)) 2673 return nfserr_inval; 2674 2675 if ((status = fh_verify(rqstp, &cstate->current_fh, 2676 S_IFREG, MAY_LOCK))) { 2677 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 2678 return status; 2679 } 2680 2681 nfs4_lock_state(); 2682 2683 if (lock->lk_is_new) { 2684 /* 2685 * Client indicates that this is a new lockowner. 2686 * Use open owner and open stateid to create lock owner and 2687 * lock stateid. 2688 */ 2689 struct nfs4_stateid *open_stp = NULL; 2690 struct nfs4_file *fp; 2691 2692 status = nfserr_stale_clientid; 2693 if (STALE_CLIENTID(&lock->lk_new_clientid)) 2694 goto out; 2695 2696 /* validate and update open stateid and open seqid */ 2697 status = nfs4_preprocess_seqid_op(&cstate->current_fh, 2698 lock->lk_new_open_seqid, 2699 &lock->lk_new_open_stateid, 2700 CHECK_FH | OPEN_STATE, 2701 &lock->lk_replay_owner, &open_stp, 2702 lock); 2703 if (status) 2704 goto out; 2705 open_sop = lock->lk_replay_owner; 2706 /* create lockowner and lock stateid */ 2707 fp = open_stp->st_file; 2708 strhashval = lock_ownerstr_hashval(fp->fi_inode, 2709 open_sop->so_client->cl_clientid.cl_id, 2710 &lock->v.new.owner); 2711 /* XXX: Do we need to check for duplicate stateowners on 2712 * the same file, or should they just be allowed (and 2713 * create new stateids)? 
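 * For now no such check is made: a new lockowner is always created here.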
*/ 2714 status = nfserr_resource; 2715 lock_sop = alloc_init_lock_stateowner(strhashval, 2716 open_sop->so_client, open_stp, lock); 2717 if (lock_sop == NULL) 2718 goto out; 2719 lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp); 2720 if (lock_stp == NULL) 2721 goto out; 2722 } else { 2723 /* lock (lock owner + lock stateid) already exists */ 2724 status = nfs4_preprocess_seqid_op(&cstate->current_fh, 2725 lock->lk_old_lock_seqid, 2726 &lock->lk_old_lock_stateid, 2727 CHECK_FH | LOCK_STATE, 2728 &lock->lk_replay_owner, &lock_stp, lock); 2729 if (status) 2730 goto out; 2731 lock_sop = lock->lk_replay_owner; 2732 } 2733 /* lock->lk_replay_owner and lock_stp have been created or found */ 2734 filp = lock_stp->st_vfs_file; 2735 2736 status = nfserr_grace; 2737 if (nfs4_in_grace() && !lock->lk_reclaim) 2738 goto out; 2739 status = nfserr_no_grace; 2740 if (!nfs4_in_grace() && lock->lk_reclaim) 2741 goto out; 2742 2743 locks_init_lock(&file_lock); 2744 switch (lock->lk_type) { 2745 case NFS4_READ_LT: 2746 case NFS4_READW_LT: 2747 file_lock.fl_type = F_RDLCK; 2748 cmd = F_SETLK; 2749 break; 2750 case NFS4_WRITE_LT: 2751 case NFS4_WRITEW_LT: 2752 file_lock.fl_type = F_WRLCK; 2753 cmd = F_SETLK; 2754 break; 2755 default: 2756 status = nfserr_inval; 2757 goto out; 2758 } 2759 file_lock.fl_owner = (fl_owner_t)lock_sop; 2760 file_lock.fl_pid = current->tgid; 2761 file_lock.fl_file = filp; 2762 file_lock.fl_flags = FL_POSIX; 2763 file_lock.fl_lmops = &nfsd_posix_mng_ops; 2764 2765 file_lock.fl_start = lock->lk_offset; 2766 if ((lock->lk_length == ~(u64)0) || 2767 LOFF_OVERFLOW(lock->lk_offset, lock->lk_length)) 2768 file_lock.fl_end = ~(u64)0; 2769 else 2770 file_lock.fl_end = lock->lk_offset + lock->lk_length - 1; 2771 nfs4_transform_lock_offset(&file_lock); 2772 2773 /* 2774 * Try to lock the file in the VFS. 2775 * Note: locks.c uses the BKL to protect the inode's lock list. 2776 */ 2777 2778 /* XXX?: Just to divert the locks_release_private at the start of 2779 * locks_copy_lock: */ 2780 locks_init_lock(&conflock); 2781 err = vfs_lock_file(filp, cmd, &file_lock, &conflock); 2782 switch (-err) { 2783 case 0: /* success! */ 2784 update_stateid(&lock_stp->st_stateid); 2785 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid, 2786 sizeof(stateid_t)); 2787 status = 0; 2788 break; 2789 case (EAGAIN): /* conflock holds conflicting lock */ 2790 status = nfserr_denied; 2791 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 2792 nfs4_set_lock_denied(&conflock, &lock->lk_denied); 2793 break; 2794 case (EDEADLK): 2795 status = nfserr_deadlock; 2796 break; 2797 default: 2798 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! 
status %d\n",err); 2799 status = nfserr_resource; 2800 break; 2801 } 2802 out: 2803 if (status && lock->lk_is_new && lock_sop) 2804 release_stateowner(lock_sop); 2805 if (lock->lk_replay_owner) { 2806 nfs4_get_stateowner(lock->lk_replay_owner); 2807 cstate->replay_owner = lock->lk_replay_owner; 2808 } 2809 nfs4_unlock_state(); 2810 return status; 2811 } 2812 2813 /* 2814 * LOCKT operation 2815 */ 2816 __be32 2817 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2818 struct nfsd4_lockt *lockt) 2819 { 2820 struct inode *inode; 2821 struct file file; 2822 struct file_lock file_lock; 2823 int error; 2824 __be32 status; 2825 2826 if (nfs4_in_grace()) 2827 return nfserr_grace; 2828 2829 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 2830 return nfserr_inval; 2831 2832 lockt->lt_stateowner = NULL; 2833 nfs4_lock_state(); 2834 2835 status = nfserr_stale_clientid; 2836 if (STALE_CLIENTID(&lockt->lt_clientid)) 2837 goto out; 2838 2839 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) { 2840 dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n"); 2841 if (status == nfserr_symlink) 2842 status = nfserr_inval; 2843 goto out; 2844 } 2845 2846 inode = cstate->current_fh.fh_dentry->d_inode; 2847 locks_init_lock(&file_lock); 2848 switch (lockt->lt_type) { 2849 case NFS4_READ_LT: 2850 case NFS4_READW_LT: 2851 file_lock.fl_type = F_RDLCK; 2852 break; 2853 case NFS4_WRITE_LT: 2854 case NFS4_WRITEW_LT: 2855 file_lock.fl_type = F_WRLCK; 2856 break; 2857 default: 2858 printk("NFSD: nfs4_lockt: bad lock type!\n"); 2859 status = nfserr_inval; 2860 goto out; 2861 } 2862 2863 lockt->lt_stateowner = find_lockstateowner_str(inode, 2864 &lockt->lt_clientid, &lockt->lt_owner); 2865 if (lockt->lt_stateowner) 2866 file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner; 2867 file_lock.fl_pid = current->tgid; 2868 file_lock.fl_flags = FL_POSIX; 2869 file_lock.fl_lmops = &nfsd_posix_mng_ops; 2870 2871 file_lock.fl_start = lockt->lt_offset; 2872 if ((lockt->lt_length == ~(u64)0) || LOFF_OVERFLOW(lockt->lt_offset, lockt->lt_length)) 2873 file_lock.fl_end = ~(u64)0; 2874 else 2875 file_lock.fl_end = lockt->lt_offset + lockt->lt_length - 1; 2876 2877 nfs4_transform_lock_offset(&file_lock); 2878 2879 /* vfs_test_lock uses the struct file _only_ to resolve the inode. 2880 * since LOCKT doesn't require an OPEN, and therefore a struct 2881 * file may not exist, pass vfs_test_lock a struct file with 2882 * only the dentry:inode set. 
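 * (A zeroed struct file with only f_path.dentry filled in is sufficient for
 * the conflict check.)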
2883 */ 2884 memset(&file, 0, sizeof (struct file)); 2885 file.f_path.dentry = cstate->current_fh.fh_dentry; 2886 2887 status = nfs_ok; 2888 error = vfs_test_lock(&file, &file_lock); 2889 if (error) { 2890 status = nfserrno(error); 2891 goto out; 2892 } 2893 if (file_lock.fl_type != F_UNLCK) { 2894 status = nfserr_denied; 2895 nfs4_set_lock_denied(&file_lock, &lockt->lt_denied); 2896 } 2897 out: 2898 nfs4_unlock_state(); 2899 return status; 2900 } 2901 2902 __be32 2903 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2904 struct nfsd4_locku *locku) 2905 { 2906 struct nfs4_stateid *stp; 2907 struct file *filp = NULL; 2908 struct file_lock file_lock; 2909 __be32 status; 2910 int err; 2911 2912 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 2913 (long long) locku->lu_offset, 2914 (long long) locku->lu_length); 2915 2916 if (check_lock_length(locku->lu_offset, locku->lu_length)) 2917 return nfserr_inval; 2918 2919 nfs4_lock_state(); 2920 2921 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh, 2922 locku->lu_seqid, 2923 &locku->lu_stateid, 2924 CHECK_FH | LOCK_STATE, 2925 &locku->lu_stateowner, &stp, NULL))) 2926 goto out; 2927 2928 filp = stp->st_vfs_file; 2929 BUG_ON(!filp); 2930 locks_init_lock(&file_lock); 2931 file_lock.fl_type = F_UNLCK; 2932 file_lock.fl_owner = (fl_owner_t) locku->lu_stateowner; 2933 file_lock.fl_pid = current->tgid; 2934 file_lock.fl_file = filp; 2935 file_lock.fl_flags = FL_POSIX; 2936 file_lock.fl_lmops = &nfsd_posix_mng_ops; 2937 file_lock.fl_start = locku->lu_offset; 2938 2939 if ((locku->lu_length == ~(u64)0) || LOFF_OVERFLOW(locku->lu_offset, locku->lu_length)) 2940 file_lock.fl_end = ~(u64)0; 2941 else 2942 file_lock.fl_end = locku->lu_offset + locku->lu_length - 1; 2943 nfs4_transform_lock_offset(&file_lock); 2944 2945 /* 2946 * Try to unlock the file in the VFS. 2947 */ 2948 err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL); 2949 if (err) { 2950 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 2951 goto out_nfserr; 2952 } 2953 /* 2954 * OK, unlock succeeded; the only thing left to do is update the stateid. 
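 * (update_stateid bumps the stateid's generation number, which the client
 * sees in the LOCKU reply.)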
2955 */ 2956 update_stateid(&stp->st_stateid); 2957 memcpy(&locku->lu_stateid, &stp->st_stateid, sizeof(stateid_t)); 2958 2959 out: 2960 if (locku->lu_stateowner) { 2961 nfs4_get_stateowner(locku->lu_stateowner); 2962 cstate->replay_owner = locku->lu_stateowner; 2963 } 2964 nfs4_unlock_state(); 2965 return status; 2966 2967 out_nfserr: 2968 status = nfserrno(err); 2969 goto out; 2970 } 2971 2972 /* 2973 * returns 2974 * 1: locks held by lockowner 2975 * 0: no locks held by lockowner 2976 */ 2977 static int 2978 check_for_locks(struct file *filp, struct nfs4_stateowner *lowner) 2979 { 2980 struct file_lock **flpp; 2981 struct inode *inode = filp->f_path.dentry->d_inode; 2982 int status = 0; 2983 2984 lock_kernel(); 2985 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 2986 if ((*flpp)->fl_owner == (fl_owner_t)lowner) { 2987 status = 1; 2988 goto out; 2989 } 2990 } 2991 out: 2992 unlock_kernel(); 2993 return status; 2994 } 2995 2996 __be32 2997 nfsd4_release_lockowner(struct svc_rqst *rqstp, 2998 struct nfsd4_compound_state *cstate, 2999 struct nfsd4_release_lockowner *rlockowner) 3000 { 3001 clientid_t *clid = &rlockowner->rl_clientid; 3002 struct nfs4_stateowner *sop; 3003 struct nfs4_stateid *stp; 3004 struct xdr_netobj *owner = &rlockowner->rl_owner; 3005 struct list_head matches; 3006 int i; 3007 __be32 status; 3008 3009 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 3010 clid->cl_boot, clid->cl_id); 3011 3012 /* XXX check for lease expiration */ 3013 3014 status = nfserr_stale_clientid; 3015 if (STALE_CLIENTID(clid)) 3016 return status; 3017 3018 nfs4_lock_state(); 3019 3020 status = nfserr_locks_held; 3021 /* XXX: we're doing a linear search through all the lockowners. 3022 * Yipes! For now we'll just hope clients aren't really using 3023 * release_lockowner much, but eventually we have to fix these 3024 * data structures. */ 3025 INIT_LIST_HEAD(&matches); 3026 for (i = 0; i < LOCK_HASH_SIZE; i++) { 3027 list_for_each_entry(sop, &lock_ownerid_hashtbl[i], so_idhash) { 3028 if (!cmp_owner_str(sop, owner, clid)) 3029 continue; 3030 list_for_each_entry(stp, &sop->so_stateids, 3031 st_perstateowner) { 3032 if (check_for_locks(stp->st_vfs_file, sop)) 3033 goto out; 3034 /* Note: so_perclient unused for lockowners, 3035 * so it's OK to fool with here. */ 3036 list_add(&sop->so_perclient, &matches); 3037 } 3038 } 3039 } 3040 /* Clients probably won't expect us to return with some (but not all) 3041 * of the lockowner state released; so don't release any until all 3042 * have been checked. */ 3043 status = nfs_ok; 3044 while (!list_empty(&matches)) { 3045 sop = list_entry(matches.next, struct nfs4_stateowner, 3046 so_perclient); 3047 /* unhash_stateowner deletes so_perclient only 3048 * for openowners. */ 3049 list_del(&sop->so_perclient); 3050 release_stateowner(sop); 3051 } 3052 out: 3053 nfs4_unlock_state(); 3054 return status; 3055 } 3056 3057 static inline struct nfs4_client_reclaim * 3058 alloc_reclaim(void) 3059 { 3060 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 3061 } 3062 3063 int 3064 nfs4_has_reclaimed_state(const char *name) 3065 { 3066 unsigned int strhashval = clientstr_hashval(name); 3067 struct nfs4_client *clp; 3068 3069 clp = find_confirmed_client_by_str(name, strhashval); 3070 return clp ? 1 : 0; 3071 } 3072 3073 /* 3074 * failure => all reset bets are off, nfserr_no_grace... 
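 * (A zero return from nfs4_client_to_reclaim simply means the reclaim record
 * could not be allocated.)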
3075 */ 3076 int 3077 nfs4_client_to_reclaim(const char *name) 3078 { 3079 unsigned int strhashval; 3080 struct nfs4_client_reclaim *crp = NULL; 3081 3082 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); 3083 crp = alloc_reclaim(); 3084 if (!crp) 3085 return 0; 3086 strhashval = clientstr_hashval(name); 3087 INIT_LIST_HEAD(&crp->cr_strhash); 3088 list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]); 3089 memcpy(crp->cr_recdir, name, HEXDIR_LEN); 3090 reclaim_str_hashtbl_size++; 3091 return 1; 3092 } 3093 3094 static void 3095 nfs4_release_reclaim(void) 3096 { 3097 struct nfs4_client_reclaim *crp = NULL; 3098 int i; 3099 3100 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 3101 while (!list_empty(&reclaim_str_hashtbl[i])) { 3102 crp = list_entry(reclaim_str_hashtbl[i].next, 3103 struct nfs4_client_reclaim, cr_strhash); 3104 list_del(&crp->cr_strhash); 3105 kfree(crp); 3106 reclaim_str_hashtbl_size--; 3107 } 3108 } 3109 BUG_ON(reclaim_str_hashtbl_size); 3110 } 3111 3112 /* 3113 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 3114 static struct nfs4_client_reclaim * 3115 nfs4_find_reclaim_client(clientid_t *clid) 3116 { 3117 unsigned int strhashval; 3118 struct nfs4_client *clp; 3119 struct nfs4_client_reclaim *crp = NULL; 3120 3121 3122 /* find clientid in conf_id_hashtbl */ 3123 clp = find_confirmed_client(clid); 3124 if (clp == NULL) 3125 return NULL; 3126 3127 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n", 3128 clp->cl_name.len, clp->cl_name.data, 3129 clp->cl_recdir); 3130 3131 /* find clp->cl_name in reclaim_str_hashtbl */ 3132 strhashval = clientstr_hashval(clp->cl_recdir); 3133 list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) { 3134 if (same_name(crp->cr_recdir, clp->cl_recdir)) { 3135 return crp; 3136 } 3137 } 3138 return NULL; 3139 } 3140 3141 /* 3142 * Called from OPEN. Look for clientid in reclaim list. 3143 */ 3144 __be32 3145 nfs4_check_open_reclaim(clientid_t *clid) 3146 { 3147 return nfs4_find_reclaim_client(clid) ? 
nfs_ok : nfserr_reclaim_bad; 3148 } 3149 3150 /* initialization to perform at module load time: */ 3151 3152 void 3153 nfs4_state_init(void) 3154 { 3155 int i; 3156 3157 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 3158 INIT_LIST_HEAD(&conf_id_hashtbl[i]); 3159 INIT_LIST_HEAD(&conf_str_hashtbl[i]); 3160 INIT_LIST_HEAD(&unconf_str_hashtbl[i]); 3161 INIT_LIST_HEAD(&unconf_id_hashtbl[i]); 3162 } 3163 for (i = 0; i < FILE_HASH_SIZE; i++) { 3164 INIT_LIST_HEAD(&file_hashtbl[i]); 3165 } 3166 for (i = 0; i < OWNER_HASH_SIZE; i++) { 3167 INIT_LIST_HEAD(&ownerstr_hashtbl[i]); 3168 INIT_LIST_HEAD(&ownerid_hashtbl[i]); 3169 } 3170 for (i = 0; i < STATEID_HASH_SIZE; i++) { 3171 INIT_LIST_HEAD(&stateid_hashtbl[i]); 3172 INIT_LIST_HEAD(&lockstateid_hashtbl[i]); 3173 } 3174 for (i = 0; i < LOCK_HASH_SIZE; i++) { 3175 INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]); 3176 INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]); 3177 } 3178 memset(&onestateid, ~0, sizeof(stateid_t)); 3179 INIT_LIST_HEAD(&close_lru); 3180 INIT_LIST_HEAD(&client_lru); 3181 INIT_LIST_HEAD(&del_recall_lru); 3182 for (i = 0; i < CLIENT_HASH_SIZE; i++) 3183 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]); 3184 reclaim_str_hashtbl_size = 0; 3185 } 3186 3187 static void 3188 nfsd4_load_reboot_recovery_data(void) 3189 { 3190 int status; 3191 3192 nfs4_lock_state(); 3193 nfsd4_init_recdir(user_recovery_dirname); 3194 status = nfsd4_recdir_load(); 3195 nfs4_unlock_state(); 3196 if (status) 3197 printk("NFSD: Failure reading reboot recovery data\n"); 3198 } 3199 3200 unsigned long 3201 get_nfs4_grace_period(void) 3202 { 3203 return max(user_lease_time, lease_time) * HZ; 3204 } 3205 3206 /* 3207 * Since the lifetime of a delegation isn't limited to that of an open, a 3208 * client may quite reasonably hang on to a delegation as long as it has 3209 * the inode cached. This becomes an obvious problem the first time a 3210 * client's inode cache approaches the size of the server's total memory. 3211 * 3212 * For now we avoid this problem by imposing a hard limit on the number 3213 * of delegations, which varies according to the server's memory size. 3214 */ 3215 static void 3216 set_max_delegations(void) 3217 { 3218 /* 3219 * Allow at most 4 delegations per megabyte of RAM. Quick 3220 * estimates suggest that in the worst case (where every delegation 3221 * is for a different inode), a delegation could take about 1.5K, 3222 * giving a worst case usage of about 6% of memory. 
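 * The shift below works out to nr_free_buffer_pages() * PAGE_SIZE * 4 / 2^20,
 * i.e. four delegations for each megabyte counted by nr_free_buffer_pages().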
3223 */ 3224 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 3225 } 3226 3227 /* initialization to perform when the nfsd service is started: */ 3228 3229 static void 3230 __nfs4_state_start(void) 3231 { 3232 unsigned long grace_time; 3233 3234 boot_time = get_seconds(); 3235 grace_time = get_nfs_grace_period(); 3236 lease_time = user_lease_time; 3237 in_grace = 1; 3238 printk(KERN_INFO "NFSD: starting %ld-second grace period\n", 3239 grace_time/HZ); 3240 laundry_wq = create_singlethread_workqueue("nfsd4"); 3241 queue_delayed_work(laundry_wq, &laundromat_work, grace_time); 3242 set_max_delegations(); 3243 } 3244 3245 int 3246 nfs4_state_start(void) 3247 { 3248 int status; 3249 3250 if (nfs4_init) 3251 return 0; 3252 status = nfsd4_init_slabs(); 3253 if (status) 3254 return status; 3255 nfsd4_load_reboot_recovery_data(); 3256 __nfs4_state_start(); 3257 nfs4_init = 1; 3258 return 0; 3259 } 3260 3261 int 3262 nfs4_in_grace(void) 3263 { 3264 return in_grace; 3265 } 3266 3267 time_t 3268 nfs4_lease_time(void) 3269 { 3270 return lease_time; 3271 } 3272 3273 static void 3274 __nfs4_state_shutdown(void) 3275 { 3276 int i; 3277 struct nfs4_client *clp = NULL; 3278 struct nfs4_delegation *dp = NULL; 3279 struct list_head *pos, *next, reaplist; 3280 3281 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 3282 while (!list_empty(&conf_id_hashtbl[i])) { 3283 clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 3284 expire_client(clp); 3285 } 3286 while (!list_empty(&unconf_str_hashtbl[i])) { 3287 clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash); 3288 expire_client(clp); 3289 } 3290 } 3291 INIT_LIST_HEAD(&reaplist); 3292 spin_lock(&recall_lock); 3293 list_for_each_safe(pos, next, &del_recall_lru) { 3294 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3295 list_move(&dp->dl_recall_lru, &reaplist); 3296 } 3297 spin_unlock(&recall_lock); 3298 list_for_each_safe(pos, next, &reaplist) { 3299 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3300 list_del_init(&dp->dl_recall_lru); 3301 unhash_delegation(dp); 3302 } 3303 3304 nfsd4_shutdown_recdir(); 3305 nfs4_init = 0; 3306 } 3307 3308 void 3309 nfs4_state_shutdown(void) 3310 { 3311 cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work); 3312 destroy_workqueue(laundry_wq); 3313 nfs4_lock_state(); 3314 nfs4_release_reclaim(); 3315 __nfs4_state_shutdown(); 3316 nfsd4_free_slabs(); 3317 nfs4_unlock_state(); 3318 } 3319 3320 static void 3321 nfs4_set_recdir(char *recdir) 3322 { 3323 nfs4_lock_state(); 3324 strcpy(user_recovery_dirname, recdir); 3325 nfs4_unlock_state(); 3326 } 3327 3328 /* 3329 * Change the NFSv4 recovery directory to recdir. 3330 */ 3331 int 3332 nfs4_reset_recoverydir(char *recdir) 3333 { 3334 int status; 3335 struct nameidata nd; 3336 3337 status = path_lookup(recdir, LOOKUP_FOLLOW, &nd); 3338 if (status) 3339 return status; 3340 status = -ENOTDIR; 3341 if (S_ISDIR(nd.dentry->d_inode->i_mode)) { 3342 nfs4_set_recdir(recdir); 3343 status = 0; 3344 } 3345 path_release(&nd); 3346 return status; 3347 } 3348 3349 /* 3350 * Called when leasetime is changed. 3351 * 3352 * The only way the protocol gives us to handle on-the-fly lease changes is to 3353 * simulate a reboot. Instead of doing that, we just wait till the next time 3354 * we start to register any changes in lease time. If the administrator 3355 * really wants to change the lease time *now*, they can go ahead and bring 3356 * nfsd down and then back up again after changing the lease time. 
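 * (The new value is only recorded in user_lease_time here; __nfs4_state_start
 * copies it into lease_time the next time nfsd is started.)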
3357 */ 3358 void 3359 nfs4_reset_lease(time_t leasetime) 3360 { 3361 lock_kernel(); 3362 user_lease_time = leasetime; 3363 unlock_kernel(); 3364 } 3365