/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
static int dump_inode(struct gfs2_inode *ip);
static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
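
/*
 * Illustrative note (not normative): with GFS2_GL_HASH_SIZE = 32768
 * chains and, say, GL_HASH_LOCK_SZ = 256 rwlocks, gl_lock_addr() maps
 * 128 chains onto each rwlock via the smaller mask. Callers bracket
 * every bucket walk with the corresponding lock:
 *
 *	read_lock(gl_lock_addr(hash));
 *	... walk gl_hash_table[hash].hb_list ...
 *	read_unlock(gl_lock_addr(hash));
 */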

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
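
/*
 * A few concrete cases (illustrative, derived from the rules above):
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)          -> 1
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)   -> 0
 *	relaxed_state_ok(LM_ST_SHARED, LM_ST_DEFERRED, LM_FLAG_ANY) -> 1
 *	relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY) -> 0
 *
 * i.e. an exclusive lock satisfies a shared request unless the caller
 * demanded an exact match, and LM_FLAG_ANY accepts any held state.
 */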

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		BUG_ON(spin_is_locked(&gl->gl_spin));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket number
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
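
/*
 * Illustrative sketch (not part of the build): a typical caller pairs
 * gfs2_glock_get() with gfs2_glock_put(), since a successful lookup or
 * creation returns the glock with an extra reference held:
 *
 *	struct gfs2_glock *gl;
 *	int error = gfs2_glock_get(sdp, number, &gfs2_inode_glops,
 *				   CREATE, &gl);
 *	if (error)
 *		return error;
 *	... use gl, e.g. queue holders on it ...
 *	gfs2_glock_put(gl);
 */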

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags passed to kmalloc()
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}

static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
{
	if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
		gfs2_holder_put(gh);
		return;
	}
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int holder_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to a more
 * restrictive one.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			gfs2_glock_xmote_th(gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_dispose_or_wake(gh);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		gfs2_holder_dispose_or_wake(gh);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			gfs2_glock_drop_th(gl);
		else
			gfs2_glock_xmote_th(gh);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
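
/*
 * A worked example of the service order above (illustrative only):
 * suppose one request is queued on each list. run_queue() serves the
 * gl_waiters1 (glmutex) request first; since rq_mutex() always reports
 * the queue blocked, the loop then stops until the mutex is released.
 * Demote requests on gl_waiters2 are considered before promote requests
 * on gl_waiters3, and the loop also exits as soon as GLF_LOCK is set or
 * a request leaves the queue blocked.
 */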

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);
	if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
		BUG();

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		clear_bit(HIF_WAIT, &gh.gh_iflags);
		smp_mb();
		wake_up_bit(&gh.gh_iflags, HIF_WAIT);
	}
	spin_unlock(&gl->gl_spin);

	wait_on_holder(&gh);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}
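
/*
 * Illustrative sketch (not part of the build): the non-blocking form is
 * the pattern used by the reclaim and scan paths further down:
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		if (list_empty(&gl->gl_holders) &&
 *		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 *			handle_callback(gl, LM_ST_UNLOCKED);
 *		gfs2_glmutex_unlock(gl);
 *	}
 */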

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
		if (!new_gh)
			return;
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
		set_bit(HIF_WAIT, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		gl->gl_stamp = jiffies;
	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);
	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED) {
			gh->gh_error = 0;
		} else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED);

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_dispose_or_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gh: the holder carrying the requested state and modifier flags
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh->gh_flags;
	unsigned state = gh->gh_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}
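
/*
 * Completion flow (a sketch of the two paths above, for orientation):
 * if gfs2_lm_lock() finishes immediately, xmote_bh() runs right here in
 * the caller's context; if it returns LM_OUT_ASYNC, the lock module
 * later reports the result through gfs2_glock_cb(LM_CB_ASYNC), which
 * looks the glock up and invokes gl->gl_req_bh (i.e. xmote_bh or
 * drop_bh) with the final status.
 */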

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_dispose_or_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_drop_th)
		glops->go_drop_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type,
		       existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}
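
/*
 * Illustrative sketch (not part of the build): the common caller pattern
 * pairs gfs2_holder_init()/gfs2_glock_nq() with
 * gfs2_glock_dq()/gfs2_holder_uninit(), usually via the nq_init and
 * dq_uninit wrappers from glock.h; for some struct gfs2_inode *ip:
 *
 *	struct gfs2_holder gh;
 *	int error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
 *				       LM_FLAG_ANY, &gh);
 *	if (error)
 *		return error;
 *	... read under the shared lock ...
 *	gfs2_glock_dq_uninit(&gh);
 */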

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_holder structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, filled in sorted order
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
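
/*
 * Worked example (illustrative): given holders on glocks numbered 42 and
 * 7, nq_m_sync() sorts the pointer array to (7, 42) and acquires them in
 * that order on every node, so two nodes that each want both locks can
 * never hold one and wait on the other in opposite orders, which is what
 * makes the acquisition deadlock free.
 */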

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	/* e doubles as the per-holder error array and, on the retry path,
	   the scratch array of holder pointers handed to nq_m_sync(),
	   hence the pointer-sized elements. */
	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}
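
/*
 * Illustrative sketch (not part of the build): LVB references are
 * counted, so hold/unhold calls must balance; the first hold pins the
 * glock and attaches the lock value block, the last unhold releases it:
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (error)
 *		return error;
 *	... read or update the buffer at gl->gl_lvb ...
 *	gfs2_lvb_unhold(gl);
 */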

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		down_read(&gfs2_umount_flush_sem);
		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl)) {
			up_read(&gfs2_umount_flush_sem);
			return;
		}
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		up_read(&gfs2_umount_flush_sem);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket number
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	return has_entries;
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
		return;

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(scan_glock, sdp, x);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		down_write(&gfs2_umount_flush_sem);
		invalidate_inodes(sdp->sd_vfs);
		up_write(&gfs2_umount_flush_sem);
		msleep(10);
	}
}

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	printk(KERN_INFO "    num = %llu %llu\n",
	       (unsigned long long)ip->i_num.no_formal_ino,
	       (unsigned long long)ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");

	error = 0;

	return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++) {
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
%u", x); 1931 } 1932 printk(" \n"); 1933 printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref)); 1934 printk(KERN_INFO " gl_state = %u\n", gl->gl_state); 1935 printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm); 1936 print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip); 1937 printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); 1938 printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no"); 1939 printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); 1940 printk(KERN_INFO " object = %s\n", (gl->gl_object) ? "yes" : "no"); 1941 printk(KERN_INFO " le = %s\n", 1942 (list_empty(&gl->gl_le.le_list)) ? "no" : "yes"); 1943 printk(KERN_INFO " reclaim = %s\n", 1944 (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); 1945 if (gl->gl_aspace) 1946 printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, 1947 gl->gl_aspace->i_mapping->nrpages); 1948 else 1949 printk(KERN_INFO " aspace = no\n"); 1950 printk(KERN_INFO " ail = %d\n", atomic_read(&gl->gl_ail_count)); 1951 if (gl->gl_req_gh) { 1952 error = dump_holder("Request", gl->gl_req_gh); 1953 if (error) 1954 goto out; 1955 } 1956 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1957 error = dump_holder("Holder", gh); 1958 if (error) 1959 goto out; 1960 } 1961 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { 1962 error = dump_holder("Waiter1", gh); 1963 if (error) 1964 goto out; 1965 } 1966 list_for_each_entry(gh, &gl->gl_waiters2, gh_list) { 1967 error = dump_holder("Waiter2", gh); 1968 if (error) 1969 goto out; 1970 } 1971 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { 1972 error = dump_holder("Waiter3", gh); 1973 if (error) 1974 goto out; 1975 } 1976 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { 1977 if (!test_bit(GLF_LOCK, &gl->gl_flags) && 1978 list_empty(&gl->gl_holders)) { 1979 error = dump_inode(gl->gl_object); 1980 if (error) 1981 goto out; 1982 } else { 1983 error = -ENOBUFS; 1984 printk(KERN_INFO " Inode: busy\n"); 1985 } 1986 } 1987 1988 error = 0; 1989 1990 out: 1991 spin_unlock(&gl->gl_spin); 1992 return error; 1993 } 1994 1995 /** 1996 * gfs2_dump_lockstate - print out the current lockstate 1997 * @sdp: the filesystem 1998 * @ub: the buffer to copy the information into 1999 * 2000 * If @ub is NULL, dump the lockstate to the console. 2001 * 2002 */ 2003 2004 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp) 2005 { 2006 struct gfs2_glock *gl; 2007 struct hlist_node *h; 2008 unsigned int x; 2009 int error = 0; 2010 2011 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { 2012 2013 read_lock(gl_lock_addr(x)); 2014 2015 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) { 2016 if (gl->gl_sbd != sdp) 2017 continue; 2018 2019 error = dump_glock(gl); 2020 if (error) 2021 break; 2022 } 2023 2024 read_unlock(gl_lock_addr(x)); 2025 2026 if (error) 2027 break; 2028 } 2029 2030 2031 return error; 2032 } 2033 2034 int __init gfs2_glock_init(void) 2035 { 2036 unsigned i; 2037 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { 2038 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list); 2039 } 2040 #ifdef GL_HASH_LOCK_SZ 2041 for(i = 0; i < GL_HASH_LOCK_SZ; i++) { 2042 rwlock_init(&gl_hash_locks[i]); 2043 } 2044 #endif 2045 return 0; 2046 } 2047 2048