1 /* 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This copyrighted material is made available to anyone wishing to use, 6 * modify, copy, or redistribute it subject to the terms and conditions 7 * of the GNU General Public License version 2. 8 */ 9 10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 12 #include <linux/sched.h> 13 #include <linux/slab.h> 14 #include <linux/spinlock.h> 15 #include <linux/buffer_head.h> 16 #include <linux/delay.h> 17 #include <linux/sort.h> 18 #include <linux/jhash.h> 19 #include <linux/kallsyms.h> 20 #include <linux/gfs2_ondisk.h> 21 #include <linux/list.h> 22 #include <linux/wait.h> 23 #include <linux/module.h> 24 #include <asm/uaccess.h> 25 #include <linux/seq_file.h> 26 #include <linux/debugfs.h> 27 #include <linux/kthread.h> 28 #include <linux/freezer.h> 29 #include <linux/workqueue.h> 30 #include <linux/jiffies.h> 31 #include <linux/rcupdate.h> 32 #include <linux/rculist_bl.h> 33 #include <linux/bit_spinlock.h> 34 #include <linux/percpu.h> 35 #include <linux/list_sort.h> 36 #include <linux/lockref.h> 37 38 #include "gfs2.h" 39 #include "incore.h" 40 #include "glock.h" 41 #include "glops.h" 42 #include "inode.h" 43 #include "lops.h" 44 #include "meta_io.h" 45 #include "quota.h" 46 #include "super.h" 47 #include "util.h" 48 #include "bmap.h" 49 #define CREATE_TRACE_POINTS 50 #include "trace_gfs2.h" 51 52 struct gfs2_glock_iter { 53 int hash; /* hash bucket index */ 54 unsigned nhash; /* Index within current bucket */ 55 struct gfs2_sbd *sdp; /* incore superblock */ 56 struct gfs2_glock *gl; /* current glock struct */ 57 loff_t last_pos; /* last position */ 58 }; 59 60 typedef void (*glock_examiner) (struct gfs2_glock * gl); 61 62 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 63 64 static struct dentry *gfs2_root; 65 static struct workqueue_struct *glock_workqueue; 66 struct workqueue_struct *gfs2_delete_workqueue; 67 static LIST_HEAD(lru_list); 68 static atomic_t lru_count = ATOMIC_INIT(0); 69 static DEFINE_SPINLOCK(lru_lock); 70 71 #define GFS2_GL_HASH_SHIFT 15 72 #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) 73 #define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1) 74 75 static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE]; 76 static struct dentry *gfs2_root; 77 78 /** 79 * gl_hash() - Turn glock number into hash bucket number 80 * @lock: The glock number 81 * 82 * Returns: The number of the corresponding hash bucket 83 */ 84 85 static unsigned int gl_hash(const struct gfs2_sbd *sdp, 86 const struct lm_lockname *name) 87 { 88 unsigned int h; 89 90 h = jhash(&name->ln_number, sizeof(u64), 0); 91 h = jhash(&name->ln_type, sizeof(unsigned int), h); 92 h = jhash(&sdp, sizeof(struct gfs2_sbd *), h); 93 h &= GFS2_GL_HASH_MASK; 94 95 return h; 96 } 97 98 static inline void spin_lock_bucket(unsigned int hash) 99 { 100 hlist_bl_lock(&gl_hash_table[hash]); 101 } 102 103 static inline void spin_unlock_bucket(unsigned int hash) 104 { 105 hlist_bl_unlock(&gl_hash_table[hash]); 106 } 107 108 static void gfs2_glock_dealloc(struct rcu_head *rcu) 109 { 110 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 111 112 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 113 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 114 } else { 115 kfree(gl->gl_lksb.sb_lvbptr); 116 kmem_cache_free(gfs2_glock_cachep, gl); 117 } 118 } 119 120 void gfs2_glock_free(struct gfs2_glock *gl) 121 { 122 struct gfs2_sbd *sdp = gl->gl_sbd; 123 124 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 125 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 126 wake_up(&sdp->sd_glock_wait); 127 } 128 129 /** 130 * gfs2_glock_hold() - increment reference count on glock 131 * @gl: The glock to hold 132 * 133 */ 134 135 static void gfs2_glock_hold(struct gfs2_glock *gl) 136 { 137 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 138 lockref_get(&gl->gl_lockref); 139 } 140 141 /** 142 * demote_ok - Check to see if it's ok to unlock a glock 143 * @gl: the glock 144 * 145 * Returns: 1 if it's ok 146 */ 147 148 static int demote_ok(const struct gfs2_glock *gl) 149 { 150 const struct gfs2_glock_operations *glops = gl->gl_ops; 151 152 if (gl->gl_state == LM_ST_UNLOCKED) 153 return 0; 154 if (!list_empty(&gl->gl_holders)) 155 return 0; 156 if (glops->go_demote_ok) 157 return glops->go_demote_ok(gl); 158 return 1; 159 } 160 161 162 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 163 { 164 spin_lock(&lru_lock); 165 166 if (!list_empty(&gl->gl_lru)) 167 list_del_init(&gl->gl_lru); 168 else 169 atomic_inc(&lru_count); 170 171 list_add_tail(&gl->gl_lru, &lru_list); 172 set_bit(GLF_LRU, &gl->gl_flags); 173 spin_unlock(&lru_lock); 174 } 175 176 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 177 { 178 spin_lock(&lru_lock); 179 if (!list_empty(&gl->gl_lru)) { 180 list_del_init(&gl->gl_lru); 181 atomic_dec(&lru_count); 182 clear_bit(GLF_LRU, &gl->gl_flags); 183 } 184 spin_unlock(&lru_lock); 185 } 186 187 /** 188 * gfs2_glock_put() - Decrement reference count on glock 189 * @gl: The glock to put 190 * 191 */ 192 193 void gfs2_glock_put(struct gfs2_glock *gl) 194 { 195 struct gfs2_sbd *sdp = gl->gl_sbd; 196 struct address_space *mapping = gfs2_glock2aspace(gl); 197 198 if (lockref_put_or_lock(&gl->gl_lockref)) 199 return; 200 201 lockref_mark_dead(&gl->gl_lockref); 202 203 gfs2_glock_remove_from_lru(gl); 204 spin_unlock(&gl->gl_lockref.lock); 205 spin_lock_bucket(gl->gl_hash); 206 hlist_bl_del_rcu(&gl->gl_list); 207 spin_unlock_bucket(gl->gl_hash); 208 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 209 GLOCK_BUG_ON(gl, mapping && mapping->nrpages); 210 trace_gfs2_glock_put(gl); 211 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); 212 } 213 214 /** 215 * search_bucket() - Find struct gfs2_glock by lock number 216 * @bucket: the bucket to search 217 * @name: The lock name 218 * 219 * Returns: NULL, or the struct gfs2_glock with the requested number 220 */ 221 222 static struct gfs2_glock *search_bucket(unsigned int hash, 223 const struct gfs2_sbd *sdp, 224 const struct lm_lockname *name) 225 { 226 struct gfs2_glock *gl; 227 struct hlist_bl_node *h; 228 229 hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) { 230 if (!lm_name_equal(&gl->gl_name, name)) 231 continue; 232 if (gl->gl_sbd != sdp) 233 continue; 234 if (lockref_get_not_dead(&gl->gl_lockref)) 235 return gl; 236 } 237 238 return NULL; 239 } 240 241 /** 242 * may_grant - check if its ok to grant a new lock 243 * @gl: The glock 244 * @gh: The lock request which we wish to grant 245 * 246 * Returns: true if its ok to grant the lock 247 */ 248 249 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) 250 { 251 const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list); 252 if ((gh->gh_state == LM_ST_EXCLUSIVE || 253 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) 254 return 0; 255 if (gl->gl_state == gh->gh_state) 256 return 1; 257 if (gh->gh_flags & GL_EXACT) 258 return 0; 259 if (gl->gl_state == LM_ST_EXCLUSIVE) { 260 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) 261 return 1; 262 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) 263 return 1; 264 } 265 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) 266 return 1; 267 return 0; 268 } 269 270 static void gfs2_holder_wake(struct gfs2_holder *gh) 271 { 272 clear_bit(HIF_WAIT, &gh->gh_iflags); 273 smp_mb__after_atomic(); 274 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 275 } 276 277 /** 278 * do_error - Something unexpected has happened during a lock request 279 * 280 */ 281 282 static inline void do_error(struct gfs2_glock *gl, const int ret) 283 { 284 struct gfs2_holder *gh, *tmp; 285 286 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 287 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 288 continue; 289 if (ret & LM_OUT_ERROR) 290 gh->gh_error = -EIO; 291 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 292 gh->gh_error = GLR_TRYFAILED; 293 else 294 continue; 295 list_del_init(&gh->gh_list); 296 trace_gfs2_glock_queue(gh, 0); 297 gfs2_holder_wake(gh); 298 } 299 } 300 301 /** 302 * do_promote - promote as many requests as possible on the current queue 303 * @gl: The glock 304 * 305 * Returns: 1 if there is a blocked holder at the head of the list, or 2 306 * if a type specific operation is underway. 307 */ 308 309 static int do_promote(struct gfs2_glock *gl) 310 __releases(&gl->gl_spin) 311 __acquires(&gl->gl_spin) 312 { 313 const struct gfs2_glock_operations *glops = gl->gl_ops; 314 struct gfs2_holder *gh, *tmp; 315 int ret; 316 317 restart: 318 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 319 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 320 continue; 321 if (may_grant(gl, gh)) { 322 if (gh->gh_list.prev == &gl->gl_holders && 323 glops->go_lock) { 324 spin_unlock(&gl->gl_spin); 325 /* FIXME: eliminate this eventually */ 326 ret = glops->go_lock(gh); 327 spin_lock(&gl->gl_spin); 328 if (ret) { 329 if (ret == 1) 330 return 2; 331 gh->gh_error = ret; 332 list_del_init(&gh->gh_list); 333 trace_gfs2_glock_queue(gh, 0); 334 gfs2_holder_wake(gh); 335 goto restart; 336 } 337 set_bit(HIF_HOLDER, &gh->gh_iflags); 338 trace_gfs2_promote(gh, 1); 339 gfs2_holder_wake(gh); 340 goto restart; 341 } 342 set_bit(HIF_HOLDER, &gh->gh_iflags); 343 trace_gfs2_promote(gh, 0); 344 gfs2_holder_wake(gh); 345 continue; 346 } 347 if (gh->gh_list.prev == &gl->gl_holders) 348 return 1; 349 do_error(gl, 0); 350 break; 351 } 352 return 0; 353 } 354 355 /** 356 * find_first_waiter - find the first gh that's waiting for the glock 357 * @gl: the glock 358 */ 359 360 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 361 { 362 struct gfs2_holder *gh; 363 364 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 365 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 366 return gh; 367 } 368 return NULL; 369 } 370 371 /** 372 * state_change - record that the glock is now in a different state 373 * @gl: the glock 374 * @new_state the new state 375 * 376 */ 377 378 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 379 { 380 int held1, held2; 381 382 held1 = (gl->gl_state != LM_ST_UNLOCKED); 383 held2 = (new_state != LM_ST_UNLOCKED); 384 385 if (held1 != held2) { 386 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 387 if (held2) 388 gl->gl_lockref.count++; 389 else 390 gl->gl_lockref.count--; 391 } 392 if (held1 && held2 && list_empty(&gl->gl_holders)) 393 clear_bit(GLF_QUEUED, &gl->gl_flags); 394 395 if (new_state != gl->gl_target) 396 /* shorten our minimum hold time */ 397 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 398 GL_GLOCK_MIN_HOLD); 399 gl->gl_state = new_state; 400 gl->gl_tchange = jiffies; 401 } 402 403 static void gfs2_demote_wake(struct gfs2_glock *gl) 404 { 405 gl->gl_demote_state = LM_ST_EXCLUSIVE; 406 clear_bit(GLF_DEMOTE, &gl->gl_flags); 407 smp_mb__after_atomic(); 408 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 409 } 410 411 /** 412 * finish_xmote - The DLM has replied to one of our lock requests 413 * @gl: The glock 414 * @ret: The status from the DLM 415 * 416 */ 417 418 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 419 { 420 const struct gfs2_glock_operations *glops = gl->gl_ops; 421 struct gfs2_holder *gh; 422 unsigned state = ret & LM_OUT_ST_MASK; 423 int rv; 424 425 spin_lock(&gl->gl_spin); 426 trace_gfs2_glock_state_change(gl, state); 427 state_change(gl, state); 428 gh = find_first_waiter(gl); 429 430 /* Demote to UN request arrived during demote to SH or DF */ 431 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 432 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 433 gl->gl_target = LM_ST_UNLOCKED; 434 435 /* Check for state != intended state */ 436 if (unlikely(state != gl->gl_target)) { 437 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 438 /* move to back of queue and try next entry */ 439 if (ret & LM_OUT_CANCELED) { 440 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 441 list_move_tail(&gh->gh_list, &gl->gl_holders); 442 gh = find_first_waiter(gl); 443 gl->gl_target = gh->gh_state; 444 goto retry; 445 } 446 /* Some error or failed "try lock" - report it */ 447 if ((ret & LM_OUT_ERROR) || 448 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 449 gl->gl_target = gl->gl_state; 450 do_error(gl, ret); 451 goto out; 452 } 453 } 454 switch(state) { 455 /* Unlocked due to conversion deadlock, try again */ 456 case LM_ST_UNLOCKED: 457 retry: 458 do_xmote(gl, gh, gl->gl_target); 459 break; 460 /* Conversion fails, unlock and try again */ 461 case LM_ST_SHARED: 462 case LM_ST_DEFERRED: 463 do_xmote(gl, gh, LM_ST_UNLOCKED); 464 break; 465 default: /* Everything else */ 466 pr_err("wanted %u got %u\n", gl->gl_target, state); 467 GLOCK_BUG_ON(gl, 1); 468 } 469 spin_unlock(&gl->gl_spin); 470 return; 471 } 472 473 /* Fast path - we got what we asked for */ 474 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 475 gfs2_demote_wake(gl); 476 if (state != LM_ST_UNLOCKED) { 477 if (glops->go_xmote_bh) { 478 spin_unlock(&gl->gl_spin); 479 rv = glops->go_xmote_bh(gl, gh); 480 spin_lock(&gl->gl_spin); 481 if (rv) { 482 do_error(gl, rv); 483 goto out; 484 } 485 } 486 rv = do_promote(gl); 487 if (rv == 2) 488 goto out_locked; 489 } 490 out: 491 clear_bit(GLF_LOCK, &gl->gl_flags); 492 out_locked: 493 spin_unlock(&gl->gl_spin); 494 } 495 496 /** 497 * do_xmote - Calls the DLM to change the state of a lock 498 * @gl: The lock state 499 * @gh: The holder (only for promotes) 500 * @target: The target lock state 501 * 502 */ 503 504 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) 505 __releases(&gl->gl_spin) 506 __acquires(&gl->gl_spin) 507 { 508 const struct gfs2_glock_operations *glops = gl->gl_ops; 509 struct gfs2_sbd *sdp = gl->gl_sbd; 510 unsigned int lck_flags = gh ? gh->gh_flags : 0; 511 int ret; 512 513 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 514 LM_FLAG_PRIORITY); 515 GLOCK_BUG_ON(gl, gl->gl_state == target); 516 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 517 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 518 glops->go_inval) { 519 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 520 do_error(gl, 0); /* Fail queued try locks */ 521 } 522 gl->gl_req = target; 523 set_bit(GLF_BLOCKING, &gl->gl_flags); 524 if ((gl->gl_req == LM_ST_UNLOCKED) || 525 (gl->gl_state == LM_ST_EXCLUSIVE) || 526 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) 527 clear_bit(GLF_BLOCKING, &gl->gl_flags); 528 spin_unlock(&gl->gl_spin); 529 if (glops->go_sync) 530 glops->go_sync(gl); 531 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 532 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); 533 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 534 535 gfs2_glock_hold(gl); 536 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 537 /* lock_dlm */ 538 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 539 if (ret) { 540 pr_err("lm_lock ret %d\n", ret); 541 GLOCK_BUG_ON(gl, 1); 542 } 543 } else { /* lock_nolock */ 544 finish_xmote(gl, target); 545 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 546 gfs2_glock_put(gl); 547 } 548 549 spin_lock(&gl->gl_spin); 550 } 551 552 /** 553 * find_first_holder - find the first "holder" gh 554 * @gl: the glock 555 */ 556 557 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 558 { 559 struct gfs2_holder *gh; 560 561 if (!list_empty(&gl->gl_holders)) { 562 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); 563 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 564 return gh; 565 } 566 return NULL; 567 } 568 569 /** 570 * run_queue - do all outstanding tasks related to a glock 571 * @gl: The glock in question 572 * @nonblock: True if we must not block in run_queue 573 * 574 */ 575 576 static void run_queue(struct gfs2_glock *gl, const int nonblock) 577 __releases(&gl->gl_spin) 578 __acquires(&gl->gl_spin) 579 { 580 struct gfs2_holder *gh = NULL; 581 int ret; 582 583 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 584 return; 585 586 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 587 588 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 589 gl->gl_demote_state != gl->gl_state) { 590 if (find_first_holder(gl)) 591 goto out_unlock; 592 if (nonblock) 593 goto out_sched; 594 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 595 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 596 gl->gl_target = gl->gl_demote_state; 597 } else { 598 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 599 gfs2_demote_wake(gl); 600 ret = do_promote(gl); 601 if (ret == 0) 602 goto out_unlock; 603 if (ret == 2) 604 goto out; 605 gh = find_first_waiter(gl); 606 gl->gl_target = gh->gh_state; 607 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 608 do_error(gl, 0); /* Fail queued try locks */ 609 } 610 do_xmote(gl, gh, gl->gl_target); 611 out: 612 return; 613 614 out_sched: 615 clear_bit(GLF_LOCK, &gl->gl_flags); 616 smp_mb__after_atomic(); 617 gl->gl_lockref.count++; 618 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 619 gl->gl_lockref.count--; 620 return; 621 622 out_unlock: 623 clear_bit(GLF_LOCK, &gl->gl_flags); 624 smp_mb__after_atomic(); 625 return; 626 } 627 628 static void delete_work_func(struct work_struct *work) 629 { 630 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); 631 struct gfs2_sbd *sdp = gl->gl_sbd; 632 struct gfs2_inode *ip; 633 struct inode *inode; 634 u64 no_addr = gl->gl_name.ln_number; 635 636 ip = gl->gl_object; 637 /* Note: Unsafe to dereference ip as we don't hold right refs/locks */ 638 639 if (ip) 640 inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1); 641 else 642 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED); 643 if (inode && !IS_ERR(inode)) { 644 d_prune_aliases(inode); 645 iput(inode); 646 } 647 gfs2_glock_put(gl); 648 } 649 650 static void glock_work_func(struct work_struct *work) 651 { 652 unsigned long delay = 0; 653 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 654 int drop_ref = 0; 655 656 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 657 finish_xmote(gl, gl->gl_reply); 658 drop_ref = 1; 659 } 660 spin_lock(&gl->gl_spin); 661 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 662 gl->gl_state != LM_ST_UNLOCKED && 663 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 664 unsigned long holdtime, now = jiffies; 665 666 holdtime = gl->gl_tchange + gl->gl_hold_time; 667 if (time_before(now, holdtime)) 668 delay = holdtime - now; 669 670 if (!delay) { 671 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 672 set_bit(GLF_DEMOTE, &gl->gl_flags); 673 } 674 } 675 run_queue(gl, 0); 676 spin_unlock(&gl->gl_spin); 677 if (!delay) 678 gfs2_glock_put(gl); 679 else { 680 if (gl->gl_name.ln_type != LM_TYPE_INODE) 681 delay = 0; 682 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 683 gfs2_glock_put(gl); 684 } 685 if (drop_ref) 686 gfs2_glock_put(gl); 687 } 688 689 /** 690 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 691 * @sdp: The GFS2 superblock 692 * @number: the lock number 693 * @glops: The glock_operations to use 694 * @create: If 0, don't create the glock if it doesn't exist 695 * @glp: the glock is returned here 696 * 697 * This does not lock a glock, just finds/creates structures for one. 698 * 699 * Returns: errno 700 */ 701 702 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 703 const struct gfs2_glock_operations *glops, int create, 704 struct gfs2_glock **glp) 705 { 706 struct super_block *s = sdp->sd_vfs; 707 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type }; 708 struct gfs2_glock *gl, *tmp; 709 unsigned int hash = gl_hash(sdp, &name); 710 struct address_space *mapping; 711 struct kmem_cache *cachep; 712 713 rcu_read_lock(); 714 gl = search_bucket(hash, sdp, &name); 715 rcu_read_unlock(); 716 717 *glp = gl; 718 if (gl) 719 return 0; 720 if (!create) 721 return -ENOENT; 722 723 if (glops->go_flags & GLOF_ASPACE) 724 cachep = gfs2_glock_aspace_cachep; 725 else 726 cachep = gfs2_glock_cachep; 727 gl = kmem_cache_alloc(cachep, GFP_NOFS); 728 if (!gl) 729 return -ENOMEM; 730 731 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 732 733 if (glops->go_flags & GLOF_LVB) { 734 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS); 735 if (!gl->gl_lksb.sb_lvbptr) { 736 kmem_cache_free(cachep, gl); 737 return -ENOMEM; 738 } 739 } 740 741 atomic_inc(&sdp->sd_glock_disposal); 742 gl->gl_sbd = sdp; 743 gl->gl_flags = 0; 744 gl->gl_name = name; 745 gl->gl_lockref.count = 1; 746 gl->gl_state = LM_ST_UNLOCKED; 747 gl->gl_target = LM_ST_UNLOCKED; 748 gl->gl_demote_state = LM_ST_EXCLUSIVE; 749 gl->gl_hash = hash; 750 gl->gl_ops = glops; 751 gl->gl_dstamp = ktime_set(0, 0); 752 preempt_disable(); 753 /* We use the global stats to estimate the initial per-glock stats */ 754 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 755 preempt_enable(); 756 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 757 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 758 gl->gl_tchange = jiffies; 759 gl->gl_object = NULL; 760 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 761 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 762 INIT_WORK(&gl->gl_delete, delete_work_func); 763 764 mapping = gfs2_glock2aspace(gl); 765 if (mapping) { 766 mapping->a_ops = &gfs2_meta_aops; 767 mapping->host = s->s_bdev->bd_inode; 768 mapping->flags = 0; 769 mapping_set_gfp_mask(mapping, GFP_NOFS); 770 mapping->private_data = NULL; 771 mapping->writeback_index = 0; 772 } 773 774 spin_lock_bucket(hash); 775 tmp = search_bucket(hash, sdp, &name); 776 if (tmp) { 777 spin_unlock_bucket(hash); 778 kfree(gl->gl_lksb.sb_lvbptr); 779 kmem_cache_free(cachep, gl); 780 atomic_dec(&sdp->sd_glock_disposal); 781 gl = tmp; 782 } else { 783 hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]); 784 spin_unlock_bucket(hash); 785 } 786 787 *glp = gl; 788 789 return 0; 790 } 791 792 /** 793 * gfs2_holder_init - initialize a struct gfs2_holder in the default way 794 * @gl: the glock 795 * @state: the state we're requesting 796 * @flags: the modifier flags 797 * @gh: the holder structure 798 * 799 */ 800 801 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, 802 struct gfs2_holder *gh) 803 { 804 INIT_LIST_HEAD(&gh->gh_list); 805 gh->gh_gl = gl; 806 gh->gh_ip = _RET_IP_; 807 gh->gh_owner_pid = get_pid(task_pid(current)); 808 gh->gh_state = state; 809 gh->gh_flags = flags; 810 gh->gh_error = 0; 811 gh->gh_iflags = 0; 812 gfs2_glock_hold(gl); 813 } 814 815 /** 816 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 817 * @state: the state we're requesting 818 * @flags: the modifier flags 819 * @gh: the holder structure 820 * 821 * Don't mess with the glock. 822 * 823 */ 824 825 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh) 826 { 827 gh->gh_state = state; 828 gh->gh_flags = flags; 829 gh->gh_iflags = 0; 830 gh->gh_ip = _RET_IP_; 831 put_pid(gh->gh_owner_pid); 832 gh->gh_owner_pid = get_pid(task_pid(current)); 833 } 834 835 /** 836 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference) 837 * @gh: the holder structure 838 * 839 */ 840 841 void gfs2_holder_uninit(struct gfs2_holder *gh) 842 { 843 put_pid(gh->gh_owner_pid); 844 gfs2_glock_put(gh->gh_gl); 845 gh->gh_gl = NULL; 846 gh->gh_ip = 0; 847 } 848 849 /** 850 * gfs2_glock_wait - wait on a glock acquisition 851 * @gh: the glock holder 852 * 853 * Returns: 0 on success 854 */ 855 856 int gfs2_glock_wait(struct gfs2_holder *gh) 857 { 858 unsigned long time1 = jiffies; 859 860 might_sleep(); 861 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 862 if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */ 863 /* Lengthen the minimum hold time. */ 864 gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time + 865 GL_GLOCK_HOLD_INCR, 866 GL_GLOCK_MAX_HOLD); 867 return gh->gh_error; 868 } 869 870 /** 871 * handle_callback - process a demote request 872 * @gl: the glock 873 * @state: the state the caller wants us to change to 874 * 875 * There are only two requests that we are going to see in actual 876 * practise: LM_ST_SHARED and LM_ST_UNLOCKED 877 */ 878 879 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 880 unsigned long delay, bool remote) 881 { 882 int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE; 883 884 set_bit(bit, &gl->gl_flags); 885 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 886 gl->gl_demote_state = state; 887 gl->gl_demote_time = jiffies; 888 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 889 gl->gl_demote_state != state) { 890 gl->gl_demote_state = LM_ST_UNLOCKED; 891 } 892 if (gl->gl_ops->go_callback) 893 gl->gl_ops->go_callback(gl, remote); 894 trace_gfs2_demote_rq(gl, remote); 895 } 896 897 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 898 { 899 struct va_format vaf; 900 va_list args; 901 902 va_start(args, fmt); 903 904 if (seq) { 905 seq_vprintf(seq, fmt, args); 906 } else { 907 vaf.fmt = fmt; 908 vaf.va = &args; 909 910 pr_err("%pV", &vaf); 911 } 912 913 va_end(args); 914 } 915 916 /** 917 * add_to_queue - Add a holder to the wait queue (but look for recursion) 918 * @gh: the holder structure to add 919 * 920 * Eventually we should move the recursive locking trap to a 921 * debugging option or something like that. This is the fast 922 * path and needs to have the minimum number of distractions. 923 * 924 */ 925 926 static inline void add_to_queue(struct gfs2_holder *gh) 927 __releases(&gl->gl_spin) 928 __acquires(&gl->gl_spin) 929 { 930 struct gfs2_glock *gl = gh->gh_gl; 931 struct gfs2_sbd *sdp = gl->gl_sbd; 932 struct list_head *insert_pt = NULL; 933 struct gfs2_holder *gh2; 934 int try_futile = 0; 935 936 BUG_ON(gh->gh_owner_pid == NULL); 937 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 938 BUG(); 939 940 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 941 if (test_bit(GLF_LOCK, &gl->gl_flags)) 942 try_futile = !may_grant(gl, gh); 943 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 944 goto fail; 945 } 946 947 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 948 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && 949 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) 950 goto trap_recursive; 951 if (try_futile && 952 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 953 fail: 954 gh->gh_error = GLR_TRYFAILED; 955 gfs2_holder_wake(gh); 956 return; 957 } 958 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 959 continue; 960 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 961 insert_pt = &gh2->gh_list; 962 } 963 set_bit(GLF_QUEUED, &gl->gl_flags); 964 trace_gfs2_glock_queue(gh, 1); 965 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 966 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 967 if (likely(insert_pt == NULL)) { 968 list_add_tail(&gh->gh_list, &gl->gl_holders); 969 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 970 goto do_cancel; 971 return; 972 } 973 list_add_tail(&gh->gh_list, insert_pt); 974 do_cancel: 975 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); 976 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 977 spin_unlock(&gl->gl_spin); 978 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 979 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 980 spin_lock(&gl->gl_spin); 981 } 982 return; 983 984 trap_recursive: 985 pr_err("original: %pSR\n", (void *)gh2->gh_ip); 986 pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid)); 987 pr_err("lock type: %d req lock state : %d\n", 988 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 989 pr_err("new: %pSR\n", (void *)gh->gh_ip); 990 pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid)); 991 pr_err("lock type: %d req lock state : %d\n", 992 gh->gh_gl->gl_name.ln_type, gh->gh_state); 993 gfs2_dump_glock(NULL, gl); 994 BUG(); 995 } 996 997 /** 998 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 999 * @gh: the holder structure 1000 * 1001 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1002 * 1003 * Returns: 0, GLR_TRYFAILED, or errno on failure 1004 */ 1005 1006 int gfs2_glock_nq(struct gfs2_holder *gh) 1007 { 1008 struct gfs2_glock *gl = gh->gh_gl; 1009 struct gfs2_sbd *sdp = gl->gl_sbd; 1010 int error = 0; 1011 1012 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 1013 return -EIO; 1014 1015 if (test_bit(GLF_LRU, &gl->gl_flags)) 1016 gfs2_glock_remove_from_lru(gl); 1017 1018 spin_lock(&gl->gl_spin); 1019 add_to_queue(gh); 1020 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1021 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { 1022 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1023 gl->gl_lockref.count++; 1024 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1025 gl->gl_lockref.count--; 1026 } 1027 run_queue(gl, 1); 1028 spin_unlock(&gl->gl_spin); 1029 1030 if (!(gh->gh_flags & GL_ASYNC)) 1031 error = gfs2_glock_wait(gh); 1032 1033 return error; 1034 } 1035 1036 /** 1037 * gfs2_glock_poll - poll to see if an async request has been completed 1038 * @gh: the holder 1039 * 1040 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1041 */ 1042 1043 int gfs2_glock_poll(struct gfs2_holder *gh) 1044 { 1045 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1046 } 1047 1048 /** 1049 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1050 * @gh: the glock holder 1051 * 1052 */ 1053 1054 void gfs2_glock_dq(struct gfs2_holder *gh) 1055 { 1056 struct gfs2_glock *gl = gh->gh_gl; 1057 const struct gfs2_glock_operations *glops = gl->gl_ops; 1058 unsigned delay = 0; 1059 int fast_path = 0; 1060 1061 spin_lock(&gl->gl_spin); 1062 if (gh->gh_flags & GL_NOCACHE) 1063 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1064 1065 list_del_init(&gh->gh_list); 1066 if (find_first_holder(gl) == NULL) { 1067 if (glops->go_unlock) { 1068 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags)); 1069 spin_unlock(&gl->gl_spin); 1070 glops->go_unlock(gh); 1071 spin_lock(&gl->gl_spin); 1072 clear_bit(GLF_LOCK, &gl->gl_flags); 1073 } 1074 if (list_empty(&gl->gl_holders) && 1075 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1076 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1077 fast_path = 1; 1078 } 1079 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1080 gfs2_glock_add_to_lru(gl); 1081 1082 trace_gfs2_glock_queue(gh, 0); 1083 spin_unlock(&gl->gl_spin); 1084 if (likely(fast_path)) 1085 return; 1086 1087 gfs2_glock_hold(gl); 1088 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1089 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1090 gl->gl_name.ln_type == LM_TYPE_INODE) 1091 delay = gl->gl_hold_time; 1092 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 1093 gfs2_glock_put(gl); 1094 } 1095 1096 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1097 { 1098 struct gfs2_glock *gl = gh->gh_gl; 1099 gfs2_glock_dq(gh); 1100 might_sleep(); 1101 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1102 } 1103 1104 /** 1105 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1106 * @gh: the holder structure 1107 * 1108 */ 1109 1110 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1111 { 1112 gfs2_glock_dq(gh); 1113 gfs2_holder_uninit(gh); 1114 } 1115 1116 /** 1117 * gfs2_glock_nq_num - acquire a glock based on lock number 1118 * @sdp: the filesystem 1119 * @number: the lock number 1120 * @glops: the glock operations for the type of glock 1121 * @state: the state to acquire the glock in 1122 * @flags: modifier flags for the acquisition 1123 * @gh: the struct gfs2_holder 1124 * 1125 * Returns: errno 1126 */ 1127 1128 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1129 const struct gfs2_glock_operations *glops, 1130 unsigned int state, int flags, struct gfs2_holder *gh) 1131 { 1132 struct gfs2_glock *gl; 1133 int error; 1134 1135 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1136 if (!error) { 1137 error = gfs2_glock_nq_init(gl, state, flags, gh); 1138 gfs2_glock_put(gl); 1139 } 1140 1141 return error; 1142 } 1143 1144 /** 1145 * glock_compare - Compare two struct gfs2_glock structures for sorting 1146 * @arg_a: the first structure 1147 * @arg_b: the second structure 1148 * 1149 */ 1150 1151 static int glock_compare(const void *arg_a, const void *arg_b) 1152 { 1153 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; 1154 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; 1155 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; 1156 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; 1157 1158 if (a->ln_number > b->ln_number) 1159 return 1; 1160 if (a->ln_number < b->ln_number) 1161 return -1; 1162 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); 1163 return 0; 1164 } 1165 1166 /** 1167 * nq_m_sync - synchonously acquire more than one glock in deadlock free order 1168 * @num_gh: the number of structures 1169 * @ghs: an array of struct gfs2_holder structures 1170 * 1171 * Returns: 0 on success (all glocks acquired), 1172 * errno on failure (no glocks acquired) 1173 */ 1174 1175 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, 1176 struct gfs2_holder **p) 1177 { 1178 unsigned int x; 1179 int error = 0; 1180 1181 for (x = 0; x < num_gh; x++) 1182 p[x] = &ghs[x]; 1183 1184 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); 1185 1186 for (x = 0; x < num_gh; x++) { 1187 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1188 1189 error = gfs2_glock_nq(p[x]); 1190 if (error) { 1191 while (x--) 1192 gfs2_glock_dq(p[x]); 1193 break; 1194 } 1195 } 1196 1197 return error; 1198 } 1199 1200 /** 1201 * gfs2_glock_nq_m - acquire multiple glocks 1202 * @num_gh: the number of structures 1203 * @ghs: an array of struct gfs2_holder structures 1204 * 1205 * 1206 * Returns: 0 on success (all glocks acquired), 1207 * errno on failure (no glocks acquired) 1208 */ 1209 1210 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1211 { 1212 struct gfs2_holder *tmp[4]; 1213 struct gfs2_holder **pph = tmp; 1214 int error = 0; 1215 1216 switch(num_gh) { 1217 case 0: 1218 return 0; 1219 case 1: 1220 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1221 return gfs2_glock_nq(ghs); 1222 default: 1223 if (num_gh <= 4) 1224 break; 1225 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS); 1226 if (!pph) 1227 return -ENOMEM; 1228 } 1229 1230 error = nq_m_sync(num_gh, ghs, pph); 1231 1232 if (pph != tmp) 1233 kfree(pph); 1234 1235 return error; 1236 } 1237 1238 /** 1239 * gfs2_glock_dq_m - release multiple glocks 1240 * @num_gh: the number of structures 1241 * @ghs: an array of struct gfs2_holder structures 1242 * 1243 */ 1244 1245 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1246 { 1247 while (num_gh--) 1248 gfs2_glock_dq(&ghs[num_gh]); 1249 } 1250 1251 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1252 { 1253 unsigned long delay = 0; 1254 unsigned long holdtime; 1255 unsigned long now = jiffies; 1256 1257 gfs2_glock_hold(gl); 1258 holdtime = gl->gl_tchange + gl->gl_hold_time; 1259 if (test_bit(GLF_QUEUED, &gl->gl_flags) && 1260 gl->gl_name.ln_type == LM_TYPE_INODE) { 1261 if (time_before(now, holdtime)) 1262 delay = holdtime - now; 1263 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1264 delay = gl->gl_hold_time; 1265 } 1266 1267 spin_lock(&gl->gl_spin); 1268 handle_callback(gl, state, delay, true); 1269 spin_unlock(&gl->gl_spin); 1270 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 1271 gfs2_glock_put(gl); 1272 } 1273 1274 /** 1275 * gfs2_should_freeze - Figure out if glock should be frozen 1276 * @gl: The glock in question 1277 * 1278 * Glocks are not frozen if (a) the result of the dlm operation is 1279 * an error, (b) the locking operation was an unlock operation or 1280 * (c) if there is a "noexp" flagged request anywhere in the queue 1281 * 1282 * Returns: 1 if freezing should occur, 0 otherwise 1283 */ 1284 1285 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1286 { 1287 const struct gfs2_holder *gh; 1288 1289 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1290 return 0; 1291 if (gl->gl_target == LM_ST_UNLOCKED) 1292 return 0; 1293 1294 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1295 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1296 continue; 1297 if (LM_FLAG_NOEXP & gh->gh_flags) 1298 return 0; 1299 } 1300 1301 return 1; 1302 } 1303 1304 /** 1305 * gfs2_glock_complete - Callback used by locking 1306 * @gl: Pointer to the glock 1307 * @ret: The return value from the dlm 1308 * 1309 * The gl_reply field is under the gl_spin lock so that it is ok 1310 * to use a bitfield shared with other glock state fields. 1311 */ 1312 1313 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1314 { 1315 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 1316 1317 spin_lock(&gl->gl_spin); 1318 gl->gl_reply = ret; 1319 1320 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1321 if (gfs2_should_freeze(gl)) { 1322 set_bit(GLF_FROZEN, &gl->gl_flags); 1323 spin_unlock(&gl->gl_spin); 1324 return; 1325 } 1326 } 1327 1328 gl->gl_lockref.count++; 1329 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1330 spin_unlock(&gl->gl_spin); 1331 1332 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1333 gfs2_glock_put(gl); 1334 } 1335 1336 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b) 1337 { 1338 struct gfs2_glock *gla, *glb; 1339 1340 gla = list_entry(a, struct gfs2_glock, gl_lru); 1341 glb = list_entry(b, struct gfs2_glock, gl_lru); 1342 1343 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1344 return 1; 1345 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1346 return -1; 1347 1348 return 0; 1349 } 1350 1351 /** 1352 * gfs2_dispose_glock_lru - Demote a list of glocks 1353 * @list: The list to dispose of 1354 * 1355 * Disposing of glocks may involve disk accesses, so that here we sort 1356 * the glocks by number (i.e. disk location of the inodes) so that if 1357 * there are any such accesses, they'll be sent in order (mostly). 1358 * 1359 * Must be called under the lru_lock, but may drop and retake this 1360 * lock. While the lru_lock is dropped, entries may vanish from the 1361 * list, but no new entries will appear on the list (since it is 1362 * private) 1363 */ 1364 1365 static void gfs2_dispose_glock_lru(struct list_head *list) 1366 __releases(&lru_lock) 1367 __acquires(&lru_lock) 1368 { 1369 struct gfs2_glock *gl; 1370 1371 list_sort(NULL, list, glock_cmp); 1372 1373 while(!list_empty(list)) { 1374 gl = list_entry(list->next, struct gfs2_glock, gl_lru); 1375 list_del_init(&gl->gl_lru); 1376 if (!spin_trylock(&gl->gl_spin)) { 1377 add_back_to_lru: 1378 list_add(&gl->gl_lru, &lru_list); 1379 atomic_inc(&lru_count); 1380 continue; 1381 } 1382 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1383 spin_unlock(&gl->gl_spin); 1384 goto add_back_to_lru; 1385 } 1386 clear_bit(GLF_LRU, &gl->gl_flags); 1387 gl->gl_lockref.count++; 1388 if (demote_ok(gl)) 1389 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1390 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); 1391 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1392 gl->gl_lockref.count--; 1393 spin_unlock(&gl->gl_spin); 1394 cond_resched_lock(&lru_lock); 1395 } 1396 } 1397 1398 /** 1399 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 1400 * @nr: The number of entries to scan 1401 * 1402 * This function selects the entries on the LRU which are able to 1403 * be demoted, and then kicks off the process by calling 1404 * gfs2_dispose_glock_lru() above. 1405 */ 1406 1407 static long gfs2_scan_glock_lru(int nr) 1408 { 1409 struct gfs2_glock *gl; 1410 LIST_HEAD(skipped); 1411 LIST_HEAD(dispose); 1412 long freed = 0; 1413 1414 spin_lock(&lru_lock); 1415 while ((nr-- >= 0) && !list_empty(&lru_list)) { 1416 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1417 1418 /* Test for being demotable */ 1419 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { 1420 list_move(&gl->gl_lru, &dispose); 1421 atomic_dec(&lru_count); 1422 freed++; 1423 continue; 1424 } 1425 1426 list_move(&gl->gl_lru, &skipped); 1427 } 1428 list_splice(&skipped, &lru_list); 1429 if (!list_empty(&dispose)) 1430 gfs2_dispose_glock_lru(&dispose); 1431 spin_unlock(&lru_lock); 1432 1433 return freed; 1434 } 1435 1436 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 1437 struct shrink_control *sc) 1438 { 1439 if (!(sc->gfp_mask & __GFP_FS)) 1440 return SHRINK_STOP; 1441 return gfs2_scan_glock_lru(sc->nr_to_scan); 1442 } 1443 1444 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 1445 struct shrink_control *sc) 1446 { 1447 return vfs_pressure_ratio(atomic_read(&lru_count)); 1448 } 1449 1450 static struct shrinker glock_shrinker = { 1451 .seeks = DEFAULT_SEEKS, 1452 .count_objects = gfs2_glock_shrink_count, 1453 .scan_objects = gfs2_glock_shrink_scan, 1454 }; 1455 1456 /** 1457 * examine_bucket - Call a function for glock in a hash bucket 1458 * @examiner: the function 1459 * @sdp: the filesystem 1460 * @bucket: the bucket 1461 * 1462 */ 1463 1464 static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp, 1465 unsigned int hash) 1466 { 1467 struct gfs2_glock *gl; 1468 struct hlist_bl_head *head = &gl_hash_table[hash]; 1469 struct hlist_bl_node *pos; 1470 1471 rcu_read_lock(); 1472 hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) { 1473 if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) 1474 examiner(gl); 1475 } 1476 rcu_read_unlock(); 1477 cond_resched(); 1478 } 1479 1480 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 1481 { 1482 unsigned x; 1483 1484 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) 1485 examine_bucket(examiner, sdp, x); 1486 } 1487 1488 1489 /** 1490 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 1491 * @gl: The glock to thaw 1492 * 1493 */ 1494 1495 static void thaw_glock(struct gfs2_glock *gl) 1496 { 1497 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) 1498 goto out; 1499 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1500 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) { 1501 out: 1502 gfs2_glock_put(gl); 1503 } 1504 } 1505 1506 /** 1507 * clear_glock - look at a glock and see if we can free it from glock cache 1508 * @gl: the glock to look at 1509 * 1510 */ 1511 1512 static void clear_glock(struct gfs2_glock *gl) 1513 { 1514 gfs2_glock_remove_from_lru(gl); 1515 1516 spin_lock(&gl->gl_spin); 1517 if (gl->gl_state != LM_ST_UNLOCKED) 1518 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1519 spin_unlock(&gl->gl_spin); 1520 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1521 gfs2_glock_put(gl); 1522 } 1523 1524 /** 1525 * gfs2_glock_thaw - Thaw any frozen glocks 1526 * @sdp: The super block 1527 * 1528 */ 1529 1530 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 1531 { 1532 glock_hash_walk(thaw_glock, sdp); 1533 } 1534 1535 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl) 1536 { 1537 spin_lock(&gl->gl_spin); 1538 gfs2_dump_glock(seq, gl); 1539 spin_unlock(&gl->gl_spin); 1540 } 1541 1542 static void dump_glock_func(struct gfs2_glock *gl) 1543 { 1544 dump_glock(NULL, gl); 1545 } 1546 1547 /** 1548 * gfs2_gl_hash_clear - Empty out the glock hash table 1549 * @sdp: the filesystem 1550 * @wait: wait until it's all gone 1551 * 1552 * Called when unmounting the filesystem. 1553 */ 1554 1555 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 1556 { 1557 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); 1558 flush_workqueue(glock_workqueue); 1559 glock_hash_walk(clear_glock, sdp); 1560 flush_workqueue(glock_workqueue); 1561 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0); 1562 glock_hash_walk(dump_glock_func, sdp); 1563 } 1564 1565 void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 1566 { 1567 struct gfs2_glock *gl = ip->i_gl; 1568 int ret; 1569 1570 ret = gfs2_truncatei_resume(ip); 1571 gfs2_assert_withdraw(gl->gl_sbd, ret == 0); 1572 1573 spin_lock(&gl->gl_spin); 1574 clear_bit(GLF_LOCK, &gl->gl_flags); 1575 run_queue(gl, 1); 1576 spin_unlock(&gl->gl_spin); 1577 } 1578 1579 static const char *state2str(unsigned state) 1580 { 1581 switch(state) { 1582 case LM_ST_UNLOCKED: 1583 return "UN"; 1584 case LM_ST_SHARED: 1585 return "SH"; 1586 case LM_ST_DEFERRED: 1587 return "DF"; 1588 case LM_ST_EXCLUSIVE: 1589 return "EX"; 1590 } 1591 return "??"; 1592 } 1593 1594 static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags) 1595 { 1596 char *p = buf; 1597 if (flags & LM_FLAG_TRY) 1598 *p++ = 't'; 1599 if (flags & LM_FLAG_TRY_1CB) 1600 *p++ = 'T'; 1601 if (flags & LM_FLAG_NOEXP) 1602 *p++ = 'e'; 1603 if (flags & LM_FLAG_ANY) 1604 *p++ = 'A'; 1605 if (flags & LM_FLAG_PRIORITY) 1606 *p++ = 'p'; 1607 if (flags & GL_ASYNC) 1608 *p++ = 'a'; 1609 if (flags & GL_EXACT) 1610 *p++ = 'E'; 1611 if (flags & GL_NOCACHE) 1612 *p++ = 'c'; 1613 if (test_bit(HIF_HOLDER, &iflags)) 1614 *p++ = 'H'; 1615 if (test_bit(HIF_WAIT, &iflags)) 1616 *p++ = 'W'; 1617 if (test_bit(HIF_FIRST, &iflags)) 1618 *p++ = 'F'; 1619 *p = 0; 1620 return buf; 1621 } 1622 1623 /** 1624 * dump_holder - print information about a glock holder 1625 * @seq: the seq_file struct 1626 * @gh: the glock holder 1627 * 1628 */ 1629 1630 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) 1631 { 1632 struct task_struct *gh_owner = NULL; 1633 char flags_buf[32]; 1634 1635 rcu_read_lock(); 1636 if (gh->gh_owner_pid) 1637 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 1638 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n", 1639 state2str(gh->gh_state), 1640 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), 1641 gh->gh_error, 1642 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, 1643 gh_owner ? gh_owner->comm : "(ended)", 1644 (void *)gh->gh_ip); 1645 rcu_read_unlock(); 1646 } 1647 1648 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) 1649 { 1650 const unsigned long *gflags = &gl->gl_flags; 1651 char *p = buf; 1652 1653 if (test_bit(GLF_LOCK, gflags)) 1654 *p++ = 'l'; 1655 if (test_bit(GLF_DEMOTE, gflags)) 1656 *p++ = 'D'; 1657 if (test_bit(GLF_PENDING_DEMOTE, gflags)) 1658 *p++ = 'd'; 1659 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags)) 1660 *p++ = 'p'; 1661 if (test_bit(GLF_DIRTY, gflags)) 1662 *p++ = 'y'; 1663 if (test_bit(GLF_LFLUSH, gflags)) 1664 *p++ = 'f'; 1665 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) 1666 *p++ = 'i'; 1667 if (test_bit(GLF_REPLY_PENDING, gflags)) 1668 *p++ = 'r'; 1669 if (test_bit(GLF_INITIAL, gflags)) 1670 *p++ = 'I'; 1671 if (test_bit(GLF_FROZEN, gflags)) 1672 *p++ = 'F'; 1673 if (test_bit(GLF_QUEUED, gflags)) 1674 *p++ = 'q'; 1675 if (test_bit(GLF_LRU, gflags)) 1676 *p++ = 'L'; 1677 if (gl->gl_object) 1678 *p++ = 'o'; 1679 if (test_bit(GLF_BLOCKING, gflags)) 1680 *p++ = 'b'; 1681 *p = 0; 1682 return buf; 1683 } 1684 1685 /** 1686 * gfs2_dump_glock - print information about a glock 1687 * @seq: The seq_file struct 1688 * @gl: the glock 1689 * 1690 * The file format is as follows: 1691 * One line per object, capital letters are used to indicate objects 1692 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented, 1693 * other objects are indented by a single space and follow the glock to 1694 * which they are related. Fields are indicated by lower case letters 1695 * followed by a colon and the field value, except for strings which are in 1696 * [] so that its possible to see if they are composed of spaces for 1697 * example. The field's are n = number (id of the object), f = flags, 1698 * t = type, s = state, r = refcount, e = error, p = pid. 1699 * 1700 */ 1701 1702 void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) 1703 { 1704 const struct gfs2_glock_operations *glops = gl->gl_ops; 1705 unsigned long long dtime; 1706 const struct gfs2_holder *gh; 1707 char gflags_buf[32]; 1708 1709 dtime = jiffies - gl->gl_demote_time; 1710 dtime *= 1000000/HZ; /* demote time in uSec */ 1711 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) 1712 dtime = 0; 1713 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n", 1714 state2str(gl->gl_state), 1715 gl->gl_name.ln_type, 1716 (unsigned long long)gl->gl_name.ln_number, 1717 gflags2str(gflags_buf, gl), 1718 state2str(gl->gl_target), 1719 state2str(gl->gl_demote_state), dtime, 1720 atomic_read(&gl->gl_ail_count), 1721 atomic_read(&gl->gl_revokes), 1722 (int)gl->gl_lockref.count, gl->gl_hold_time); 1723 1724 list_for_each_entry(gh, &gl->gl_holders, gh_list) 1725 dump_holder(seq, gh); 1726 1727 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) 1728 glops->go_dump(seq, gl); 1729 } 1730 1731 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr) 1732 { 1733 struct gfs2_glock *gl = iter_ptr; 1734 1735 seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n", 1736 gl->gl_name.ln_type, 1737 (unsigned long long)gl->gl_name.ln_number, 1738 (long long)gl->gl_stats.stats[GFS2_LKS_SRTT], 1739 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], 1740 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], 1741 (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], 1742 (long long)gl->gl_stats.stats[GFS2_LKS_SIRT], 1743 (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], 1744 (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], 1745 (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); 1746 return 0; 1747 } 1748 1749 static const char *gfs2_gltype[] = { 1750 "type", 1751 "reserved", 1752 "nondisk", 1753 "inode", 1754 "rgrp", 1755 "meta", 1756 "iopen", 1757 "flock", 1758 "plock", 1759 "quota", 1760 "journal", 1761 }; 1762 1763 static const char *gfs2_stype[] = { 1764 [GFS2_LKS_SRTT] = "srtt", 1765 [GFS2_LKS_SRTTVAR] = "srttvar", 1766 [GFS2_LKS_SRTTB] = "srttb", 1767 [GFS2_LKS_SRTTVARB] = "srttvarb", 1768 [GFS2_LKS_SIRT] = "sirt", 1769 [GFS2_LKS_SIRTVAR] = "sirtvar", 1770 [GFS2_LKS_DCOUNT] = "dlm", 1771 [GFS2_LKS_QCOUNT] = "queue", 1772 }; 1773 1774 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype)) 1775 1776 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) 1777 { 1778 struct gfs2_glock_iter *gi = seq->private; 1779 struct gfs2_sbd *sdp = gi->sdp; 1780 unsigned index = gi->hash >> 3; 1781 unsigned subindex = gi->hash & 0x07; 1782 s64 value; 1783 int i; 1784 1785 if (index == 0 && subindex != 0) 1786 return 0; 1787 1788 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], 1789 (index == 0) ? "cpu": gfs2_stype[subindex]); 1790 1791 for_each_possible_cpu(i) { 1792 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 1793 if (index == 0) { 1794 value = i; 1795 } else { 1796 value = lkstats->lkstats[index - 1].stats[subindex]; 1797 } 1798 seq_printf(seq, " %15lld", (long long)value); 1799 } 1800 seq_putc(seq, '\n'); 1801 return 0; 1802 } 1803 1804 int __init gfs2_glock_init(void) 1805 { 1806 unsigned i; 1807 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { 1808 INIT_HLIST_BL_HEAD(&gl_hash_table[i]); 1809 } 1810 1811 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 1812 WQ_HIGHPRI | WQ_FREEZABLE, 0); 1813 if (!glock_workqueue) 1814 return -ENOMEM; 1815 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 1816 WQ_MEM_RECLAIM | WQ_FREEZABLE, 1817 0); 1818 if (!gfs2_delete_workqueue) { 1819 destroy_workqueue(glock_workqueue); 1820 return -ENOMEM; 1821 } 1822 1823 register_shrinker(&glock_shrinker); 1824 1825 return 0; 1826 } 1827 1828 void gfs2_glock_exit(void) 1829 { 1830 unregister_shrinker(&glock_shrinker); 1831 destroy_workqueue(glock_workqueue); 1832 destroy_workqueue(gfs2_delete_workqueue); 1833 } 1834 1835 static inline struct gfs2_glock *glock_hash_chain(unsigned hash) 1836 { 1837 return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]), 1838 struct gfs2_glock, gl_list); 1839 } 1840 1841 static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl) 1842 { 1843 return hlist_bl_entry(rcu_dereference(gl->gl_list.next), 1844 struct gfs2_glock, gl_list); 1845 } 1846 1847 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) 1848 { 1849 struct gfs2_glock *gl; 1850 1851 do { 1852 gl = gi->gl; 1853 if (gl) { 1854 gi->gl = glock_hash_next(gl); 1855 gi->nhash++; 1856 } else { 1857 if (gi->hash >= GFS2_GL_HASH_SIZE) { 1858 rcu_read_unlock(); 1859 return 1; 1860 } 1861 gi->gl = glock_hash_chain(gi->hash); 1862 gi->nhash = 0; 1863 } 1864 while (gi->gl == NULL) { 1865 gi->hash++; 1866 if (gi->hash >= GFS2_GL_HASH_SIZE) { 1867 rcu_read_unlock(); 1868 return 1; 1869 } 1870 gi->gl = glock_hash_chain(gi->hash); 1871 gi->nhash = 0; 1872 } 1873 /* Skip entries for other sb and dead entries */ 1874 } while (gi->sdp != gi->gl->gl_sbd || 1875 __lockref_is_dead(&gi->gl->gl_lockref)); 1876 1877 return 0; 1878 } 1879 1880 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 1881 { 1882 struct gfs2_glock_iter *gi = seq->private; 1883 loff_t n = *pos; 1884 1885 if (gi->last_pos <= *pos) 1886 n = gi->nhash + (*pos - gi->last_pos); 1887 else 1888 gi->hash = 0; 1889 1890 gi->nhash = 0; 1891 rcu_read_lock(); 1892 1893 do { 1894 if (gfs2_glock_iter_next(gi)) 1895 return NULL; 1896 } while (n--); 1897 1898 gi->last_pos = *pos; 1899 return gi->gl; 1900 } 1901 1902 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, 1903 loff_t *pos) 1904 { 1905 struct gfs2_glock_iter *gi = seq->private; 1906 1907 (*pos)++; 1908 gi->last_pos = *pos; 1909 if (gfs2_glock_iter_next(gi)) 1910 return NULL; 1911 1912 return gi->gl; 1913 } 1914 1915 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 1916 { 1917 struct gfs2_glock_iter *gi = seq->private; 1918 1919 if (gi->gl) 1920 rcu_read_unlock(); 1921 gi->gl = NULL; 1922 } 1923 1924 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 1925 { 1926 dump_glock(seq, iter_ptr); 1927 return 0; 1928 } 1929 1930 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 1931 { 1932 struct gfs2_glock_iter *gi = seq->private; 1933 1934 gi->hash = *pos; 1935 if (*pos >= GFS2_NR_SBSTATS) 1936 return NULL; 1937 preempt_disable(); 1938 return SEQ_START_TOKEN; 1939 } 1940 1941 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 1942 loff_t *pos) 1943 { 1944 struct gfs2_glock_iter *gi = seq->private; 1945 (*pos)++; 1946 gi->hash++; 1947 if (gi->hash >= GFS2_NR_SBSTATS) { 1948 preempt_enable(); 1949 return NULL; 1950 } 1951 return SEQ_START_TOKEN; 1952 } 1953 1954 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) 1955 { 1956 preempt_enable(); 1957 } 1958 1959 static const struct seq_operations gfs2_glock_seq_ops = { 1960 .start = gfs2_glock_seq_start, 1961 .next = gfs2_glock_seq_next, 1962 .stop = gfs2_glock_seq_stop, 1963 .show = gfs2_glock_seq_show, 1964 }; 1965 1966 static const struct seq_operations gfs2_glstats_seq_ops = { 1967 .start = gfs2_glock_seq_start, 1968 .next = gfs2_glock_seq_next, 1969 .stop = gfs2_glock_seq_stop, 1970 .show = gfs2_glstats_seq_show, 1971 }; 1972 1973 static const struct seq_operations gfs2_sbstats_seq_ops = { 1974 .start = gfs2_sbstats_seq_start, 1975 .next = gfs2_sbstats_seq_next, 1976 .stop = gfs2_sbstats_seq_stop, 1977 .show = gfs2_sbstats_seq_show, 1978 }; 1979 1980 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL) 1981 1982 static int gfs2_glocks_open(struct inode *inode, struct file *file) 1983 { 1984 int ret = seq_open_private(file, &gfs2_glock_seq_ops, 1985 sizeof(struct gfs2_glock_iter)); 1986 if (ret == 0) { 1987 struct seq_file *seq = file->private_data; 1988 struct gfs2_glock_iter *gi = seq->private; 1989 gi->sdp = inode->i_private; 1990 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 1991 if (seq->buf) 1992 seq->size = GFS2_SEQ_GOODSIZE; 1993 } 1994 return ret; 1995 } 1996 1997 static int gfs2_glstats_open(struct inode *inode, struct file *file) 1998 { 1999 int ret = seq_open_private(file, &gfs2_glstats_seq_ops, 2000 sizeof(struct gfs2_glock_iter)); 2001 if (ret == 0) { 2002 struct seq_file *seq = file->private_data; 2003 struct gfs2_glock_iter *gi = seq->private; 2004 gi->sdp = inode->i_private; 2005 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 2006 if (seq->buf) 2007 seq->size = GFS2_SEQ_GOODSIZE; 2008 } 2009 return ret; 2010 } 2011 2012 static int gfs2_sbstats_open(struct inode *inode, struct file *file) 2013 { 2014 int ret = seq_open_private(file, &gfs2_sbstats_seq_ops, 2015 sizeof(struct gfs2_glock_iter)); 2016 if (ret == 0) { 2017 struct seq_file *seq = file->private_data; 2018 struct gfs2_glock_iter *gi = seq->private; 2019 gi->sdp = inode->i_private; 2020 } 2021 return ret; 2022 } 2023 2024 static const struct file_operations gfs2_glocks_fops = { 2025 .owner = THIS_MODULE, 2026 .open = gfs2_glocks_open, 2027 .read = seq_read, 2028 .llseek = seq_lseek, 2029 .release = seq_release_private, 2030 }; 2031 2032 static const struct file_operations gfs2_glstats_fops = { 2033 .owner = THIS_MODULE, 2034 .open = gfs2_glstats_open, 2035 .read = seq_read, 2036 .llseek = seq_lseek, 2037 .release = seq_release_private, 2038 }; 2039 2040 static const struct file_operations gfs2_sbstats_fops = { 2041 .owner = THIS_MODULE, 2042 .open = gfs2_sbstats_open, 2043 .read = seq_read, 2044 .llseek = seq_lseek, 2045 .release = seq_release_private, 2046 }; 2047 2048 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 2049 { 2050 struct dentry *dent; 2051 2052 dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root); 2053 if (IS_ERR_OR_NULL(dent)) 2054 goto fail; 2055 sdp->debugfs_dir = dent; 2056 2057 dent = debugfs_create_file("glocks", 2058 S_IFREG | S_IRUGO, 2059 sdp->debugfs_dir, sdp, 2060 &gfs2_glocks_fops); 2061 if (IS_ERR_OR_NULL(dent)) 2062 goto fail; 2063 sdp->debugfs_dentry_glocks = dent; 2064 2065 dent = debugfs_create_file("glstats", 2066 S_IFREG | S_IRUGO, 2067 sdp->debugfs_dir, sdp, 2068 &gfs2_glstats_fops); 2069 if (IS_ERR_OR_NULL(dent)) 2070 goto fail; 2071 sdp->debugfs_dentry_glstats = dent; 2072 2073 dent = debugfs_create_file("sbstats", 2074 S_IFREG | S_IRUGO, 2075 sdp->debugfs_dir, sdp, 2076 &gfs2_sbstats_fops); 2077 if (IS_ERR_OR_NULL(dent)) 2078 goto fail; 2079 sdp->debugfs_dentry_sbstats = dent; 2080 2081 return 0; 2082 fail: 2083 gfs2_delete_debugfs_file(sdp); 2084 return dent ? PTR_ERR(dent) : -ENOMEM; 2085 } 2086 2087 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) 2088 { 2089 if (sdp->debugfs_dir) { 2090 if (sdp->debugfs_dentry_glocks) { 2091 debugfs_remove(sdp->debugfs_dentry_glocks); 2092 sdp->debugfs_dentry_glocks = NULL; 2093 } 2094 if (sdp->debugfs_dentry_glstats) { 2095 debugfs_remove(sdp->debugfs_dentry_glstats); 2096 sdp->debugfs_dentry_glstats = NULL; 2097 } 2098 if (sdp->debugfs_dentry_sbstats) { 2099 debugfs_remove(sdp->debugfs_dentry_sbstats); 2100 sdp->debugfs_dentry_sbstats = NULL; 2101 } 2102 debugfs_remove(sdp->debugfs_dir); 2103 sdp->debugfs_dir = NULL; 2104 } 2105 } 2106 2107 int gfs2_register_debugfs(void) 2108 { 2109 gfs2_root = debugfs_create_dir("gfs2", NULL); 2110 if (IS_ERR(gfs2_root)) 2111 return PTR_ERR(gfs2_root); 2112 return gfs2_root ? 0 : -ENOMEM; 2113 } 2114 2115 void gfs2_unregister_debugfs(void) 2116 { 2117 debugfs_remove(gfs2_root); 2118 gfs2_root = NULL; 2119 } 2120