1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/spinlock.h> 12 #include <linux/buffer_head.h> 13 #include <linux/delay.h> 14 #include <linux/sort.h> 15 #include <linux/hash.h> 16 #include <linux/jhash.h> 17 #include <linux/kallsyms.h> 18 #include <linux/gfs2_ondisk.h> 19 #include <linux/list.h> 20 #include <linux/wait.h> 21 #include <linux/module.h> 22 #include <linux/uaccess.h> 23 #include <linux/seq_file.h> 24 #include <linux/debugfs.h> 25 #include <linux/kthread.h> 26 #include <linux/freezer.h> 27 #include <linux/workqueue.h> 28 #include <linux/jiffies.h> 29 #include <linux/rcupdate.h> 30 #include <linux/rculist_bl.h> 31 #include <linux/bit_spinlock.h> 32 #include <linux/percpu.h> 33 #include <linux/list_sort.h> 34 #include <linux/lockref.h> 35 #include <linux/rhashtable.h> 36 37 #include "gfs2.h" 38 #include "incore.h" 39 #include "glock.h" 40 #include "glops.h" 41 #include "inode.h" 42 #include "lops.h" 43 #include "meta_io.h" 44 #include "quota.h" 45 #include "super.h" 46 #include "util.h" 47 #include "bmap.h" 48 #define CREATE_TRACE_POINTS 49 #include "trace_gfs2.h" 50 51 struct gfs2_glock_iter { 52 struct gfs2_sbd *sdp; /* incore superblock */ 53 struct rhashtable_iter hti; /* rhashtable iterator */ 54 struct gfs2_glock *gl; /* current glock struct */ 55 loff_t last_pos; /* last position */ 56 }; 57 58 typedef void (*glock_examiner) (struct gfs2_glock * gl); 59 60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 61 62 static struct dentry *gfs2_root; 63 static struct workqueue_struct *glock_workqueue; 64 struct workqueue_struct *gfs2_delete_workqueue; 65 static LIST_HEAD(lru_list); 66 static atomic_t lru_count = ATOMIC_INIT(0); 67 static DEFINE_SPINLOCK(lru_lock); 68 69 #define GFS2_GL_HASH_SHIFT 15 70 #define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT) 71 72 static const struct rhashtable_params ht_parms = { 73 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4, 74 .key_len = offsetofend(struct lm_lockname, ln_type), 75 .key_offset = offsetof(struct gfs2_glock, gl_name), 76 .head_offset = offsetof(struct gfs2_glock, gl_node), 77 }; 78 79 static struct rhashtable gl_hash_table; 80 81 #define GLOCK_WAIT_TABLE_BITS 12 82 #define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS) 83 static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned; 84 85 struct wait_glock_queue { 86 struct lm_lockname *name; 87 wait_queue_entry_t wait; 88 }; 89 90 static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode, 91 int sync, void *key) 92 { 93 struct wait_glock_queue *wait_glock = 94 container_of(wait, struct wait_glock_queue, wait); 95 struct lm_lockname *wait_name = wait_glock->name; 96 struct lm_lockname *wake_name = key; 97 98 if (wake_name->ln_sbd != wait_name->ln_sbd || 99 wake_name->ln_number != wait_name->ln_number || 100 wake_name->ln_type != wait_name->ln_type) 101 return 0; 102 return autoremove_wake_function(wait, mode, sync, key); 103 } 104 105 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name) 106 { 107 u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0); 108 109 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS); 110 } 111 112 /** 113 * wake_up_glock - Wake up waiters on a glock 114 * @gl: the glock 115 */ 116 static void 
wake_up_glock(struct gfs2_glock *gl) 117 { 118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); 119 120 if (waitqueue_active(wq)) 121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); 122 } 123 124 static void gfs2_glock_dealloc(struct rcu_head *rcu) 125 { 126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 127 128 kfree(gl->gl_lksb.sb_lvbptr); 129 if (gl->gl_ops->go_flags & GLOF_ASPACE) 130 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 131 else 132 kmem_cache_free(gfs2_glock_cachep, gl); 133 } 134 135 /** 136 * glock_blocked_by_withdraw - determine if we can still use a glock 137 * @gl: the glock 138 * 139 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted 140 * when we're withdrawn. For example, to maintain metadata integrity, we should 141 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like 142 * iopen or the transaction glocks may be safely used because none of their 143 * metadata goes through the journal. So in general, we should disallow all 144 * glocks that are journaled, and allow all the others. One exception is: 145 * we need to allow our active journal to be promoted and demoted so others 146 * may recover it and we can reacquire it when they're done. 147 */ 148 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) 149 { 150 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 151 152 if (likely(!gfs2_withdrawn(sdp))) 153 return false; 154 if (gl->gl_ops->go_flags & GLOF_NONDISK) 155 return false; 156 if (!sdp->sd_jdesc || 157 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) 158 return false; 159 return true; 160 } 161 162 void gfs2_glock_free(struct gfs2_glock *gl) 163 { 164 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 165 166 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); 167 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 168 smp_mb(); 169 wake_up_glock(gl); 170 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 171 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 172 wake_up(&sdp->sd_glock_wait); 173 } 174 175 /** 176 * gfs2_glock_hold() - increment reference count on glock 177 * @gl: The glock to hold 178 * 179 */ 180 181 void gfs2_glock_hold(struct gfs2_glock *gl) 182 { 183 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 184 lockref_get(&gl->gl_lockref); 185 } 186 187 /** 188 * demote_ok - Check to see if it's ok to unlock a glock 189 * @gl: the glock 190 * 191 * Returns: 1 if it's ok 192 */ 193 194 static int demote_ok(const struct gfs2_glock *gl) 195 { 196 const struct gfs2_glock_operations *glops = gl->gl_ops; 197 198 if (gl->gl_state == LM_ST_UNLOCKED) 199 return 0; 200 if (!list_empty(&gl->gl_holders)) 201 return 0; 202 if (glops->go_demote_ok) 203 return glops->go_demote_ok(gl); 204 return 1; 205 } 206 207 208 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 209 { 210 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 211 return; 212 213 spin_lock(&lru_lock); 214 215 list_del(&gl->gl_lru); 216 list_add_tail(&gl->gl_lru, &lru_list); 217 218 if (!test_bit(GLF_LRU, &gl->gl_flags)) { 219 set_bit(GLF_LRU, &gl->gl_flags); 220 atomic_inc(&lru_count); 221 } 222 223 spin_unlock(&lru_lock); 224 } 225 226 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 227 { 228 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 229 return; 230 231 spin_lock(&lru_lock); 232 if (test_bit(GLF_LRU, &gl->gl_flags)) { 233 list_del_init(&gl->gl_lru); 234 atomic_dec(&lru_count); 235 clear_bit(GLF_LRU, &gl->gl_flags); 236 } 237 spin_unlock(&lru_lock); 238 } 239 240 /* 241 * Enqueue the glock on 
the work queue. Passes one glock reference on to the 242 * work queue. 243 */ 244 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 245 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { 246 /* 247 * We are holding the lockref spinlock, and the work was still 248 * queued above. The queued work (glock_work_func) takes that 249 * spinlock before dropping its glock reference(s), so it 250 * cannot have dropped them in the meantime. 251 */ 252 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); 253 gl->gl_lockref.count--; 254 } 255 } 256 257 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 258 spin_lock(&gl->gl_lockref.lock); 259 __gfs2_glock_queue_work(gl, delay); 260 spin_unlock(&gl->gl_lockref.lock); 261 } 262 263 static void __gfs2_glock_put(struct gfs2_glock *gl) 264 { 265 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 266 struct address_space *mapping = gfs2_glock2aspace(gl); 267 268 lockref_mark_dead(&gl->gl_lockref); 269 270 gfs2_glock_remove_from_lru(gl); 271 spin_unlock(&gl->gl_lockref.lock); 272 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 273 if (mapping) { 274 truncate_inode_pages_final(mapping); 275 if (!gfs2_withdrawn(sdp)) 276 GLOCK_BUG_ON(gl, !mapping_empty(mapping)); 277 } 278 trace_gfs2_glock_put(gl); 279 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); 280 } 281 282 /* 283 * Cause the glock to be put in work queue context. 284 */ 285 void gfs2_glock_queue_put(struct gfs2_glock *gl) 286 { 287 gfs2_glock_queue_work(gl, 0); 288 } 289 290 /** 291 * gfs2_glock_put() - Decrement reference count on glock 292 * @gl: The glock to put 293 * 294 */ 295 296 void gfs2_glock_put(struct gfs2_glock *gl) 297 { 298 if (lockref_put_or_lock(&gl->gl_lockref)) 299 return; 300 301 __gfs2_glock_put(gl); 302 } 303 304 /** 305 * may_grant - check if it's ok to grant a new lock 306 * @gl: The glock 307 * @gh: The lock request which we wish to grant 308 * 309 * Returns: true if it's ok to grant the lock 310 */ 311 312 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) 313 { 314 const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list); 315 316 if (gh != gh_head) { 317 /** 318 * Here we make a special exception to grant holders who agree 319 * to share the EX lock with other holders who also have the 320 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE bit 321 * set, we grant more holders with the bit set. 
322 */ 323 if (gh_head->gh_state == LM_ST_EXCLUSIVE && 324 (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) && 325 gh->gh_state == LM_ST_EXCLUSIVE && 326 (gh->gh_flags & LM_FLAG_NODE_SCOPE)) 327 return 1; 328 if ((gh->gh_state == LM_ST_EXCLUSIVE || 329 gh_head->gh_state == LM_ST_EXCLUSIVE)) 330 return 0; 331 } 332 if (gl->gl_state == gh->gh_state) 333 return 1; 334 if (gh->gh_flags & GL_EXACT) 335 return 0; 336 if (gl->gl_state == LM_ST_EXCLUSIVE) { 337 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) 338 return 1; 339 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) 340 return 1; 341 } 342 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) 343 return 1; 344 return 0; 345 } 346 347 static void gfs2_holder_wake(struct gfs2_holder *gh) 348 { 349 clear_bit(HIF_WAIT, &gh->gh_iflags); 350 smp_mb__after_atomic(); 351 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 352 if (gh->gh_flags & GL_ASYNC) { 353 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; 354 355 wake_up(&sdp->sd_async_glock_wait); 356 } 357 } 358 359 /** 360 * do_error - Something unexpected has happened during a lock request 361 * @gl: The glock 362 * @ret: The status from the DLM 363 */ 364 365 static void do_error(struct gfs2_glock *gl, const int ret) 366 { 367 struct gfs2_holder *gh, *tmp; 368 369 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 370 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 371 continue; 372 if (ret & LM_OUT_ERROR) 373 gh->gh_error = -EIO; 374 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 375 gh->gh_error = GLR_TRYFAILED; 376 else 377 continue; 378 list_del_init(&gh->gh_list); 379 trace_gfs2_glock_queue(gh, 0); 380 gfs2_holder_wake(gh); 381 } 382 } 383 384 /** 385 * do_promote - promote as many requests as possible on the current queue 386 * @gl: The glock 387 * 388 * Returns: 1 if there is a blocked holder at the head of the list, or 2 389 * if a type specific operation is underway. 
390 */ 391 392 static int do_promote(struct gfs2_glock *gl) 393 __releases(&gl->gl_lockref.lock) 394 __acquires(&gl->gl_lockref.lock) 395 { 396 const struct gfs2_glock_operations *glops = gl->gl_ops; 397 struct gfs2_holder *gh, *tmp; 398 int ret; 399 400 restart: 401 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 402 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 403 continue; 404 if (may_grant(gl, gh)) { 405 if (gh->gh_list.prev == &gl->gl_holders && 406 glops->go_lock) { 407 spin_unlock(&gl->gl_lockref.lock); 408 /* FIXME: eliminate this eventually */ 409 ret = glops->go_lock(gh); 410 spin_lock(&gl->gl_lockref.lock); 411 if (ret) { 412 if (ret == 1) 413 return 2; 414 gh->gh_error = ret; 415 list_del_init(&gh->gh_list); 416 trace_gfs2_glock_queue(gh, 0); 417 gfs2_holder_wake(gh); 418 goto restart; 419 } 420 set_bit(HIF_HOLDER, &gh->gh_iflags); 421 trace_gfs2_promote(gh, 1); 422 gfs2_holder_wake(gh); 423 goto restart; 424 } 425 set_bit(HIF_HOLDER, &gh->gh_iflags); 426 trace_gfs2_promote(gh, 0); 427 gfs2_holder_wake(gh); 428 continue; 429 } 430 if (gh->gh_list.prev == &gl->gl_holders) 431 return 1; 432 do_error(gl, 0); 433 break; 434 } 435 return 0; 436 } 437 438 /** 439 * find_first_waiter - find the first gh that's waiting for the glock 440 * @gl: the glock 441 */ 442 443 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 444 { 445 struct gfs2_holder *gh; 446 447 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 448 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 449 return gh; 450 } 451 return NULL; 452 } 453 454 /** 455 * state_change - record that the glock is now in a different state 456 * @gl: the glock 457 * @new_state: the new state 458 */ 459 460 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 461 { 462 int held1, held2; 463 464 held1 = (gl->gl_state != LM_ST_UNLOCKED); 465 held2 = (new_state != LM_ST_UNLOCKED); 466 467 if (held1 != held2) { 468 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 469 if (held2) 470 gl->gl_lockref.count++; 471 else 472 gl->gl_lockref.count--; 473 } 474 if (new_state != gl->gl_target) 475 /* shorten our minimum hold time */ 476 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 477 GL_GLOCK_MIN_HOLD); 478 gl->gl_state = new_state; 479 gl->gl_tchange = jiffies; 480 } 481 482 static void gfs2_set_demote(struct gfs2_glock *gl) 483 { 484 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 485 486 set_bit(GLF_DEMOTE, &gl->gl_flags); 487 smp_mb(); 488 wake_up(&sdp->sd_async_glock_wait); 489 } 490 491 static void gfs2_demote_wake(struct gfs2_glock *gl) 492 { 493 gl->gl_demote_state = LM_ST_EXCLUSIVE; 494 clear_bit(GLF_DEMOTE, &gl->gl_flags); 495 smp_mb__after_atomic(); 496 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 497 } 498 499 /** 500 * finish_xmote - The DLM has replied to one of our lock requests 501 * @gl: The glock 502 * @ret: The status from the DLM 503 * 504 */ 505 506 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 507 { 508 const struct gfs2_glock_operations *glops = gl->gl_ops; 509 struct gfs2_holder *gh; 510 unsigned state = ret & LM_OUT_ST_MASK; 511 int rv; 512 513 spin_lock(&gl->gl_lockref.lock); 514 trace_gfs2_glock_state_change(gl, state); 515 state_change(gl, state); 516 gh = find_first_waiter(gl); 517 518 /* Demote to UN request arrived during demote to SH or DF */ 519 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 520 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 521 gl->gl_target = LM_ST_UNLOCKED; 522 523 /* Check for state != 
intended state */ 524 if (unlikely(state != gl->gl_target)) { 525 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 526 /* move to back of queue and try next entry */ 527 if (ret & LM_OUT_CANCELED) { 528 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 529 list_move_tail(&gh->gh_list, &gl->gl_holders); 530 gh = find_first_waiter(gl); 531 gl->gl_target = gh->gh_state; 532 goto retry; 533 } 534 /* Some error or failed "try lock" - report it */ 535 if ((ret & LM_OUT_ERROR) || 536 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 537 gl->gl_target = gl->gl_state; 538 do_error(gl, ret); 539 goto out; 540 } 541 } 542 switch(state) { 543 /* Unlocked due to conversion deadlock, try again */ 544 case LM_ST_UNLOCKED: 545 retry: 546 do_xmote(gl, gh, gl->gl_target); 547 break; 548 /* Conversion fails, unlock and try again */ 549 case LM_ST_SHARED: 550 case LM_ST_DEFERRED: 551 do_xmote(gl, gh, LM_ST_UNLOCKED); 552 break; 553 default: /* Everything else */ 554 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", 555 gl->gl_target, state); 556 GLOCK_BUG_ON(gl, 1); 557 } 558 spin_unlock(&gl->gl_lockref.lock); 559 return; 560 } 561 562 /* Fast path - we got what we asked for */ 563 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 564 gfs2_demote_wake(gl); 565 if (state != LM_ST_UNLOCKED) { 566 if (glops->go_xmote_bh) { 567 spin_unlock(&gl->gl_lockref.lock); 568 rv = glops->go_xmote_bh(gl); 569 spin_lock(&gl->gl_lockref.lock); 570 if (rv) { 571 do_error(gl, rv); 572 goto out; 573 } 574 } 575 rv = do_promote(gl); 576 if (rv == 2) 577 goto out_locked; 578 } 579 out: 580 clear_bit(GLF_LOCK, &gl->gl_flags); 581 out_locked: 582 spin_unlock(&gl->gl_lockref.lock); 583 } 584 585 /** 586 * do_xmote - Calls the DLM to change the state of a lock 587 * @gl: The lock state 588 * @gh: The holder (only for promotes) 589 * @target: The target lock state 590 * 591 */ 592 593 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) 594 __releases(&gl->gl_lockref.lock) 595 __acquires(&gl->gl_lockref.lock) 596 { 597 const struct gfs2_glock_operations *glops = gl->gl_ops; 598 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 599 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); 600 int ret; 601 602 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && 603 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 604 return; 605 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 606 LM_FLAG_PRIORITY); 607 GLOCK_BUG_ON(gl, gl->gl_state == target); 608 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 609 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 610 glops->go_inval) { 611 /* 612 * If another process is already doing the invalidate, let that 613 * finish first. The glock state machine will get back to this 614 * holder again later. 615 */ 616 if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS, 617 &gl->gl_flags)) 618 return; 619 do_error(gl, 0); /* Fail queued try locks */ 620 } 621 gl->gl_req = target; 622 set_bit(GLF_BLOCKING, &gl->gl_flags); 623 if ((gl->gl_req == LM_ST_UNLOCKED) || 624 (gl->gl_state == LM_ST_EXCLUSIVE) || 625 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) 626 clear_bit(GLF_BLOCKING, &gl->gl_flags); 627 spin_unlock(&gl->gl_lockref.lock); 628 if (glops->go_sync) { 629 ret = glops->go_sync(gl); 630 /* If we had a problem syncing (due to io errors or whatever), 631 * we should not invalidate the metadata or tell dlm to 632 * release the glock to other nodes. 
633 */ 634 if (ret) { 635 if (cmpxchg(&sdp->sd_log_error, 0, ret)) { 636 fs_err(sdp, "Error %d syncing glock \n", ret); 637 gfs2_dump_glock(NULL, gl, true); 638 } 639 goto skip_inval; 640 } 641 } 642 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { 643 /* 644 * The call to go_sync should have cleared out the ail list. 645 * If there are still items, we have a problem. We ought to 646 * withdraw, but we can't because the withdraw code also uses 647 * glocks. Warn about the error, dump the glock, then fall 648 * through and wait for logd to do the withdraw for us. 649 */ 650 if ((atomic_read(&gl->gl_ail_count) != 0) && 651 (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) { 652 gfs2_glock_assert_warn(gl, 653 !atomic_read(&gl->gl_ail_count)); 654 gfs2_dump_glock(NULL, gl, true); 655 } 656 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); 657 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 658 } 659 660 skip_inval: 661 gfs2_glock_hold(gl); 662 /* 663 * Check for an error encountered since we called go_sync and go_inval. 664 * If so, we can't withdraw from the glock code because the withdraw 665 * code itself uses glocks (see function signal_our_withdraw) to 666 * change the mount to read-only. Most importantly, we must not call 667 * dlm to unlock the glock until the journal is in a known good state 668 * (after journal replay) otherwise other nodes may use the object 669 * (rgrp or dinode) and then later, journal replay will corrupt the 670 * file system. The best we can do here is wait for the logd daemon 671 * to see sd_log_error and withdraw, and in the meantime, requeue the 672 * work for later. 673 * 674 * However, if we're just unlocking the lock (say, for unmount, when 675 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete 676 * then it's okay to tell dlm to unlock it. 
677 */ 678 if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) 679 gfs2_withdraw_delayed(sdp); 680 if (glock_blocked_by_withdraw(gl)) { 681 if (target != LM_ST_UNLOCKED || 682 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) { 683 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); 684 goto out; 685 } 686 } 687 688 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 689 /* lock_dlm */ 690 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 691 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && 692 target == LM_ST_UNLOCKED && 693 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { 694 finish_xmote(gl, target); 695 gfs2_glock_queue_work(gl, 0); 696 } else if (ret) { 697 fs_err(sdp, "lm_lock ret %d\n", ret); 698 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); 699 } 700 } else { /* lock_nolock */ 701 finish_xmote(gl, target); 702 gfs2_glock_queue_work(gl, 0); 703 } 704 out: 705 spin_lock(&gl->gl_lockref.lock); 706 } 707 708 /** 709 * find_first_holder - find the first "holder" gh 710 * @gl: the glock 711 */ 712 713 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 714 { 715 struct gfs2_holder *gh; 716 717 if (!list_empty(&gl->gl_holders)) { 718 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 719 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 720 return gh; 721 } 722 return NULL; 723 } 724 725 /** 726 * run_queue - do all outstanding tasks related to a glock 727 * @gl: The glock in question 728 * @nonblock: True if we must not block in run_queue 729 * 730 */ 731 732 static void run_queue(struct gfs2_glock *gl, const int nonblock) 733 __releases(&gl->gl_lockref.lock) 734 __acquires(&gl->gl_lockref.lock) 735 { 736 struct gfs2_holder *gh = NULL; 737 int ret; 738 739 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 740 return; 741 742 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 743 744 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 745 gl->gl_demote_state != gl->gl_state) { 746 if (find_first_holder(gl)) 747 goto out_unlock; 748 if (nonblock) 749 goto out_sched; 750 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 751 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 752 gl->gl_target = gl->gl_demote_state; 753 } else { 754 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 755 gfs2_demote_wake(gl); 756 ret = do_promote(gl); 757 if (ret == 0) 758 goto out_unlock; 759 if (ret == 2) 760 goto out; 761 gh = find_first_waiter(gl); 762 gl->gl_target = gh->gh_state; 763 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 764 do_error(gl, 0); /* Fail queued try locks */ 765 } 766 do_xmote(gl, gh, gl->gl_target); 767 out: 768 return; 769 770 out_sched: 771 clear_bit(GLF_LOCK, &gl->gl_flags); 772 smp_mb__after_atomic(); 773 gl->gl_lockref.count++; 774 __gfs2_glock_queue_work(gl, 0); 775 return; 776 777 out_unlock: 778 clear_bit(GLF_LOCK, &gl->gl_flags); 779 smp_mb__after_atomic(); 780 return; 781 } 782 783 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) 784 { 785 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 786 787 if (ri->ri_magic == 0) 788 ri->ri_magic = cpu_to_be32(GFS2_MAGIC); 789 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC)) 790 ri->ri_generation_deleted = cpu_to_be64(generation); 791 } 792 793 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) 794 { 795 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 796 797 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC)) 798 return false; 799 return generation <= be64_to_cpu(ri->ri_generation_deleted); 800 } 801 802 static void 
gfs2_glock_poke(struct gfs2_glock *gl) 803 { 804 int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP; 805 struct gfs2_holder gh; 806 int error; 807 808 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); 809 error = gfs2_glock_nq(&gh); 810 if (!error) 811 gfs2_glock_dq(&gh); 812 gfs2_holder_uninit(&gh); 813 } 814 815 static bool gfs2_try_evict(struct gfs2_glock *gl) 816 { 817 struct gfs2_inode *ip; 818 bool evicted = false; 819 820 /* 821 * If there is contention on the iopen glock and we have an inode, try 822 * to grab and release the inode so that it can be evicted. This will 823 * allow the remote node to go ahead and delete the inode without us 824 * having to do it, which will avoid rgrp glock thrashing. 825 * 826 * The remote node is likely still holding the corresponding inode 827 * glock, so it will run before we get to verify that the delete has 828 * happened below. 829 */ 830 spin_lock(&gl->gl_lockref.lock); 831 ip = gl->gl_object; 832 if (ip && !igrab(&ip->i_inode)) 833 ip = NULL; 834 spin_unlock(&gl->gl_lockref.lock); 835 if (ip) { 836 struct gfs2_glock *inode_gl = NULL; 837 838 gl->gl_no_formal_ino = ip->i_no_formal_ino; 839 set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 840 d_prune_aliases(&ip->i_inode); 841 iput(&ip->i_inode); 842 843 /* If the inode was evicted, gl->gl_object will now be NULL. */ 844 spin_lock(&gl->gl_lockref.lock); 845 ip = gl->gl_object; 846 if (ip) { 847 inode_gl = ip->i_gl; 848 lockref_get(&inode_gl->gl_lockref); 849 clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 850 } 851 spin_unlock(&gl->gl_lockref.lock); 852 if (inode_gl) { 853 gfs2_glock_poke(inode_gl); 854 gfs2_glock_put(inode_gl); 855 } 856 evicted = !ip; 857 } 858 return evicted; 859 } 860 861 static void delete_work_func(struct work_struct *work) 862 { 863 struct delayed_work *dwork = to_delayed_work(work); 864 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); 865 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 866 struct inode *inode; 867 u64 no_addr = gl->gl_name.ln_number; 868 869 spin_lock(&gl->gl_lockref.lock); 870 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 871 spin_unlock(&gl->gl_lockref.lock); 872 873 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { 874 /* 875 * If we can evict the inode, give the remote node trying to 876 * delete the inode some time before verifying that the delete 877 * has happened. Otherwise, if we cause contention on the inode glock 878 * immediately, the remote node will think that we still have 879 * the inode in use, and so it will give up waiting. 880 * 881 * If we can't evict the inode, signal to the remote node that 882 * the inode is still in use. We'll later try to delete the 883 * inode locally in gfs2_evict_inode. 884 * 885 * FIXME: We only need to verify that the remote node has 886 * deleted the inode because nodes before this remote delete 887 * rework won't cooperate. At a later time, when we no longer 888 * care about compatibility with such nodes, we can skip this 889 * step entirely. 
890 */ 891 if (gfs2_try_evict(gl)) { 892 if (gfs2_queue_delete_work(gl, 5 * HZ)) 893 return; 894 } 895 goto out; 896 } 897 898 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, 899 GFS2_BLKST_UNLINKED); 900 if (!IS_ERR_OR_NULL(inode)) { 901 d_prune_aliases(inode); 902 iput(inode); 903 } 904 out: 905 gfs2_glock_put(gl); 906 } 907 908 static void glock_work_func(struct work_struct *work) 909 { 910 unsigned long delay = 0; 911 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 912 unsigned int drop_refs = 1; 913 914 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 915 finish_xmote(gl, gl->gl_reply); 916 drop_refs++; 917 } 918 spin_lock(&gl->gl_lockref.lock); 919 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 920 gl->gl_state != LM_ST_UNLOCKED && 921 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 922 unsigned long holdtime, now = jiffies; 923 924 holdtime = gl->gl_tchange + gl->gl_hold_time; 925 if (time_before(now, holdtime)) 926 delay = holdtime - now; 927 928 if (!delay) { 929 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 930 gfs2_set_demote(gl); 931 } 932 } 933 run_queue(gl, 0); 934 if (delay) { 935 /* Keep one glock reference for the work we requeue. */ 936 drop_refs--; 937 if (gl->gl_name.ln_type != LM_TYPE_INODE) 938 delay = 0; 939 __gfs2_glock_queue_work(gl, delay); 940 } 941 942 /* 943 * Drop the remaining glock references manually here. (Mind that 944 * __gfs2_glock_queue_work depends on the lockref spinlock being held 945 * here as well.) 946 */ 947 gl->gl_lockref.count -= drop_refs; 948 if (!gl->gl_lockref.count) { 949 __gfs2_glock_put(gl); 950 return; 951 } 952 spin_unlock(&gl->gl_lockref.lock); 953 } 954 955 static struct gfs2_glock *find_insert_glock(struct lm_lockname *name, 956 struct gfs2_glock *new) 957 { 958 struct wait_glock_queue wait; 959 wait_queue_head_t *wq = glock_waitqueue(name); 960 struct gfs2_glock *gl; 961 962 wait.name = name; 963 init_wait(&wait.wait); 964 wait.wait.func = glock_wake_function; 965 966 again: 967 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 968 rcu_read_lock(); 969 if (new) { 970 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, 971 &new->gl_node, ht_parms); 972 if (IS_ERR(gl)) 973 goto out; 974 } else { 975 gl = rhashtable_lookup_fast(&gl_hash_table, 976 name, ht_parms); 977 } 978 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { 979 rcu_read_unlock(); 980 schedule(); 981 goto again; 982 } 983 out: 984 rcu_read_unlock(); 985 finish_wait(wq, &wait.wait); 986 return gl; 987 } 988 989 /** 990 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 991 * @sdp: The GFS2 superblock 992 * @number: the lock number 993 * @glops: The glock_operations to use 994 * @create: If 0, don't create the glock if it doesn't exist 995 * @glp: the glock is returned here 996 * 997 * This does not lock a glock, just finds/creates structures for one. 
998 * 999 * Returns: errno 1000 */ 1001 1002 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 1003 const struct gfs2_glock_operations *glops, int create, 1004 struct gfs2_glock **glp) 1005 { 1006 struct super_block *s = sdp->sd_vfs; 1007 struct lm_lockname name = { .ln_number = number, 1008 .ln_type = glops->go_type, 1009 .ln_sbd = sdp }; 1010 struct gfs2_glock *gl, *tmp; 1011 struct address_space *mapping; 1012 struct kmem_cache *cachep; 1013 int ret = 0; 1014 1015 gl = find_insert_glock(&name, NULL); 1016 if (gl) { 1017 *glp = gl; 1018 return 0; 1019 } 1020 if (!create) 1021 return -ENOENT; 1022 1023 if (glops->go_flags & GLOF_ASPACE) 1024 cachep = gfs2_glock_aspace_cachep; 1025 else 1026 cachep = gfs2_glock_cachep; 1027 gl = kmem_cache_alloc(cachep, GFP_NOFS); 1028 if (!gl) 1029 return -ENOMEM; 1030 1031 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 1032 1033 if (glops->go_flags & GLOF_LVB) { 1034 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 1035 if (!gl->gl_lksb.sb_lvbptr) { 1036 kmem_cache_free(cachep, gl); 1037 return -ENOMEM; 1038 } 1039 } 1040 1041 atomic_inc(&sdp->sd_glock_disposal); 1042 gl->gl_node.next = NULL; 1043 gl->gl_flags = 0; 1044 gl->gl_name = name; 1045 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); 1046 gl->gl_lockref.count = 1; 1047 gl->gl_state = LM_ST_UNLOCKED; 1048 gl->gl_target = LM_ST_UNLOCKED; 1049 gl->gl_demote_state = LM_ST_EXCLUSIVE; 1050 gl->gl_ops = glops; 1051 gl->gl_dstamp = 0; 1052 preempt_disable(); 1053 /* We use the global stats to estimate the initial per-glock stats */ 1054 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 1055 preempt_enable(); 1056 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 1057 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 1058 gl->gl_tchange = jiffies; 1059 gl->gl_object = NULL; 1060 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 1061 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 1062 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) 1063 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); 1064 1065 mapping = gfs2_glock2aspace(gl); 1066 if (mapping) { 1067 mapping->a_ops = &gfs2_meta_aops; 1068 mapping->host = s->s_bdev->bd_inode; 1069 mapping->flags = 0; 1070 mapping_set_gfp_mask(mapping, GFP_NOFS); 1071 mapping->private_data = NULL; 1072 mapping->writeback_index = 0; 1073 } 1074 1075 tmp = find_insert_glock(&name, gl); 1076 if (!tmp) { 1077 *glp = gl; 1078 goto out; 1079 } 1080 if (IS_ERR(tmp)) { 1081 ret = PTR_ERR(tmp); 1082 goto out_free; 1083 } 1084 *glp = tmp; 1085 1086 out_free: 1087 kfree(gl->gl_lksb.sb_lvbptr); 1088 kmem_cache_free(cachep, gl); 1089 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 1090 wake_up(&sdp->sd_glock_wait); 1091 1092 out: 1093 return ret; 1094 } 1095 1096 /** 1097 * gfs2_holder_init - initialize a struct gfs2_holder in the default way 1098 * @gl: the glock 1099 * @state: the state we're requesting 1100 * @flags: the modifier flags 1101 * @gh: the holder structure 1102 * 1103 */ 1104 1105 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, 1106 struct gfs2_holder *gh) 1107 { 1108 INIT_LIST_HEAD(&gh->gh_list); 1109 gh->gh_gl = gl; 1110 gh->gh_ip = _RET_IP_; 1111 gh->gh_owner_pid = get_pid(task_pid(current)); 1112 gh->gh_state = state; 1113 gh->gh_flags = flags; 1114 gh->gh_error = 0; 1115 gh->gh_iflags = 0; 1116 gfs2_glock_hold(gl); 1117 } 1118 1119 /** 1120 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 1121 * @state: the state we're requesting 1122 * @flags: the modifier flags 1123 * @gh: the holder 
structure 1124 * 1125 * Don't mess with the glock. 1126 * 1127 */ 1128 1129 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) 1130 { 1131 gh->gh_state = state; 1132 gh->gh_flags = flags; 1133 gh->gh_iflags = 0; 1134 gh->gh_ip = _RET_IP_; 1135 put_pid(gh->gh_owner_pid); 1136 gh->gh_owner_pid = get_pid(task_pid(current)); 1137 } 1138 1139 /** 1140 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference) 1141 * @gh: the holder structure 1142 * 1143 */ 1144 1145 void gfs2_holder_uninit(struct gfs2_holder *gh) 1146 { 1147 put_pid(gh->gh_owner_pid); 1148 gfs2_glock_put(gh->gh_gl); 1149 gfs2_holder_mark_uninitialized(gh); 1150 gh->gh_ip = 0; 1151 } 1152 1153 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, 1154 unsigned long start_time) 1155 { 1156 /* Have we waited longer than a second? */ 1157 if (time_after(jiffies, start_time + HZ)) { 1158 /* Lengthen the minimum hold time. */ 1159 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, 1160 GL_GLOCK_MAX_HOLD); 1161 } 1162 } 1163 1164 /** 1165 * gfs2_glock_wait - wait on a glock acquisition 1166 * @gh: the glock holder 1167 * 1168 * Returns: 0 on success 1169 */ 1170 1171 int gfs2_glock_wait(struct gfs2_holder *gh) 1172 { 1173 unsigned long start_time = jiffies; 1174 1175 might_sleep(); 1176 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1177 gfs2_glock_update_hold_time(gh->gh_gl, start_time); 1178 return gh->gh_error; 1179 } 1180 1181 static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) 1182 { 1183 int i; 1184 1185 for (i = 0; i < num_gh; i++) 1186 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) 1187 return 1; 1188 return 0; 1189 } 1190 1191 /** 1192 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions 1193 * @num_gh: the number of holders in the array 1194 * @ghs: the glock holder array 1195 * 1196 * Returns: 0 on success, meaning all glocks have been granted and are held. 1197 * -ESTALE if the request timed out, meaning all glocks were released, 1198 * and the caller should retry the operation. 1199 */ 1200 1201 int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) 1202 { 1203 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; 1204 int i, ret = 0, timeout = 0; 1205 unsigned long start_time = jiffies; 1206 bool keep_waiting; 1207 1208 might_sleep(); 1209 /* 1210 * Total up the (minimum hold time * 2) of all glocks and use that to 1211 * determine the max amount of time we should wait. 1212 */ 1213 for (i = 0; i < num_gh; i++) 1214 timeout += ghs[i].gh_gl->gl_hold_time << 1; 1215 1216 wait_for_dlm: 1217 if (!wait_event_timeout(sdp->sd_async_glock_wait, 1218 !glocks_pending(num_gh, ghs), timeout)) 1219 ret = -ESTALE; /* request timed out. */ 1220 1221 /* 1222 * If dlm granted all our requests, we need to adjust the glock 1223 * minimum hold time values according to how long we waited. 1224 * 1225 * If our request timed out, we need to repeatedly release any held 1226 * glocks we acquired thus far to allow dlm to acquire the remaining 1227 * glocks without deadlocking. We cannot currently cancel outstanding 1228 * glock acquisitions. 1229 * 1230 * The HIF_WAIT bit tells us which requests still need a response from 1231 * dlm. 1232 * 1233 * If dlm sent us any errors, we return the first error we find. 1234 */ 1235 keep_waiting = false; 1236 for (i = 0; i < num_gh; i++) { 1237 /* Skip holders we have already dequeued below. 
*/ 1238 if (!gfs2_holder_queued(&ghs[i])) 1239 continue; 1240 /* Skip holders with a pending DLM response. */ 1241 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) { 1242 keep_waiting = true; 1243 continue; 1244 } 1245 1246 if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) { 1247 if (ret == -ESTALE) 1248 gfs2_glock_dq(&ghs[i]); 1249 else 1250 gfs2_glock_update_hold_time(ghs[i].gh_gl, 1251 start_time); 1252 } 1253 if (!ret) 1254 ret = ghs[i].gh_error; 1255 } 1256 1257 if (keep_waiting) 1258 goto wait_for_dlm; 1259 1260 /* 1261 * At this point, we've either acquired all locks or released them all. 1262 */ 1263 return ret; 1264 } 1265 1266 /** 1267 * handle_callback - process a demote request 1268 * @gl: the glock 1269 * @state: the state the caller wants us to change to 1270 * @delay: zero to demote immediately; otherwise pending demote 1271 * @remote: true if this came from a different cluster node 1272 * 1273 * There are only two requests that we are going to see in actual 1274 * practice: LM_ST_SHARED and LM_ST_UNLOCKED 1275 */ 1276 1277 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 1278 unsigned long delay, bool remote) 1279 { 1280 if (delay) 1281 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 1282 else 1283 gfs2_set_demote(gl); 1284 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 1285 gl->gl_demote_state = state; 1286 gl->gl_demote_time = jiffies; 1287 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 1288 gl->gl_demote_state != state) { 1289 gl->gl_demote_state = LM_ST_UNLOCKED; 1290 } 1291 if (gl->gl_ops->go_callback) 1292 gl->gl_ops->go_callback(gl, remote); 1293 trace_gfs2_demote_rq(gl, remote); 1294 } 1295 1296 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 1297 { 1298 struct va_format vaf; 1299 va_list args; 1300 1301 va_start(args, fmt); 1302 1303 if (seq) { 1304 seq_vprintf(seq, fmt, args); 1305 } else { 1306 vaf.fmt = fmt; 1307 vaf.va = &args; 1308 1309 pr_err("%pV", &vaf); 1310 } 1311 1312 va_end(args); 1313 } 1314 1315 /** 1316 * add_to_queue - Add a holder to the wait queue (but look for recursion) 1317 * @gh: the holder structure to add 1318 * 1319 * Eventually we should move the recursive locking trap to a 1320 * debugging option or something like that. This is the fast 1321 * path and needs to have the minimum number of distractions. 
1322 * 1323 */ 1324 1325 static inline void add_to_queue(struct gfs2_holder *gh) 1326 __releases(&gl->gl_lockref.lock) 1327 __acquires(&gl->gl_lockref.lock) 1328 { 1329 struct gfs2_glock *gl = gh->gh_gl; 1330 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1331 struct list_head *insert_pt = NULL; 1332 struct gfs2_holder *gh2; 1333 int try_futile = 0; 1334 1335 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); 1336 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1337 GLOCK_BUG_ON(gl, true); 1338 1339 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 1340 if (test_bit(GLF_LOCK, &gl->gl_flags)) 1341 try_futile = !may_grant(gl, gh); 1342 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 1343 goto fail; 1344 } 1345 1346 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1347 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && 1348 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) 1349 goto trap_recursive; 1350 if (try_futile && 1351 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 1352 fail: 1353 gh->gh_error = GLR_TRYFAILED; 1354 gfs2_holder_wake(gh); 1355 return; 1356 } 1357 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1358 continue; 1359 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1360 insert_pt = &gh2->gh_list; 1361 } 1362 trace_gfs2_glock_queue(gh, 1); 1363 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1364 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1365 if (likely(insert_pt == NULL)) { 1366 list_add_tail(&gh->gh_list, &gl->gl_holders); 1367 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1368 goto do_cancel; 1369 return; 1370 } 1371 list_add_tail(&gh->gh_list, insert_pt); 1372 do_cancel: 1373 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 1374 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 1375 spin_unlock(&gl->gl_lockref.lock); 1376 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1377 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1378 spin_lock(&gl->gl_lockref.lock); 1379 } 1380 return; 1381 1382 trap_recursive: 1383 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); 1384 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); 1385 fs_err(sdp, "lock type: %d req lock state : %d\n", 1386 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 1387 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); 1388 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); 1389 fs_err(sdp, "lock type: %d req lock state : %d\n", 1390 gh->gh_gl->gl_name.ln_type, gh->gh_state); 1391 gfs2_dump_glock(NULL, gl, true); 1392 BUG(); 1393 } 1394 1395 /** 1396 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 1397 * @gh: the holder structure 1398 * 1399 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1400 * 1401 * Returns: 0, GLR_TRYFAILED, or errno on failure 1402 */ 1403 1404 int gfs2_glock_nq(struct gfs2_holder *gh) 1405 { 1406 struct gfs2_glock *gl = gh->gh_gl; 1407 int error = 0; 1408 1409 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) 1410 return -EIO; 1411 1412 if (test_bit(GLF_LRU, &gl->gl_flags)) 1413 gfs2_glock_remove_from_lru(gl); 1414 1415 spin_lock(&gl->gl_lockref.lock); 1416 add_to_queue(gh); 1417 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1418 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { 1419 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1420 gl->gl_lockref.count++; 1421 __gfs2_glock_queue_work(gl, 0); 1422 } 1423 run_queue(gl, 1); 1424 spin_unlock(&gl->gl_lockref.lock); 1425 1426 if (!(gh->gh_flags & GL_ASYNC)) 1427 error = gfs2_glock_wait(gh); 1428 1429 return error; 1430 } 1431 1432 /** 1433 * gfs2_glock_poll - poll to see if an 
async request has been completed 1434 * @gh: the holder 1435 * 1436 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1437 */ 1438 1439 int gfs2_glock_poll(struct gfs2_holder *gh) 1440 { 1441 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1442 } 1443 1444 /** 1445 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1446 * @gh: the glock holder 1447 * 1448 */ 1449 1450 void gfs2_glock_dq(struct gfs2_holder *gh) 1451 { 1452 struct gfs2_glock *gl = gh->gh_gl; 1453 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1454 unsigned delay = 0; 1455 int fast_path = 0; 1456 1457 spin_lock(&gl->gl_lockref.lock); 1458 /* 1459 * If we're in the process of file system withdraw, we cannot just 1460 * dequeue any glocks until our journal is recovered, lest we 1461 * introduce file system corruption. We need two exceptions to this 1462 * rule: We need to allow unlocking of nondisk glocks and the glock 1463 * for our own journal that needs recovery. 1464 */ 1465 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && 1466 glock_blocked_by_withdraw(gl) && 1467 gh->gh_gl != sdp->sd_jinode_gl) { 1468 sdp->sd_glock_dqs_held++; 1469 might_sleep(); 1470 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, 1471 TASK_UNINTERRUPTIBLE); 1472 } 1473 if (gh->gh_flags & GL_NOCACHE) 1474 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1475 1476 list_del_init(&gh->gh_list); 1477 clear_bit(HIF_HOLDER, &gh->gh_iflags); 1478 if (find_first_holder(gl) == NULL) { 1479 if (list_empty(&gl->gl_holders) && 1480 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1481 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1482 fast_path = 1; 1483 } 1484 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1485 gfs2_glock_add_to_lru(gl); 1486 1487 trace_gfs2_glock_queue(gh, 0); 1488 if (unlikely(!fast_path)) { 1489 gl->gl_lockref.count++; 1490 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1491 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1492 gl->gl_name.ln_type == LM_TYPE_INODE) 1493 delay = gl->gl_hold_time; 1494 __gfs2_glock_queue_work(gl, delay); 1495 } 1496 spin_unlock(&gl->gl_lockref.lock); 1497 } 1498 1499 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1500 { 1501 struct gfs2_glock *gl = gh->gh_gl; 1502 gfs2_glock_dq(gh); 1503 might_sleep(); 1504 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1505 } 1506 1507 /** 1508 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1509 * @gh: the holder structure 1510 * 1511 */ 1512 1513 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1514 { 1515 gfs2_glock_dq(gh); 1516 gfs2_holder_uninit(gh); 1517 } 1518 1519 /** 1520 * gfs2_glock_nq_num - acquire a glock based on lock number 1521 * @sdp: the filesystem 1522 * @number: the lock number 1523 * @glops: the glock operations for the type of glock 1524 * @state: the state to acquire the glock in 1525 * @flags: modifier flags for the acquisition 1526 * @gh: the struct gfs2_holder 1527 * 1528 * Returns: errno 1529 */ 1530 1531 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1532 const struct gfs2_glock_operations *glops, 1533 unsigned int state, u16 flags, struct gfs2_holder *gh) 1534 { 1535 struct gfs2_glock *gl; 1536 int error; 1537 1538 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1539 if (!error) { 1540 error = gfs2_glock_nq_init(gl, state, flags, gh); 1541 gfs2_glock_put(gl); 1542 } 1543 1544 return error; 1545 } 1546 1547 /** 1548 * glock_compare - Compare two struct gfs2_glock structures for sorting 1549 * @arg_a: the first structure 1550 * @arg_b: the second 
structure 1551 * 1552 */ 1553 1554 static int glock_compare(const void *arg_a, const void *arg_b) 1555 { 1556 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; 1557 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; 1558 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; 1559 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; 1560 1561 if (a->ln_number > b->ln_number) 1562 return 1; 1563 if (a->ln_number < b->ln_number) 1564 return -1; 1565 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); 1566 return 0; 1567 } 1568 1569 /** 1570 * nq_m_sync - synchronously acquire more than one glock in deadlock free order 1571 * @num_gh: the number of structures 1572 * @ghs: an array of struct gfs2_holder structures 1573 * @p: placeholder for the holder structure to pass back 1574 * 1575 * Returns: 0 on success (all glocks acquired), 1576 * errno on failure (no glocks acquired) 1577 */ 1578 1579 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, 1580 struct gfs2_holder **p) 1581 { 1582 unsigned int x; 1583 int error = 0; 1584 1585 for (x = 0; x < num_gh; x++) 1586 p[x] = &ghs[x]; 1587 1588 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); 1589 1590 for (x = 0; x < num_gh; x++) { 1591 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1592 1593 error = gfs2_glock_nq(p[x]); 1594 if (error) { 1595 while (x--) 1596 gfs2_glock_dq(p[x]); 1597 break; 1598 } 1599 } 1600 1601 return error; 1602 } 1603 1604 /** 1605 * gfs2_glock_nq_m - acquire multiple glocks 1606 * @num_gh: the number of structures 1607 * @ghs: an array of struct gfs2_holder structures 1608 * 1609 * 1610 * Returns: 0 on success (all glocks acquired), 1611 * errno on failure (no glocks acquired) 1612 */ 1613 1614 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1615 { 1616 struct gfs2_holder *tmp[4]; 1617 struct gfs2_holder **pph = tmp; 1618 int error = 0; 1619 1620 switch(num_gh) { 1621 case 0: 1622 return 0; 1623 case 1: 1624 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1625 return gfs2_glock_nq(ghs); 1626 default: 1627 if (num_gh <= 4) 1628 break; 1629 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *), 1630 GFP_NOFS); 1631 if (!pph) 1632 return -ENOMEM; 1633 } 1634 1635 error = nq_m_sync(num_gh, ghs, pph); 1636 1637 if (pph != tmp) 1638 kfree(pph); 1639 1640 return error; 1641 } 1642 1643 /** 1644 * gfs2_glock_dq_m - release multiple glocks 1645 * @num_gh: the number of structures 1646 * @ghs: an array of struct gfs2_holder structures 1647 * 1648 */ 1649 1650 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1651 { 1652 while (num_gh--) 1653 gfs2_glock_dq(&ghs[num_gh]); 1654 } 1655 1656 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1657 { 1658 unsigned long delay = 0; 1659 unsigned long holdtime; 1660 unsigned long now = jiffies; 1661 1662 gfs2_glock_hold(gl); 1663 spin_lock(&gl->gl_lockref.lock); 1664 holdtime = gl->gl_tchange + gl->gl_hold_time; 1665 if (!list_empty(&gl->gl_holders) && 1666 gl->gl_name.ln_type == LM_TYPE_INODE) { 1667 if (time_before(now, holdtime)) 1668 delay = holdtime - now; 1669 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1670 delay = gl->gl_hold_time; 1671 } 1672 handle_callback(gl, state, delay, true); 1673 __gfs2_glock_queue_work(gl, delay); 1674 spin_unlock(&gl->gl_lockref.lock); 1675 } 1676 1677 /** 1678 * gfs2_should_freeze - Figure out if glock should be frozen 1679 * @gl: The glock in question 1680 * 1681 * Glocks are not frozen if (a) the result of the dlm operation 
is 1682 * an error, (b) the locking operation was an unlock operation or 1683 * (c) if there is a "noexp" flagged request anywhere in the queue 1684 * 1685 * Returns: 1 if freezing should occur, 0 otherwise 1686 */ 1687 1688 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1689 { 1690 const struct gfs2_holder *gh; 1691 1692 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1693 return 0; 1694 if (gl->gl_target == LM_ST_UNLOCKED) 1695 return 0; 1696 1697 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1698 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1699 continue; 1700 if (LM_FLAG_NOEXP & gh->gh_flags) 1701 return 0; 1702 } 1703 1704 return 1; 1705 } 1706 1707 /** 1708 * gfs2_glock_complete - Callback used by locking 1709 * @gl: Pointer to the glock 1710 * @ret: The return value from the dlm 1711 * 1712 * The gl_reply field is under the gl_lockref.lock lock so that it is ok 1713 * to use a bitfield shared with other glock state fields. 1714 */ 1715 1716 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1717 { 1718 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1719 1720 spin_lock(&gl->gl_lockref.lock); 1721 gl->gl_reply = ret; 1722 1723 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1724 if (gfs2_should_freeze(gl)) { 1725 set_bit(GLF_FROZEN, &gl->gl_flags); 1726 spin_unlock(&gl->gl_lockref.lock); 1727 return; 1728 } 1729 } 1730 1731 gl->gl_lockref.count++; 1732 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1733 __gfs2_glock_queue_work(gl, 0); 1734 spin_unlock(&gl->gl_lockref.lock); 1735 } 1736 1737 static int glock_cmp(void *priv, const struct list_head *a, 1738 const struct list_head *b) 1739 { 1740 struct gfs2_glock *gla, *glb; 1741 1742 gla = list_entry(a, struct gfs2_glock, gl_lru); 1743 glb = list_entry(b, struct gfs2_glock, gl_lru); 1744 1745 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1746 return 1; 1747 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1748 return -1; 1749 1750 return 0; 1751 } 1752 1753 /** 1754 * gfs2_dispose_glock_lru - Demote a list of glocks 1755 * @list: The list to dispose of 1756 * 1757 * Disposing of glocks may involve disk accesses, so that here we sort 1758 * the glocks by number (i.e. disk location of the inodes) so that if 1759 * there are any such accesses, they'll be sent in order (mostly). 1760 * 1761 * Must be called under the lru_lock, but may drop and retake this 1762 * lock. 
While the lru_lock is dropped, entries may vanish from the 1763 * list, but no new entries will appear on the list (since it is 1764 * private) 1765 */ 1766 1767 static void gfs2_dispose_glock_lru(struct list_head *list) 1768 __releases(&lru_lock) 1769 __acquires(&lru_lock) 1770 { 1771 struct gfs2_glock *gl; 1772 1773 list_sort(NULL, list, glock_cmp); 1774 1775 while(!list_empty(list)) { 1776 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 1777 list_del_init(&gl->gl_lru); 1778 if (!spin_trylock(&gl->gl_lockref.lock)) { 1779 add_back_to_lru: 1780 list_add(&gl->gl_lru, &lru_list); 1781 set_bit(GLF_LRU, &gl->gl_flags); 1782 atomic_inc(&lru_count); 1783 continue; 1784 } 1785 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1786 spin_unlock(&gl->gl_lockref.lock); 1787 goto add_back_to_lru; 1788 } 1789 gl->gl_lockref.count++; 1790 if (demote_ok(gl)) 1791 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1792 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); 1793 __gfs2_glock_queue_work(gl, 0); 1794 spin_unlock(&gl->gl_lockref.lock); 1795 cond_resched_lock(&lru_lock); 1796 } 1797 } 1798 1799 /** 1800 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 1801 * @nr: The number of entries to scan 1802 * 1803 * This function selects the entries on the LRU which are able to 1804 * be demoted, and then kicks off the process by calling 1805 * gfs2_dispose_glock_lru() above. 1806 */ 1807 1808 static long gfs2_scan_glock_lru(int nr) 1809 { 1810 struct gfs2_glock *gl; 1811 LIST_HEAD(skipped); 1812 LIST_HEAD(dispose); 1813 long freed = 0; 1814 1815 spin_lock(&lru_lock); 1816 while ((nr-- >= 0) && !list_empty(&lru_list)) { 1817 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); 1818 1819 /* Test for being demotable */ 1820 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { 1821 list_move(&gl->gl_lru, &dispose); 1822 atomic_dec(&lru_count); 1823 clear_bit(GLF_LRU, &gl->gl_flags); 1824 freed++; 1825 continue; 1826 } 1827 1828 list_move(&gl->gl_lru, &skipped); 1829 } 1830 list_splice(&skipped, &lru_list); 1831 if (!list_empty(&dispose)) 1832 gfs2_dispose_glock_lru(&dispose); 1833 spin_unlock(&lru_lock); 1834 1835 return freed; 1836 } 1837 1838 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 1839 struct shrink_control *sc) 1840 { 1841 if (!(sc->gfp_mask & __GFP_FS)) 1842 return SHRINK_STOP; 1843 return gfs2_scan_glock_lru(sc->nr_to_scan); 1844 } 1845 1846 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 1847 struct shrink_control *sc) 1848 { 1849 return vfs_pressure_ratio(atomic_read(&lru_count)); 1850 } 1851 1852 static struct shrinker glock_shrinker = { 1853 .seeks = DEFAULT_SEEKS, 1854 .count_objects = gfs2_glock_shrink_count, 1855 .scan_objects = gfs2_glock_shrink_scan, 1856 }; 1857 1858 /** 1859 * glock_hash_walk - Call a function for each glock in a hash bucket 1860 * @examiner: the function 1861 * @sdp: the filesystem 1862 * 1863 * Note that the function can be called multiple times on the same 1864 * object. So the user must ensure that the function can cope with 1865 * that. 
1866 */ 1867 1868 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 1869 { 1870 struct gfs2_glock *gl; 1871 struct rhashtable_iter iter; 1872 1873 rhashtable_walk_enter(&gl_hash_table, &iter); 1874 1875 do { 1876 rhashtable_walk_start(&iter); 1877 1878 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) 1879 if (gl->gl_name.ln_sbd == sdp && 1880 lockref_get_not_dead(&gl->gl_lockref)) 1881 examiner(gl); 1882 1883 rhashtable_walk_stop(&iter); 1884 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); 1885 1886 rhashtable_walk_exit(&iter); 1887 } 1888 1889 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay) 1890 { 1891 bool queued; 1892 1893 spin_lock(&gl->gl_lockref.lock); 1894 queued = queue_delayed_work(gfs2_delete_workqueue, 1895 &gl->gl_delete, delay); 1896 if (queued) 1897 set_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1898 spin_unlock(&gl->gl_lockref.lock); 1899 return queued; 1900 } 1901 1902 void gfs2_cancel_delete_work(struct gfs2_glock *gl) 1903 { 1904 if (cancel_delayed_work_sync(&gl->gl_delete)) { 1905 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1906 gfs2_glock_put(gl); 1907 } 1908 } 1909 1910 bool gfs2_delete_work_queued(const struct gfs2_glock *gl) 1911 { 1912 return test_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1913 } 1914 1915 static void flush_delete_work(struct gfs2_glock *gl) 1916 { 1917 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { 1918 if (cancel_delayed_work(&gl->gl_delete)) { 1919 queue_delayed_work(gfs2_delete_workqueue, 1920 &gl->gl_delete, 0); 1921 } 1922 } 1923 gfs2_glock_queue_work(gl, 0); 1924 } 1925 1926 void gfs2_flush_delete_work(struct gfs2_sbd *sdp) 1927 { 1928 glock_hash_walk(flush_delete_work, sdp); 1929 flush_workqueue(gfs2_delete_workqueue); 1930 } 1931 1932 /** 1933 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 1934 * @gl: The glock to thaw 1935 * 1936 */ 1937 1938 static void thaw_glock(struct gfs2_glock *gl) 1939 { 1940 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { 1941 gfs2_glock_put(gl); 1942 return; 1943 } 1944 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1945 gfs2_glock_queue_work(gl, 0); 1946 } 1947 1948 /** 1949 * clear_glock - look at a glock and see if we can free it from glock cache 1950 * @gl: the glock to look at 1951 * 1952 */ 1953 1954 static void clear_glock(struct gfs2_glock *gl) 1955 { 1956 gfs2_glock_remove_from_lru(gl); 1957 1958 spin_lock(&gl->gl_lockref.lock); 1959 if (gl->gl_state != LM_ST_UNLOCKED) 1960 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1961 __gfs2_glock_queue_work(gl, 0); 1962 spin_unlock(&gl->gl_lockref.lock); 1963 } 1964 1965 /** 1966 * gfs2_glock_thaw - Thaw any frozen glocks 1967 * @sdp: The super block 1968 * 1969 */ 1970 1971 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 1972 { 1973 glock_hash_walk(thaw_glock, sdp); 1974 } 1975 1976 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 1977 { 1978 spin_lock(&gl->gl_lockref.lock); 1979 gfs2_dump_glock(seq, gl, fsid); 1980 spin_unlock(&gl->gl_lockref.lock); 1981 } 1982 1983 static void dump_glock_func(struct gfs2_glock *gl) 1984 { 1985 dump_glock(NULL, gl, true); 1986 } 1987 1988 /** 1989 * gfs2_gl_hash_clear - Empty out the glock hash table 1990 * @sdp: the filesystem 1991 * 1992 * Called when unmounting the filesystem. 
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event_timeout(sdp->sd_glock_wait,
			   atomic_read(&sdp->sd_glock_disposal) == 0,
			   HZ * 600);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_glock_assert_withdraw(gl, ret == 0);

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & LM_FLAG_NODE_SCOPE)
		*p++ = 'n';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
			const char *fs_id_buf)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	rcu_read_lock();
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       fs_id_buf, state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (!list_empty(&gl->gl_holders))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	if (test_bit(GLF_PENDING_DELETE, gflags))
		*p++ = 'P';
	if (test_bit(GLF_FREEING, gflags))
		*p++ = 'x';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object; capital letters are used to indicate objects:
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings, which are
 * in [] so that it's possible to see if they are composed of spaces, for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
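 *
 * A purely illustrative example (the values below are invented, not taken
 * from a real dump) of a glock line followed by one holder line:
 *
 *   G: s:SH n:2/12ab f:Iqo t:SH d:UN/0 a:0 v:0 r:3 m:200 p:1
 *    H: s:SH f:H e:0 p:1234 [cat] <caller>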
 *
 */

void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
	unsigned long nrpages = 0;

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct address_space *mapping = gfs2_glock2aspace(gl);

		nrpages = mapping->nrpages;
	}
	memset(fs_id_buf, 0, sizeof(fs_id_buf));
	if (fsid && sdp) /* safety precaution */
		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
		       "v:%d r:%d m:%ld p:%lu\n",
		       fs_id_buf, state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh, fs_id_buf);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl, fs_id_buf);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT] = "srtt",
	[GFS2_LKS_SRTTVAR] = "srttvar",
	[GFS2_LKS_SRTTB] = "srttb",
	[GFS2_LKS_SRTTVARB] = "srttvarb",
	[GFS2_LKS_SIRT] = "sirt",
	[GFS2_LKS_SIRTVAR] = "sirtvar",
	[GFS2_LKS_DCOUNT] = "dlm",
	[GFS2_LKS_QCOUNT] = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
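
/*
 * Each sbstats output line covers one (glock type, statistic) pair, with
 * one column per possible CPU.  The seq_file position encodes both parts:
 * index = pos >> 3 selects the glock type (the row) and subindex = pos & 0x07
 * selects the statistic (there are eight per type), so for example pos == 11
 * decodes to index 1, subindex 3.  Index 0 is the header row, which prints
 * the CPU numbers instead of counters.
 */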
"cpu": gfs2_stype[subindex]); 2252 2253 for_each_possible_cpu(i) { 2254 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 2255 2256 if (index == 0) 2257 seq_printf(seq, " %15u", i); 2258 else 2259 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> 2260 lkstats[index - 1].stats[subindex]); 2261 } 2262 seq_putc(seq, '\n'); 2263 return 0; 2264 } 2265 2266 int __init gfs2_glock_init(void) 2267 { 2268 int i, ret; 2269 2270 ret = rhashtable_init(&gl_hash_table, &ht_parms); 2271 if (ret < 0) 2272 return ret; 2273 2274 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 2275 WQ_HIGHPRI | WQ_FREEZABLE, 0); 2276 if (!glock_workqueue) { 2277 rhashtable_destroy(&gl_hash_table); 2278 return -ENOMEM; 2279 } 2280 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 2281 WQ_MEM_RECLAIM | WQ_FREEZABLE, 2282 0); 2283 if (!gfs2_delete_workqueue) { 2284 destroy_workqueue(glock_workqueue); 2285 rhashtable_destroy(&gl_hash_table); 2286 return -ENOMEM; 2287 } 2288 2289 ret = register_shrinker(&glock_shrinker); 2290 if (ret) { 2291 destroy_workqueue(gfs2_delete_workqueue); 2292 destroy_workqueue(glock_workqueue); 2293 rhashtable_destroy(&gl_hash_table); 2294 return ret; 2295 } 2296 2297 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++) 2298 init_waitqueue_head(glock_wait_table + i); 2299 2300 return 0; 2301 } 2302 2303 void gfs2_glock_exit(void) 2304 { 2305 unregister_shrinker(&glock_shrinker); 2306 rhashtable_destroy(&gl_hash_table); 2307 destroy_workqueue(glock_workqueue); 2308 destroy_workqueue(gfs2_delete_workqueue); 2309 } 2310 2311 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) 2312 { 2313 struct gfs2_glock *gl = gi->gl; 2314 2315 if (gl) { 2316 if (n == 0) 2317 return; 2318 if (!lockref_put_not_zero(&gl->gl_lockref)) 2319 gfs2_glock_queue_put(gl); 2320 } 2321 for (;;) { 2322 gl = rhashtable_walk_next(&gi->hti); 2323 if (IS_ERR_OR_NULL(gl)) { 2324 if (gl == ERR_PTR(-EAGAIN)) { 2325 n = 1; 2326 continue; 2327 } 2328 gl = NULL; 2329 break; 2330 } 2331 if (gl->gl_name.ln_sbd != gi->sdp) 2332 continue; 2333 if (n <= 1) { 2334 if (!lockref_get_not_dead(&gl->gl_lockref)) 2335 continue; 2336 break; 2337 } else { 2338 if (__lockref_is_dead(&gl->gl_lockref)) 2339 continue; 2340 n--; 2341 } 2342 } 2343 gi->gl = gl; 2344 } 2345 2346 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 2347 __acquires(RCU) 2348 { 2349 struct gfs2_glock_iter *gi = seq->private; 2350 loff_t n; 2351 2352 /* 2353 * We can either stay where we are, skip to the next hash table 2354 * entry, or start from the beginning. 
	 */
	if (*pos < gi->last_pos) {
		rhashtable_walk_exit(&gi->hti);
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
		n = *pos + 1;
	} else {
		n = *pos - gi->last_pos;
	}

	rhashtable_walk_start(&gi->hti);

	gfs2_glock_iter_next(gi, n);
	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi, 1);
	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr, false);
	return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next = gfs2_glock_seq_next,
	.stop = gfs2_glock_seq_stop,
	.show = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next = gfs2_glock_seq_next,
	.stop = gfs2_glock_seq_stop,
	.show = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_sops = {
	.start = gfs2_sbstats_seq_start,
	.next = gfs2_sbstats_seq_next,
	.stop = gfs2_sbstats_seq_stop,
	.show = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		/*
		 * Initially, we are "before" the first hash table entry; the
		 * first call to rhashtable_walk_next gets us the first entry.
		 */
		gi->last_pos = -1;
		gi->gl = NULL;
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	}
	return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		gfs2_glock_put(gi->gl);
	rhashtable_walk_exit(&gi->hti);
	return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static const struct file_operations gfs2_glocks_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glocks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner = THIS_MODULE,
	.open = gfs2_glstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = gfs2_glocks_release,
};

DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);
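
/*
 * The files created below live in a per-filesystem debugfs directory named
 * after the lock table, underneath the "gfs2" directory set up in
 * gfs2_register_debugfs().  With debugfs mounted in the usual place, that
 * is /sys/kernel/debug/gfs2/<table name>/{glocks,glstats,sbstats}, e.g.
 * (hypothetical table name):
 *
 *   cat /sys/kernel/debug/gfs2/mycluster:myfs/glocks
 */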

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glocks_fops);

	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glstats_fops);

	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_sbstats_fops);
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	debugfs_remove_recursive(sdp->debugfs_dir);
	sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}