1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/spinlock.h> 12 #include <linux/buffer_head.h> 13 #include <linux/delay.h> 14 #include <linux/sort.h> 15 #include <linux/hash.h> 16 #include <linux/jhash.h> 17 #include <linux/kallsyms.h> 18 #include <linux/gfs2_ondisk.h> 19 #include <linux/list.h> 20 #include <linux/wait.h> 21 #include <linux/module.h> 22 #include <linux/uaccess.h> 23 #include <linux/seq_file.h> 24 #include <linux/debugfs.h> 25 #include <linux/kthread.h> 26 #include <linux/freezer.h> 27 #include <linux/workqueue.h> 28 #include <linux/jiffies.h> 29 #include <linux/rcupdate.h> 30 #include <linux/rculist_bl.h> 31 #include <linux/bit_spinlock.h> 32 #include <linux/percpu.h> 33 #include <linux/list_sort.h> 34 #include <linux/lockref.h> 35 #include <linux/rhashtable.h> 36 37 #include "gfs2.h" 38 #include "incore.h" 39 #include "glock.h" 40 #include "glops.h" 41 #include "inode.h" 42 #include "lops.h" 43 #include "meta_io.h" 44 #include "quota.h" 45 #include "super.h" 46 #include "util.h" 47 #include "bmap.h" 48 #define CREATE_TRACE_POINTS 49 #include "trace_gfs2.h" 50 51 struct gfs2_glock_iter { 52 struct gfs2_sbd *sdp; /* incore superblock */ 53 struct rhashtable_iter hti; /* rhashtable iterator */ 54 struct gfs2_glock *gl; /* current glock struct */ 55 loff_t last_pos; /* last position */ 56 }; 57 58 typedef void (*glock_examiner) (struct gfs2_glock * gl); 59 60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 61 62 static struct dentry *gfs2_root; 63 static struct workqueue_struct *glock_workqueue; 64 struct workqueue_struct *gfs2_delete_workqueue; 65 static LIST_HEAD(lru_list); 66 static atomic_t lru_count = ATOMIC_INIT(0); 67 static DEFINE_SPINLOCK(lru_lock); 68 69 #define GFS2_GL_HASH_SHIFT 15 70 #define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT) 71 72 static const struct rhashtable_params ht_parms = { 73 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4, 74 .key_len = offsetofend(struct lm_lockname, ln_type), 75 .key_offset = offsetof(struct gfs2_glock, gl_name), 76 .head_offset = offsetof(struct gfs2_glock, gl_node), 77 }; 78 79 static struct rhashtable gl_hash_table; 80 81 #define GLOCK_WAIT_TABLE_BITS 12 82 #define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS) 83 static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned; 84 85 struct wait_glock_queue { 86 struct lm_lockname *name; 87 wait_queue_entry_t wait; 88 }; 89 90 static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode, 91 int sync, void *key) 92 { 93 struct wait_glock_queue *wait_glock = 94 container_of(wait, struct wait_glock_queue, wait); 95 struct lm_lockname *wait_name = wait_glock->name; 96 struct lm_lockname *wake_name = key; 97 98 if (wake_name->ln_sbd != wait_name->ln_sbd || 99 wake_name->ln_number != wait_name->ln_number || 100 wake_name->ln_type != wait_name->ln_type) 101 return 0; 102 return autoremove_wake_function(wait, mode, sync, key); 103 } 104 105 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name) 106 { 107 u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0); 108 109 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS); 110 } 111 112 /** 113 * wake_up_glock - Wake up waiters on a glock 114 * @gl: the glock 115 */ 116 static void wake_up_glock(struct gfs2_glock *gl) 117 { 118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); 119 120 if (waitqueue_active(wq)) 121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); 122 } 123 124 static void gfs2_glock_dealloc(struct rcu_head *rcu) 125 { 126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 127 128 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 129 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 130 } else { 131 kfree(gl->gl_lksb.sb_lvbptr); 132 kmem_cache_free(gfs2_glock_cachep, gl); 133 } 134 } 135 136 /** 137 * glock_blocked_by_withdraw - determine if we can still use a glock 138 * @gl: the glock 139 * 140 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted 141 * when we're withdrawn. For example, to maintain metadata integrity, we should 142 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like 143 * iopen or the transaction glocks may be safely used because none of their 144 * metadata goes through the journal. So in general, we should disallow all 145 * glocks that are journaled, and allow all the others. One exception is: 146 * we need to allow our active journal to be promoted and demoted so others 147 * may recover it and we can reacquire it when they're done. 148 */ 149 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) 150 { 151 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 152 153 if (likely(!gfs2_withdrawn(sdp))) 154 return false; 155 if (gl->gl_ops->go_flags & GLOF_NONDISK) 156 return false; 157 if (!sdp->sd_jdesc || 158 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) 159 return false; 160 return true; 161 } 162 163 void gfs2_glock_free(struct gfs2_glock *gl) 164 { 165 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 166 167 BUG_ON(atomic_read(&gl->gl_revokes)); 168 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 169 smp_mb(); 170 wake_up_glock(gl); 171 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 172 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 173 wake_up(&sdp->sd_glock_wait); 174 } 175 176 /** 177 * gfs2_glock_hold() - increment reference count on glock 178 * @gl: The glock to hold 179 * 180 */ 181 182 void gfs2_glock_hold(struct gfs2_glock *gl) 183 { 184 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 185 lockref_get(&gl->gl_lockref); 186 } 187 188 /** 189 * demote_ok - Check to see if it's ok to unlock a glock 190 * @gl: the glock 191 * 192 * Returns: 1 if it's ok 193 */ 194 195 static int demote_ok(const struct gfs2_glock *gl) 196 { 197 const struct gfs2_glock_operations *glops = gl->gl_ops; 198 199 if (gl->gl_state == LM_ST_UNLOCKED) 200 return 0; 201 if (!list_empty(&gl->gl_holders)) 202 return 0; 203 if (glops->go_demote_ok) 204 return glops->go_demote_ok(gl); 205 return 1; 206 } 207 208 209 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 210 { 211 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 212 return; 213 214 spin_lock(&lru_lock); 215 216 list_del(&gl->gl_lru); 217 list_add_tail(&gl->gl_lru, &lru_list); 218 219 if (!test_bit(GLF_LRU, &gl->gl_flags)) { 220 set_bit(GLF_LRU, &gl->gl_flags); 221 atomic_inc(&lru_count); 222 } 223 224 spin_unlock(&lru_lock); 225 } 226 227 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 228 { 229 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 230 return; 231 232 spin_lock(&lru_lock); 233 if (test_bit(GLF_LRU, &gl->gl_flags)) { 234 list_del_init(&gl->gl_lru); 235 atomic_dec(&lru_count); 236 clear_bit(GLF_LRU, &gl->gl_flags); 237 } 238 spin_unlock(&lru_lock); 239 } 240 241 /* 242 * Enqueue the glock on the work queue. Passes one glock reference on to the 243 * work queue. 244 */ 245 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 246 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { 247 /* 248 * We are holding the lockref spinlock, and the work was still 249 * queued above. The queued work (glock_work_func) takes that 250 * spinlock before dropping its glock reference(s), so it 251 * cannot have dropped them in the meantime. 252 */ 253 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); 254 gl->gl_lockref.count--; 255 } 256 } 257 258 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 259 spin_lock(&gl->gl_lockref.lock); 260 __gfs2_glock_queue_work(gl, delay); 261 spin_unlock(&gl->gl_lockref.lock); 262 } 263 264 static void __gfs2_glock_put(struct gfs2_glock *gl) 265 { 266 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 267 struct address_space *mapping = gfs2_glock2aspace(gl); 268 269 lockref_mark_dead(&gl->gl_lockref); 270 271 gfs2_glock_remove_from_lru(gl); 272 spin_unlock(&gl->gl_lockref.lock); 273 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 274 GLOCK_BUG_ON(gl, mapping && mapping->nrpages && !gfs2_withdrawn(sdp)); 275 trace_gfs2_glock_put(gl); 276 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); 277 } 278 279 /* 280 * Cause the glock to be put in work queue context. 281 */ 282 void gfs2_glock_queue_put(struct gfs2_glock *gl) 283 { 284 gfs2_glock_queue_work(gl, 0); 285 } 286 287 /** 288 * gfs2_glock_put() - Decrement reference count on glock 289 * @gl: The glock to put 290 * 291 */ 292 293 void gfs2_glock_put(struct gfs2_glock *gl) 294 { 295 if (lockref_put_or_lock(&gl->gl_lockref)) 296 return; 297 298 __gfs2_glock_put(gl); 299 } 300 301 /** 302 * may_grant - check if its ok to grant a new lock 303 * @gl: The glock 304 * @gh: The lock request which we wish to grant 305 * 306 * Returns: true if its ok to grant the lock 307 */ 308 309 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) 310 { 311 const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list); 312 if ((gh->gh_state == LM_ST_EXCLUSIVE || 313 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) 314 return 0; 315 if (gl->gl_state == gh->gh_state) 316 return 1; 317 if (gh->gh_flags & GL_EXACT) 318 return 0; 319 if (gl->gl_state == LM_ST_EXCLUSIVE) { 320 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) 321 return 1; 322 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) 323 return 1; 324 } 325 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) 326 return 1; 327 return 0; 328 } 329 330 static void gfs2_holder_wake(struct gfs2_holder *gh) 331 { 332 clear_bit(HIF_WAIT, &gh->gh_iflags); 333 smp_mb__after_atomic(); 334 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 335 if (gh->gh_flags & GL_ASYNC) { 336 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; 337 338 wake_up(&sdp->sd_async_glock_wait); 339 } 340 } 341 342 /** 343 * do_error - Something unexpected has happened during a lock request 344 * 345 */ 346 347 static void do_error(struct gfs2_glock *gl, const int ret) 348 { 349 struct gfs2_holder *gh, *tmp; 350 351 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 352 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 353 continue; 354 if (ret & LM_OUT_ERROR) 355 gh->gh_error = -EIO; 356 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 357 gh->gh_error = GLR_TRYFAILED; 358 else 359 continue; 360 list_del_init(&gh->gh_list); 361 trace_gfs2_glock_queue(gh, 0); 362 gfs2_holder_wake(gh); 363 } 364 } 365 366 /** 367 * do_promote - promote as many requests as possible on the current queue 368 * @gl: The glock 369 * 370 * Returns: 1 if there is a blocked holder at the head of the list, or 2 371 * if a type specific operation is underway. 372 */ 373 374 static int do_promote(struct gfs2_glock *gl) 375 __releases(&gl->gl_lockref.lock) 376 __acquires(&gl->gl_lockref.lock) 377 { 378 const struct gfs2_glock_operations *glops = gl->gl_ops; 379 struct gfs2_holder *gh, *tmp; 380 int ret; 381 382 restart: 383 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 384 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 385 continue; 386 if (may_grant(gl, gh)) { 387 if (gh->gh_list.prev == &gl->gl_holders && 388 glops->go_lock) { 389 spin_unlock(&gl->gl_lockref.lock); 390 /* FIXME: eliminate this eventually */ 391 ret = glops->go_lock(gh); 392 spin_lock(&gl->gl_lockref.lock); 393 if (ret) { 394 if (ret == 1) 395 return 2; 396 gh->gh_error = ret; 397 list_del_init(&gh->gh_list); 398 trace_gfs2_glock_queue(gh, 0); 399 gfs2_holder_wake(gh); 400 goto restart; 401 } 402 set_bit(HIF_HOLDER, &gh->gh_iflags); 403 trace_gfs2_promote(gh, 1); 404 gfs2_holder_wake(gh); 405 goto restart; 406 } 407 set_bit(HIF_HOLDER, &gh->gh_iflags); 408 trace_gfs2_promote(gh, 0); 409 gfs2_holder_wake(gh); 410 continue; 411 } 412 if (gh->gh_list.prev == &gl->gl_holders) 413 return 1; 414 do_error(gl, 0); 415 break; 416 } 417 return 0; 418 } 419 420 /** 421 * find_first_waiter - find the first gh that's waiting for the glock 422 * @gl: the glock 423 */ 424 425 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 426 { 427 struct gfs2_holder *gh; 428 429 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 430 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 431 return gh; 432 } 433 return NULL; 434 } 435 436 /** 437 * state_change - record that the glock is now in a different state 438 * @gl: the glock 439 * @new_state the new state 440 * 441 */ 442 443 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 444 { 445 int held1, held2; 446 447 held1 = (gl->gl_state != LM_ST_UNLOCKED); 448 held2 = (new_state != LM_ST_UNLOCKED); 449 450 if (held1 != held2) { 451 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 452 if (held2) 453 gl->gl_lockref.count++; 454 else 455 gl->gl_lockref.count--; 456 } 457 if (held1 && held2 && list_empty(&gl->gl_holders)) 458 clear_bit(GLF_QUEUED, &gl->gl_flags); 459 460 if (new_state != gl->gl_target) 461 /* shorten our minimum hold time */ 462 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 463 GL_GLOCK_MIN_HOLD); 464 gl->gl_state = new_state; 465 gl->gl_tchange = jiffies; 466 } 467 468 static void gfs2_demote_wake(struct gfs2_glock *gl) 469 { 470 gl->gl_demote_state = LM_ST_EXCLUSIVE; 471 clear_bit(GLF_DEMOTE, &gl->gl_flags); 472 smp_mb__after_atomic(); 473 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 474 } 475 476 /** 477 * finish_xmote - The DLM has replied to one of our lock requests 478 * @gl: The glock 479 * @ret: The status from the DLM 480 * 481 */ 482 483 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 484 { 485 const struct gfs2_glock_operations *glops = gl->gl_ops; 486 struct gfs2_holder *gh; 487 unsigned state = ret & LM_OUT_ST_MASK; 488 int rv; 489 490 spin_lock(&gl->gl_lockref.lock); 491 trace_gfs2_glock_state_change(gl, state); 492 state_change(gl, state); 493 gh = find_first_waiter(gl); 494 495 /* Demote to UN request arrived during demote to SH or DF */ 496 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 497 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 498 gl->gl_target = LM_ST_UNLOCKED; 499 500 /* Check for state != intended state */ 501 if (unlikely(state != gl->gl_target)) { 502 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 503 /* move to back of queue and try next entry */ 504 if (ret & LM_OUT_CANCELED) { 505 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 506 list_move_tail(&gh->gh_list, &gl->gl_holders); 507 gh = find_first_waiter(gl); 508 gl->gl_target = gh->gh_state; 509 goto retry; 510 } 511 /* Some error or failed "try lock" - report it */ 512 if ((ret & LM_OUT_ERROR) || 513 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 514 gl->gl_target = gl->gl_state; 515 do_error(gl, ret); 516 goto out; 517 } 518 } 519 switch(state) { 520 /* Unlocked due to conversion deadlock, try again */ 521 case LM_ST_UNLOCKED: 522 retry: 523 do_xmote(gl, gh, gl->gl_target); 524 break; 525 /* Conversion fails, unlock and try again */ 526 case LM_ST_SHARED: 527 case LM_ST_DEFERRED: 528 do_xmote(gl, gh, LM_ST_UNLOCKED); 529 break; 530 default: /* Everything else */ 531 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", 532 gl->gl_target, state); 533 GLOCK_BUG_ON(gl, 1); 534 } 535 spin_unlock(&gl->gl_lockref.lock); 536 return; 537 } 538 539 /* Fast path - we got what we asked for */ 540 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 541 gfs2_demote_wake(gl); 542 if (state != LM_ST_UNLOCKED) { 543 if (glops->go_xmote_bh) { 544 spin_unlock(&gl->gl_lockref.lock); 545 rv = glops->go_xmote_bh(gl, gh); 546 spin_lock(&gl->gl_lockref.lock); 547 if (rv) { 548 do_error(gl, rv); 549 goto out; 550 } 551 } 552 rv = do_promote(gl); 553 if (rv == 2) 554 goto out_locked; 555 } 556 out: 557 clear_bit(GLF_LOCK, &gl->gl_flags); 558 out_locked: 559 spin_unlock(&gl->gl_lockref.lock); 560 } 561 562 /** 563 * do_xmote - Calls the DLM to change the state of a lock 564 * @gl: The lock state 565 * @gh: The holder (only for promotes) 566 * @target: The target lock state 567 * 568 */ 569 570 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) 571 __releases(&gl->gl_lockref.lock) 572 __acquires(&gl->gl_lockref.lock) 573 { 574 const struct gfs2_glock_operations *glops = gl->gl_ops; 575 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 576 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); 577 int ret; 578 579 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && 580 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 581 return; 582 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 583 LM_FLAG_PRIORITY); 584 GLOCK_BUG_ON(gl, gl->gl_state == target); 585 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 586 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 587 glops->go_inval) { 588 /* 589 * If another process is already doing the invalidate, let that 590 * finish first. The glock state machine will get back to this 591 * holder again later. 592 */ 593 if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS, 594 &gl->gl_flags)) 595 return; 596 do_error(gl, 0); /* Fail queued try locks */ 597 } 598 gl->gl_req = target; 599 set_bit(GLF_BLOCKING, &gl->gl_flags); 600 if ((gl->gl_req == LM_ST_UNLOCKED) || 601 (gl->gl_state == LM_ST_EXCLUSIVE) || 602 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) 603 clear_bit(GLF_BLOCKING, &gl->gl_flags); 604 spin_unlock(&gl->gl_lockref.lock); 605 if (glops->go_sync) { 606 ret = glops->go_sync(gl); 607 /* If we had a problem syncing (due to io errors or whatever, 608 * we should not invalidate the metadata or tell dlm to 609 * release the glock to other nodes. 610 */ 611 if (ret) { 612 if (cmpxchg(&sdp->sd_log_error, 0, ret)) { 613 fs_err(sdp, "Error %d syncing glock \n", ret); 614 gfs2_dump_glock(NULL, gl, true); 615 } 616 return; 617 } 618 } 619 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { 620 /* 621 * The call to go_sync should have cleared out the ail list. 622 * If there are still items, we have a problem. We ought to 623 * withdraw, but we can't because the withdraw code also uses 624 * glocks. Warn about the error, dump the glock, then fall 625 * through and wait for logd to do the withdraw for us. 626 */ 627 if ((atomic_read(&gl->gl_ail_count) != 0) && 628 (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) { 629 gfs2_assert_warn(sdp, !atomic_read(&gl->gl_ail_count)); 630 gfs2_dump_glock(NULL, gl, true); 631 } 632 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); 633 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 634 } 635 636 gfs2_glock_hold(gl); 637 /* 638 * Check for an error encountered since we called go_sync and go_inval. 639 * If so, we can't withdraw from the glock code because the withdraw 640 * code itself uses glocks (see function signal_our_withdraw) to 641 * change the mount to read-only. Most importantly, we must not call 642 * dlm to unlock the glock until the journal is in a known good state 643 * (after journal replay) otherwise other nodes may use the object 644 * (rgrp or dinode) and then later, journal replay will corrupt the 645 * file system. The best we can do here is wait for the logd daemon 646 * to see sd_log_error and withdraw, and in the meantime, requeue the 647 * work for later. 648 * 649 * However, if we're just unlocking the lock (say, for unmount, when 650 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete 651 * then it's okay to tell dlm to unlock it. 652 */ 653 if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) 654 gfs2_withdraw_delayed(sdp); 655 if (glock_blocked_by_withdraw(gl)) { 656 if (target != LM_ST_UNLOCKED || 657 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) { 658 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); 659 goto out; 660 } 661 } 662 663 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 664 /* lock_dlm */ 665 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 666 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && 667 target == LM_ST_UNLOCKED && 668 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { 669 finish_xmote(gl, target); 670 gfs2_glock_queue_work(gl, 0); 671 } else if (ret) { 672 fs_err(sdp, "lm_lock ret %d\n", ret); 673 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); 674 } 675 } else { /* lock_nolock */ 676 finish_xmote(gl, target); 677 gfs2_glock_queue_work(gl, 0); 678 } 679 out: 680 spin_lock(&gl->gl_lockref.lock); 681 } 682 683 /** 684 * find_first_holder - find the first "holder" gh 685 * @gl: the glock 686 */ 687 688 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 689 { 690 struct gfs2_holder *gh; 691 692 if (!list_empty(&gl->gl_holders)) { 693 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 694 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 695 return gh; 696 } 697 return NULL; 698 } 699 700 /** 701 * run_queue - do all outstanding tasks related to a glock 702 * @gl: The glock in question 703 * @nonblock: True if we must not block in run_queue 704 * 705 */ 706 707 static void run_queue(struct gfs2_glock *gl, const int nonblock) 708 __releases(&gl->gl_lockref.lock) 709 __acquires(&gl->gl_lockref.lock) 710 { 711 struct gfs2_holder *gh = NULL; 712 int ret; 713 714 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 715 return; 716 717 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 718 719 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 720 gl->gl_demote_state != gl->gl_state) { 721 if (find_first_holder(gl)) 722 goto out_unlock; 723 if (nonblock) 724 goto out_sched; 725 smp_mb(); 726 if (atomic_read(&gl->gl_revokes) != 0) 727 goto out_sched; 728 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 729 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 730 gl->gl_target = gl->gl_demote_state; 731 } else { 732 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 733 gfs2_demote_wake(gl); 734 ret = do_promote(gl); 735 if (ret == 0) 736 goto out_unlock; 737 if (ret == 2) 738 goto out; 739 gh = find_first_waiter(gl); 740 gl->gl_target = gh->gh_state; 741 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 742 do_error(gl, 0); /* Fail queued try locks */ 743 } 744 do_xmote(gl, gh, gl->gl_target); 745 out: 746 return; 747 748 out_sched: 749 clear_bit(GLF_LOCK, &gl->gl_flags); 750 smp_mb__after_atomic(); 751 gl->gl_lockref.count++; 752 __gfs2_glock_queue_work(gl, 0); 753 return; 754 755 out_unlock: 756 clear_bit(GLF_LOCK, &gl->gl_flags); 757 smp_mb__after_atomic(); 758 return; 759 } 760 761 static void delete_work_func(struct work_struct *work) 762 { 763 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); 764 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 765 struct inode *inode; 766 u64 no_addr = gl->gl_name.ln_number; 767 768 /* If someone's using this glock to create a new dinode, the block must 769 have been freed by another node, then re-used, in which case our 770 iopen callback is too late after the fact. Ignore it. */ 771 if (test_bit(GLF_INODE_CREATING, &gl->gl_flags)) 772 goto out; 773 774 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED); 775 if (!IS_ERR_OR_NULL(inode)) { 776 d_prune_aliases(inode); 777 iput(inode); 778 } 779 out: 780 gfs2_glock_put(gl); 781 } 782 783 static void glock_work_func(struct work_struct *work) 784 { 785 unsigned long delay = 0; 786 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 787 unsigned int drop_refs = 1; 788 789 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 790 finish_xmote(gl, gl->gl_reply); 791 drop_refs++; 792 } 793 spin_lock(&gl->gl_lockref.lock); 794 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 795 gl->gl_state != LM_ST_UNLOCKED && 796 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 797 unsigned long holdtime, now = jiffies; 798 799 holdtime = gl->gl_tchange + gl->gl_hold_time; 800 if (time_before(now, holdtime)) 801 delay = holdtime - now; 802 803 if (!delay) { 804 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 805 set_bit(GLF_DEMOTE, &gl->gl_flags); 806 } 807 } 808 run_queue(gl, 0); 809 if (delay) { 810 /* Keep one glock reference for the work we requeue. */ 811 drop_refs--; 812 if (gl->gl_name.ln_type != LM_TYPE_INODE) 813 delay = 0; 814 __gfs2_glock_queue_work(gl, delay); 815 } 816 817 /* 818 * Drop the remaining glock references manually here. (Mind that 819 * __gfs2_glock_queue_work depends on the lockref spinlock begin held 820 * here as well.) 821 */ 822 gl->gl_lockref.count -= drop_refs; 823 if (!gl->gl_lockref.count) { 824 __gfs2_glock_put(gl); 825 return; 826 } 827 spin_unlock(&gl->gl_lockref.lock); 828 } 829 830 static struct gfs2_glock *find_insert_glock(struct lm_lockname *name, 831 struct gfs2_glock *new) 832 { 833 struct wait_glock_queue wait; 834 wait_queue_head_t *wq = glock_waitqueue(name); 835 struct gfs2_glock *gl; 836 837 wait.name = name; 838 init_wait(&wait.wait); 839 wait.wait.func = glock_wake_function; 840 841 again: 842 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 843 rcu_read_lock(); 844 if (new) { 845 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, 846 &new->gl_node, ht_parms); 847 if (IS_ERR(gl)) 848 goto out; 849 } else { 850 gl = rhashtable_lookup_fast(&gl_hash_table, 851 name, ht_parms); 852 } 853 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { 854 rcu_read_unlock(); 855 schedule(); 856 goto again; 857 } 858 out: 859 rcu_read_unlock(); 860 finish_wait(wq, &wait.wait); 861 return gl; 862 } 863 864 /** 865 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 866 * @sdp: The GFS2 superblock 867 * @number: the lock number 868 * @glops: The glock_operations to use 869 * @create: If 0, don't create the glock if it doesn't exist 870 * @glp: the glock is returned here 871 * 872 * This does not lock a glock, just finds/creates structures for one. 873 * 874 * Returns: errno 875 */ 876 877 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 878 const struct gfs2_glock_operations *glops, int create, 879 struct gfs2_glock **glp) 880 { 881 struct super_block *s = sdp->sd_vfs; 882 struct lm_lockname name = { .ln_number = number, 883 .ln_type = glops->go_type, 884 .ln_sbd = sdp }; 885 struct gfs2_glock *gl, *tmp; 886 struct address_space *mapping; 887 struct kmem_cache *cachep; 888 int ret = 0; 889 890 gl = find_insert_glock(&name, NULL); 891 if (gl) { 892 *glp = gl; 893 return 0; 894 } 895 if (!create) 896 return -ENOENT; 897 898 if (glops->go_flags & GLOF_ASPACE) 899 cachep = gfs2_glock_aspace_cachep; 900 else 901 cachep = gfs2_glock_cachep; 902 gl = kmem_cache_alloc(cachep, GFP_NOFS); 903 if (!gl) 904 return -ENOMEM; 905 906 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 907 908 if (glops->go_flags & GLOF_LVB) { 909 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 910 if (!gl->gl_lksb.sb_lvbptr) { 911 kmem_cache_free(cachep, gl); 912 return -ENOMEM; 913 } 914 } 915 916 atomic_inc(&sdp->sd_glock_disposal); 917 gl->gl_node.next = NULL; 918 gl->gl_flags = 0; 919 gl->gl_name = name; 920 gl->gl_lockref.count = 1; 921 gl->gl_state = LM_ST_UNLOCKED; 922 gl->gl_target = LM_ST_UNLOCKED; 923 gl->gl_demote_state = LM_ST_EXCLUSIVE; 924 gl->gl_ops = glops; 925 gl->gl_dstamp = 0; 926 preempt_disable(); 927 /* We use the global stats to estimate the initial per-glock stats */ 928 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 929 preempt_enable(); 930 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 931 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 932 gl->gl_tchange = jiffies; 933 gl->gl_object = NULL; 934 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 935 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 936 INIT_WORK(&gl->gl_delete, delete_work_func); 937 938 mapping = gfs2_glock2aspace(gl); 939 if (mapping) { 940 mapping->a_ops = &gfs2_meta_aops; 941 mapping->host = s->s_bdev->bd_inode; 942 mapping->flags = 0; 943 mapping_set_gfp_mask(mapping, GFP_NOFS); 944 mapping->private_data = NULL; 945 mapping->writeback_index = 0; 946 } 947 948 tmp = find_insert_glock(&name, gl); 949 if (!tmp) { 950 *glp = gl; 951 goto out; 952 } 953 if (IS_ERR(tmp)) { 954 ret = PTR_ERR(tmp); 955 goto out_free; 956 } 957 *glp = tmp; 958 959 out_free: 960 kfree(gl->gl_lksb.sb_lvbptr); 961 kmem_cache_free(cachep, gl); 962 atomic_dec(&sdp->sd_glock_disposal); 963 964 out: 965 return ret; 966 } 967 968 /** 969 * gfs2_holder_init - initialize a struct gfs2_holder in the default way 970 * @gl: the glock 971 * @state: the state we're requesting 972 * @flags: the modifier flags 973 * @gh: the holder structure 974 * 975 */ 976 977 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, 978 struct gfs2_holder *gh) 979 { 980 INIT_LIST_HEAD(&gh->gh_list); 981 gh->gh_gl = gl; 982 gh->gh_ip = _RET_IP_; 983 gh->gh_owner_pid = get_pid(task_pid(current)); 984 gh->gh_state = state; 985 gh->gh_flags = flags; 986 gh->gh_error = 0; 987 gh->gh_iflags = 0; 988 gfs2_glock_hold(gl); 989 } 990 991 /** 992 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 993 * @state: the state we're requesting 994 * @flags: the modifier flags 995 * @gh: the holder structure 996 * 997 * Don't mess with the glock. 998 * 999 */ 1000 1001 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) 1002 { 1003 gh->gh_state = state; 1004 gh->gh_flags = flags; 1005 gh->gh_iflags = 0; 1006 gh->gh_ip = _RET_IP_; 1007 put_pid(gh->gh_owner_pid); 1008 gh->gh_owner_pid = get_pid(task_pid(current)); 1009 } 1010 1011 /** 1012 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference) 1013 * @gh: the holder structure 1014 * 1015 */ 1016 1017 void gfs2_holder_uninit(struct gfs2_holder *gh) 1018 { 1019 put_pid(gh->gh_owner_pid); 1020 gfs2_glock_put(gh->gh_gl); 1021 gfs2_holder_mark_uninitialized(gh); 1022 gh->gh_ip = 0; 1023 } 1024 1025 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, 1026 unsigned long start_time) 1027 { 1028 /* Have we waited longer that a second? */ 1029 if (time_after(jiffies, start_time + HZ)) { 1030 /* Lengthen the minimum hold time. */ 1031 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, 1032 GL_GLOCK_MAX_HOLD); 1033 } 1034 } 1035 1036 /** 1037 * gfs2_glock_wait - wait on a glock acquisition 1038 * @gh: the glock holder 1039 * 1040 * Returns: 0 on success 1041 */ 1042 1043 int gfs2_glock_wait(struct gfs2_holder *gh) 1044 { 1045 unsigned long start_time = jiffies; 1046 1047 might_sleep(); 1048 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1049 gfs2_glock_update_hold_time(gh->gh_gl, start_time); 1050 return gh->gh_error; 1051 } 1052 1053 static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) 1054 { 1055 int i; 1056 1057 for (i = 0; i < num_gh; i++) 1058 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) 1059 return 1; 1060 return 0; 1061 } 1062 1063 /** 1064 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions 1065 * @num_gh: the number of holders in the array 1066 * @ghs: the glock holder array 1067 * 1068 * Returns: 0 on success, meaning all glocks have been granted and are held. 1069 * -ESTALE if the request timed out, meaning all glocks were released, 1070 * and the caller should retry the operation. 1071 */ 1072 1073 int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) 1074 { 1075 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; 1076 int i, ret = 0, timeout = 0; 1077 unsigned long start_time = jiffies; 1078 bool keep_waiting; 1079 1080 might_sleep(); 1081 /* 1082 * Total up the (minimum hold time * 2) of all glocks and use that to 1083 * determine the max amount of time we should wait. 1084 */ 1085 for (i = 0; i < num_gh; i++) 1086 timeout += ghs[i].gh_gl->gl_hold_time << 1; 1087 1088 wait_for_dlm: 1089 if (!wait_event_timeout(sdp->sd_async_glock_wait, 1090 !glocks_pending(num_gh, ghs), timeout)) 1091 ret = -ESTALE; /* request timed out. */ 1092 1093 /* 1094 * If dlm granted all our requests, we need to adjust the glock 1095 * minimum hold time values according to how long we waited. 1096 * 1097 * If our request timed out, we need to repeatedly release any held 1098 * glocks we acquired thus far to allow dlm to acquire the remaining 1099 * glocks without deadlocking. We cannot currently cancel outstanding 1100 * glock acquisitions. 1101 * 1102 * The HIF_WAIT bit tells us which requests still need a response from 1103 * dlm. 1104 * 1105 * If dlm sent us any errors, we return the first error we find. 1106 */ 1107 keep_waiting = false; 1108 for (i = 0; i < num_gh; i++) { 1109 /* Skip holders we have already dequeued below. */ 1110 if (!gfs2_holder_queued(&ghs[i])) 1111 continue; 1112 /* Skip holders with a pending DLM response. */ 1113 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) { 1114 keep_waiting = true; 1115 continue; 1116 } 1117 1118 if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) { 1119 if (ret == -ESTALE) 1120 gfs2_glock_dq(&ghs[i]); 1121 else 1122 gfs2_glock_update_hold_time(ghs[i].gh_gl, 1123 start_time); 1124 } 1125 if (!ret) 1126 ret = ghs[i].gh_error; 1127 } 1128 1129 if (keep_waiting) 1130 goto wait_for_dlm; 1131 1132 /* 1133 * At this point, we've either acquired all locks or released them all. 1134 */ 1135 return ret; 1136 } 1137 1138 /** 1139 * handle_callback - process a demote request 1140 * @gl: the glock 1141 * @state: the state the caller wants us to change to 1142 * 1143 * There are only two requests that we are going to see in actual 1144 * practise: LM_ST_SHARED and LM_ST_UNLOCKED 1145 */ 1146 1147 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 1148 unsigned long delay, bool remote) 1149 { 1150 int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE; 1151 1152 set_bit(bit, &gl->gl_flags); 1153 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 1154 gl->gl_demote_state = state; 1155 gl->gl_demote_time = jiffies; 1156 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 1157 gl->gl_demote_state != state) { 1158 gl->gl_demote_state = LM_ST_UNLOCKED; 1159 } 1160 if (gl->gl_ops->go_callback) 1161 gl->gl_ops->go_callback(gl, remote); 1162 trace_gfs2_demote_rq(gl, remote); 1163 } 1164 1165 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 1166 { 1167 struct va_format vaf; 1168 va_list args; 1169 1170 va_start(args, fmt); 1171 1172 if (seq) { 1173 seq_vprintf(seq, fmt, args); 1174 } else { 1175 vaf.fmt = fmt; 1176 vaf.va = &args; 1177 1178 pr_err("%pV", &vaf); 1179 } 1180 1181 va_end(args); 1182 } 1183 1184 /** 1185 * add_to_queue - Add a holder to the wait queue (but look for recursion) 1186 * @gh: the holder structure to add 1187 * 1188 * Eventually we should move the recursive locking trap to a 1189 * debugging option or something like that. This is the fast 1190 * path and needs to have the minimum number of distractions. 1191 * 1192 */ 1193 1194 static inline void add_to_queue(struct gfs2_holder *gh) 1195 __releases(&gl->gl_lockref.lock) 1196 __acquires(&gl->gl_lockref.lock) 1197 { 1198 struct gfs2_glock *gl = gh->gh_gl; 1199 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1200 struct list_head *insert_pt = NULL; 1201 struct gfs2_holder *gh2; 1202 int try_futile = 0; 1203 1204 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); 1205 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1206 GLOCK_BUG_ON(gl, true); 1207 1208 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 1209 if (test_bit(GLF_LOCK, &gl->gl_flags)) 1210 try_futile = !may_grant(gl, gh); 1211 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 1212 goto fail; 1213 } 1214 1215 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1216 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && 1217 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) 1218 goto trap_recursive; 1219 if (try_futile && 1220 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 1221 fail: 1222 gh->gh_error = GLR_TRYFAILED; 1223 gfs2_holder_wake(gh); 1224 return; 1225 } 1226 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1227 continue; 1228 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1229 insert_pt = &gh2->gh_list; 1230 } 1231 set_bit(GLF_QUEUED, &gl->gl_flags); 1232 trace_gfs2_glock_queue(gh, 1); 1233 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1234 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1235 if (likely(insert_pt == NULL)) { 1236 list_add_tail(&gh->gh_list, &gl->gl_holders); 1237 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1238 goto do_cancel; 1239 return; 1240 } 1241 list_add_tail(&gh->gh_list, insert_pt); 1242 do_cancel: 1243 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 1244 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 1245 spin_unlock(&gl->gl_lockref.lock); 1246 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1247 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1248 spin_lock(&gl->gl_lockref.lock); 1249 } 1250 return; 1251 1252 trap_recursive: 1253 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); 1254 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); 1255 fs_err(sdp, "lock type: %d req lock state : %d\n", 1256 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 1257 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); 1258 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); 1259 fs_err(sdp, "lock type: %d req lock state : %d\n", 1260 gh->gh_gl->gl_name.ln_type, gh->gh_state); 1261 gfs2_dump_glock(NULL, gl, true); 1262 BUG(); 1263 } 1264 1265 /** 1266 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 1267 * @gh: the holder structure 1268 * 1269 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1270 * 1271 * Returns: 0, GLR_TRYFAILED, or errno on failure 1272 */ 1273 1274 int gfs2_glock_nq(struct gfs2_holder *gh) 1275 { 1276 struct gfs2_glock *gl = gh->gh_gl; 1277 int error = 0; 1278 1279 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) 1280 return -EIO; 1281 1282 if (test_bit(GLF_LRU, &gl->gl_flags)) 1283 gfs2_glock_remove_from_lru(gl); 1284 1285 spin_lock(&gl->gl_lockref.lock); 1286 add_to_queue(gh); 1287 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1288 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { 1289 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1290 gl->gl_lockref.count++; 1291 __gfs2_glock_queue_work(gl, 0); 1292 } 1293 run_queue(gl, 1); 1294 spin_unlock(&gl->gl_lockref.lock); 1295 1296 if (!(gh->gh_flags & GL_ASYNC)) 1297 error = gfs2_glock_wait(gh); 1298 1299 return error; 1300 } 1301 1302 /** 1303 * gfs2_glock_poll - poll to see if an async request has been completed 1304 * @gh: the holder 1305 * 1306 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1307 */ 1308 1309 int gfs2_glock_poll(struct gfs2_holder *gh) 1310 { 1311 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1312 } 1313 1314 /** 1315 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1316 * @gh: the glock holder 1317 * 1318 */ 1319 1320 void gfs2_glock_dq(struct gfs2_holder *gh) 1321 { 1322 struct gfs2_glock *gl = gh->gh_gl; 1323 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1324 unsigned delay = 0; 1325 int fast_path = 0; 1326 1327 spin_lock(&gl->gl_lockref.lock); 1328 /* 1329 * If we're in the process of file system withdraw, we cannot just 1330 * dequeue any glocks until our journal is recovered, lest we 1331 * introduce file system corruption. We need two exceptions to this 1332 * rule: We need to allow unlocking of nondisk glocks and the glock 1333 * for our own journal that needs recovery. 1334 */ 1335 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && 1336 glock_blocked_by_withdraw(gl) && 1337 gh->gh_gl != sdp->sd_jinode_gl) { 1338 sdp->sd_glock_dqs_held++; 1339 might_sleep(); 1340 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, 1341 TASK_UNINTERRUPTIBLE); 1342 } 1343 if (gh->gh_flags & GL_NOCACHE) 1344 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1345 1346 list_del_init(&gh->gh_list); 1347 clear_bit(HIF_HOLDER, &gh->gh_iflags); 1348 if (find_first_holder(gl) == NULL) { 1349 if (list_empty(&gl->gl_holders) && 1350 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1351 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1352 fast_path = 1; 1353 } 1354 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1355 gfs2_glock_add_to_lru(gl); 1356 1357 trace_gfs2_glock_queue(gh, 0); 1358 if (unlikely(!fast_path)) { 1359 gl->gl_lockref.count++; 1360 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1361 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1362 gl->gl_name.ln_type == LM_TYPE_INODE) 1363 delay = gl->gl_hold_time; 1364 __gfs2_glock_queue_work(gl, delay); 1365 } 1366 spin_unlock(&gl->gl_lockref.lock); 1367 } 1368 1369 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1370 { 1371 struct gfs2_glock *gl = gh->gh_gl; 1372 gfs2_glock_dq(gh); 1373 might_sleep(); 1374 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1375 } 1376 1377 /** 1378 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1379 * @gh: the holder structure 1380 * 1381 */ 1382 1383 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1384 { 1385 gfs2_glock_dq(gh); 1386 gfs2_holder_uninit(gh); 1387 } 1388 1389 /** 1390 * gfs2_glock_nq_num - acquire a glock based on lock number 1391 * @sdp: the filesystem 1392 * @number: the lock number 1393 * @glops: the glock operations for the type of glock 1394 * @state: the state to acquire the glock in 1395 * @flags: modifier flags for the acquisition 1396 * @gh: the struct gfs2_holder 1397 * 1398 * Returns: errno 1399 */ 1400 1401 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1402 const struct gfs2_glock_operations *glops, 1403 unsigned int state, u16 flags, struct gfs2_holder *gh) 1404 { 1405 struct gfs2_glock *gl; 1406 int error; 1407 1408 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1409 if (!error) { 1410 error = gfs2_glock_nq_init(gl, state, flags, gh); 1411 gfs2_glock_put(gl); 1412 } 1413 1414 return error; 1415 } 1416 1417 /** 1418 * glock_compare - Compare two struct gfs2_glock structures for sorting 1419 * @arg_a: the first structure 1420 * @arg_b: the second structure 1421 * 1422 */ 1423 1424 static int glock_compare(const void *arg_a, const void *arg_b) 1425 { 1426 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; 1427 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; 1428 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; 1429 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; 1430 1431 if (a->ln_number > b->ln_number) 1432 return 1; 1433 if (a->ln_number < b->ln_number) 1434 return -1; 1435 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); 1436 return 0; 1437 } 1438 1439 /** 1440 * nq_m_sync - synchonously acquire more than one glock in deadlock free order 1441 * @num_gh: the number of structures 1442 * @ghs: an array of struct gfs2_holder structures 1443 * 1444 * Returns: 0 on success (all glocks acquired), 1445 * errno on failure (no glocks acquired) 1446 */ 1447 1448 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, 1449 struct gfs2_holder **p) 1450 { 1451 unsigned int x; 1452 int error = 0; 1453 1454 for (x = 0; x < num_gh; x++) 1455 p[x] = &ghs[x]; 1456 1457 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); 1458 1459 for (x = 0; x < num_gh; x++) { 1460 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1461 1462 error = gfs2_glock_nq(p[x]); 1463 if (error) { 1464 while (x--) 1465 gfs2_glock_dq(p[x]); 1466 break; 1467 } 1468 } 1469 1470 return error; 1471 } 1472 1473 /** 1474 * gfs2_glock_nq_m - acquire multiple glocks 1475 * @num_gh: the number of structures 1476 * @ghs: an array of struct gfs2_holder structures 1477 * 1478 * 1479 * Returns: 0 on success (all glocks acquired), 1480 * errno on failure (no glocks acquired) 1481 */ 1482 1483 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1484 { 1485 struct gfs2_holder *tmp[4]; 1486 struct gfs2_holder **pph = tmp; 1487 int error = 0; 1488 1489 switch(num_gh) { 1490 case 0: 1491 return 0; 1492 case 1: 1493 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1494 return gfs2_glock_nq(ghs); 1495 default: 1496 if (num_gh <= 4) 1497 break; 1498 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *), 1499 GFP_NOFS); 1500 if (!pph) 1501 return -ENOMEM; 1502 } 1503 1504 error = nq_m_sync(num_gh, ghs, pph); 1505 1506 if (pph != tmp) 1507 kfree(pph); 1508 1509 return error; 1510 } 1511 1512 /** 1513 * gfs2_glock_dq_m - release multiple glocks 1514 * @num_gh: the number of structures 1515 * @ghs: an array of struct gfs2_holder structures 1516 * 1517 */ 1518 1519 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1520 { 1521 while (num_gh--) 1522 gfs2_glock_dq(&ghs[num_gh]); 1523 } 1524 1525 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1526 { 1527 unsigned long delay = 0; 1528 unsigned long holdtime; 1529 unsigned long now = jiffies; 1530 1531 gfs2_glock_hold(gl); 1532 holdtime = gl->gl_tchange + gl->gl_hold_time; 1533 if (test_bit(GLF_QUEUED, &gl->gl_flags) && 1534 gl->gl_name.ln_type == LM_TYPE_INODE) { 1535 if (time_before(now, holdtime)) 1536 delay = holdtime - now; 1537 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1538 delay = gl->gl_hold_time; 1539 } 1540 1541 spin_lock(&gl->gl_lockref.lock); 1542 handle_callback(gl, state, delay, true); 1543 __gfs2_glock_queue_work(gl, delay); 1544 spin_unlock(&gl->gl_lockref.lock); 1545 } 1546 1547 /** 1548 * gfs2_should_freeze - Figure out if glock should be frozen 1549 * @gl: The glock in question 1550 * 1551 * Glocks are not frozen if (a) the result of the dlm operation is 1552 * an error, (b) the locking operation was an unlock operation or 1553 * (c) if there is a "noexp" flagged request anywhere in the queue 1554 * 1555 * Returns: 1 if freezing should occur, 0 otherwise 1556 */ 1557 1558 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1559 { 1560 const struct gfs2_holder *gh; 1561 1562 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1563 return 0; 1564 if (gl->gl_target == LM_ST_UNLOCKED) 1565 return 0; 1566 1567 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1568 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1569 continue; 1570 if (LM_FLAG_NOEXP & gh->gh_flags) 1571 return 0; 1572 } 1573 1574 return 1; 1575 } 1576 1577 /** 1578 * gfs2_glock_complete - Callback used by locking 1579 * @gl: Pointer to the glock 1580 * @ret: The return value from the dlm 1581 * 1582 * The gl_reply field is under the gl_lockref.lock lock so that it is ok 1583 * to use a bitfield shared with other glock state fields. 1584 */ 1585 1586 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1587 { 1588 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1589 1590 spin_lock(&gl->gl_lockref.lock); 1591 gl->gl_reply = ret; 1592 1593 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1594 if (gfs2_should_freeze(gl)) { 1595 set_bit(GLF_FROZEN, &gl->gl_flags); 1596 spin_unlock(&gl->gl_lockref.lock); 1597 return; 1598 } 1599 } 1600 1601 gl->gl_lockref.count++; 1602 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1603 __gfs2_glock_queue_work(gl, 0); 1604 spin_unlock(&gl->gl_lockref.lock); 1605 } 1606 1607 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b) 1608 { 1609 struct gfs2_glock *gla, *glb; 1610 1611 gla = list_entry(a, struct gfs2_glock, gl_lru); 1612 glb = list_entry(b, struct gfs2_glock, gl_lru); 1613 1614 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1615 return 1; 1616 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1617 return -1; 1618 1619 return 0; 1620 } 1621 1622 /** 1623 * gfs2_dispose_glock_lru - Demote a list of glocks 1624 * @list: The list to dispose of 1625 * 1626 * Disposing of glocks may involve disk accesses, so that here we sort 1627 * the glocks by number (i.e. disk location of the inodes) so that if 1628 * there are any such accesses, they'll be sent in order (mostly). 1629 * 1630 * Must be called under the lru_lock, but may drop and retake this 1631 * lock. While the lru_lock is dropped, entries may vanish from the 1632 * list, but no new entries will appear on the list (since it is 1633 * private) 1634 */ 1635 1636 static void gfs2_dispose_glock_lru(struct list_head *list) 1637 __releases(&lru_lock) 1638 __acquires(&lru_lock) 1639 { 1640 struct gfs2_glock *gl; 1641 1642 list_sort(NULL, list, glock_cmp); 1643 1644 while(!list_empty(list)) { 1645 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 1646 list_del_init(&gl->gl_lru); 1647 if (!spin_trylock(&gl->gl_lockref.lock)) { 1648 add_back_to_lru: 1649 list_add(&gl->gl_lru, &lru_list); 1650 set_bit(GLF_LRU, &gl->gl_flags); 1651 atomic_inc(&lru_count); 1652 continue; 1653 } 1654 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1655 spin_unlock(&gl->gl_lockref.lock); 1656 goto add_back_to_lru; 1657 } 1658 gl->gl_lockref.count++; 1659 if (demote_ok(gl)) 1660 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1661 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); 1662 __gfs2_glock_queue_work(gl, 0); 1663 spin_unlock(&gl->gl_lockref.lock); 1664 cond_resched_lock(&lru_lock); 1665 } 1666 } 1667 1668 /** 1669 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 1670 * @nr: The number of entries to scan 1671 * 1672 * This function selects the entries on the LRU which are able to 1673 * be demoted, and then kicks off the process by calling 1674 * gfs2_dispose_glock_lru() above. 1675 */ 1676 1677 static long gfs2_scan_glock_lru(int nr) 1678 { 1679 struct gfs2_glock *gl; 1680 LIST_HEAD(skipped); 1681 LIST_HEAD(dispose); 1682 long freed = 0; 1683 1684 spin_lock(&lru_lock); 1685 while ((nr-- >= 0) && !list_empty(&lru_list)) { 1686 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); 1687 1688 /* Test for being demotable */ 1689 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { 1690 list_move(&gl->gl_lru, &dispose); 1691 atomic_dec(&lru_count); 1692 clear_bit(GLF_LRU, &gl->gl_flags); 1693 freed++; 1694 continue; 1695 } 1696 1697 list_move(&gl->gl_lru, &skipped); 1698 } 1699 list_splice(&skipped, &lru_list); 1700 if (!list_empty(&dispose)) 1701 gfs2_dispose_glock_lru(&dispose); 1702 spin_unlock(&lru_lock); 1703 1704 return freed; 1705 } 1706 1707 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 1708 struct shrink_control *sc) 1709 { 1710 if (!(sc->gfp_mask & __GFP_FS)) 1711 return SHRINK_STOP; 1712 return gfs2_scan_glock_lru(sc->nr_to_scan); 1713 } 1714 1715 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 1716 struct shrink_control *sc) 1717 { 1718 return vfs_pressure_ratio(atomic_read(&lru_count)); 1719 } 1720 1721 static struct shrinker glock_shrinker = { 1722 .seeks = DEFAULT_SEEKS, 1723 .count_objects = gfs2_glock_shrink_count, 1724 .scan_objects = gfs2_glock_shrink_scan, 1725 }; 1726 1727 /** 1728 * examine_bucket - Call a function for glock in a hash bucket 1729 * @examiner: the function 1730 * @sdp: the filesystem 1731 * @bucket: the bucket 1732 * 1733 * Note that the function can be called multiple times on the same 1734 * object. So the user must ensure that the function can cope with 1735 * that. 1736 */ 1737 1738 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 1739 { 1740 struct gfs2_glock *gl; 1741 struct rhashtable_iter iter; 1742 1743 rhashtable_walk_enter(&gl_hash_table, &iter); 1744 1745 do { 1746 rhashtable_walk_start(&iter); 1747 1748 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) 1749 if (gl->gl_name.ln_sbd == sdp && 1750 lockref_get_not_dead(&gl->gl_lockref)) 1751 examiner(gl); 1752 1753 rhashtable_walk_stop(&iter); 1754 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); 1755 1756 rhashtable_walk_exit(&iter); 1757 } 1758 1759 /** 1760 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 1761 * @gl: The glock to thaw 1762 * 1763 */ 1764 1765 static void thaw_glock(struct gfs2_glock *gl) 1766 { 1767 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { 1768 gfs2_glock_put(gl); 1769 return; 1770 } 1771 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1772 gfs2_glock_queue_work(gl, 0); 1773 } 1774 1775 /** 1776 * clear_glock - look at a glock and see if we can free it from glock cache 1777 * @gl: the glock to look at 1778 * 1779 */ 1780 1781 static void clear_glock(struct gfs2_glock *gl) 1782 { 1783 gfs2_glock_remove_from_lru(gl); 1784 1785 spin_lock(&gl->gl_lockref.lock); 1786 if (gl->gl_state != LM_ST_UNLOCKED) 1787 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1788 __gfs2_glock_queue_work(gl, 0); 1789 spin_unlock(&gl->gl_lockref.lock); 1790 } 1791 1792 /** 1793 * gfs2_glock_thaw - Thaw any frozen glocks 1794 * @sdp: The super block 1795 * 1796 */ 1797 1798 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 1799 { 1800 glock_hash_walk(thaw_glock, sdp); 1801 } 1802 1803 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 1804 { 1805 spin_lock(&gl->gl_lockref.lock); 1806 gfs2_dump_glock(seq, gl, fsid); 1807 spin_unlock(&gl->gl_lockref.lock); 1808 } 1809 1810 static void dump_glock_func(struct gfs2_glock *gl) 1811 { 1812 dump_glock(NULL, gl, true); 1813 } 1814 1815 /** 1816 * gfs2_gl_hash_clear - Empty out the glock hash table 1817 * @sdp: the filesystem 1818 * @wait: wait until it's all gone 1819 * 1820 * Called when unmounting the filesystem. 1821 */ 1822 1823 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 1824 { 1825 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); 1826 flush_workqueue(glock_workqueue); 1827 glock_hash_walk(clear_glock, sdp); 1828 flush_workqueue(glock_workqueue); 1829 wait_event_timeout(sdp->sd_glock_wait, 1830 atomic_read(&sdp->sd_glock_disposal) == 0, 1831 HZ * 600); 1832 glock_hash_walk(dump_glock_func, sdp); 1833 } 1834 1835 void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 1836 { 1837 struct gfs2_glock *gl = ip->i_gl; 1838 int ret; 1839 1840 ret = gfs2_truncatei_resume(ip); 1841 gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0); 1842 1843 spin_lock(&gl->gl_lockref.lock); 1844 clear_bit(GLF_LOCK, &gl->gl_flags); 1845 run_queue(gl, 1); 1846 spin_unlock(&gl->gl_lockref.lock); 1847 } 1848 1849 static const char *state2str(unsigned state) 1850 { 1851 switch(state) { 1852 case LM_ST_UNLOCKED: 1853 return "UN"; 1854 case LM_ST_SHARED: 1855 return "SH"; 1856 case LM_ST_DEFERRED: 1857 return "DF"; 1858 case LM_ST_EXCLUSIVE: 1859 return "EX"; 1860 } 1861 return "??"; 1862 } 1863 1864 static const char *hflags2str(char *buf, u16 flags, unsigned long iflags) 1865 { 1866 char *p = buf; 1867 if (flags & LM_FLAG_TRY) 1868 *p++ = 't'; 1869 if (flags & LM_FLAG_TRY_1CB) 1870 *p++ = 'T'; 1871 if (flags & LM_FLAG_NOEXP) 1872 *p++ = 'e'; 1873 if (flags & LM_FLAG_ANY) 1874 *p++ = 'A'; 1875 if (flags & LM_FLAG_PRIORITY) 1876 *p++ = 'p'; 1877 if (flags & GL_ASYNC) 1878 *p++ = 'a'; 1879 if (flags & GL_EXACT) 1880 *p++ = 'E'; 1881 if (flags & GL_NOCACHE) 1882 *p++ = 'c'; 1883 if (test_bit(HIF_HOLDER, &iflags)) 1884 *p++ = 'H'; 1885 if (test_bit(HIF_WAIT, &iflags)) 1886 *p++ = 'W'; 1887 if (test_bit(HIF_FIRST, &iflags)) 1888 *p++ = 'F'; 1889 *p = 0; 1890 return buf; 1891 } 1892 1893 /** 1894 * dump_holder - print information about a glock holder 1895 * @seq: the seq_file struct 1896 * @gh: the glock holder 1897 * @fs_id_buf: pointer to file system id (if requested) 1898 * 1899 */ 1900 1901 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh, 1902 const char *fs_id_buf) 1903 { 1904 struct task_struct *gh_owner = NULL; 1905 char flags_buf[32]; 1906 1907 rcu_read_lock(); 1908 if (gh->gh_owner_pid) 1909 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 1910 gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n", 1911 fs_id_buf, state2str(gh->gh_state), 1912 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), 1913 gh->gh_error, 1914 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, 1915 gh_owner ? gh_owner->comm : "(ended)", 1916 (void *)gh->gh_ip); 1917 rcu_read_unlock(); 1918 } 1919 1920 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) 1921 { 1922 const unsigned long *gflags = &gl->gl_flags; 1923 char *p = buf; 1924 1925 if (test_bit(GLF_LOCK, gflags)) 1926 *p++ = 'l'; 1927 if (test_bit(GLF_DEMOTE, gflags)) 1928 *p++ = 'D'; 1929 if (test_bit(GLF_PENDING_DEMOTE, gflags)) 1930 *p++ = 'd'; 1931 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags)) 1932 *p++ = 'p'; 1933 if (test_bit(GLF_DIRTY, gflags)) 1934 *p++ = 'y'; 1935 if (test_bit(GLF_LFLUSH, gflags)) 1936 *p++ = 'f'; 1937 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) 1938 *p++ = 'i'; 1939 if (test_bit(GLF_REPLY_PENDING, gflags)) 1940 *p++ = 'r'; 1941 if (test_bit(GLF_INITIAL, gflags)) 1942 *p++ = 'I'; 1943 if (test_bit(GLF_FROZEN, gflags)) 1944 *p++ = 'F'; 1945 if (test_bit(GLF_QUEUED, gflags)) 1946 *p++ = 'q'; 1947 if (test_bit(GLF_LRU, gflags)) 1948 *p++ = 'L'; 1949 if (gl->gl_object) 1950 *p++ = 'o'; 1951 if (test_bit(GLF_BLOCKING, gflags)) 1952 *p++ = 'b'; 1953 *p = 0; 1954 return buf; 1955 } 1956 1957 /** 1958 * gfs2_dump_glock - print information about a glock 1959 * @seq: The seq_file struct 1960 * @gl: the glock 1961 * @fsid: If true, also dump the file system id 1962 * 1963 * The file format is as follows: 1964 * One line per object, capital letters are used to indicate objects 1965 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented, 1966 * other objects are indented by a single space and follow the glock to 1967 * which they are related. Fields are indicated by lower case letters 1968 * followed by a colon and the field value, except for strings which are in 1969 * [] so that its possible to see if they are composed of spaces for 1970 * example. The field's are n = number (id of the object), f = flags, 1971 * t = type, s = state, r = refcount, e = error, p = pid. 1972 * 1973 */ 1974 1975 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 1976 { 1977 const struct gfs2_glock_operations *glops = gl->gl_ops; 1978 unsigned long long dtime; 1979 const struct gfs2_holder *gh; 1980 char gflags_buf[32]; 1981 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1982 char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; 1983 1984 memset(fs_id_buf, 0, sizeof(fs_id_buf)); 1985 if (fsid && sdp) /* safety precaution */ 1986 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); 1987 dtime = jiffies - gl->gl_demote_time; 1988 dtime *= 1000000/HZ; /* demote time in uSec */ 1989 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) 1990 dtime = 0; 1991 gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d " 1992 "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state), 1993 gl->gl_name.ln_type, 1994 (unsigned long long)gl->gl_name.ln_number, 1995 gflags2str(gflags_buf, gl), 1996 state2str(gl->gl_target), 1997 state2str(gl->gl_demote_state), dtime, 1998 atomic_read(&gl->gl_ail_count), 1999 atomic_read(&gl->gl_revokes), 2000 (int)gl->gl_lockref.count, gl->gl_hold_time); 2001 2002 list_for_each_entry(gh, &gl->gl_holders, gh_list) 2003 dump_holder(seq, gh, fs_id_buf); 2004 2005 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) 2006 glops->go_dump(seq, gl, fs_id_buf); 2007 } 2008 2009 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr) 2010 { 2011 struct gfs2_glock *gl = iter_ptr; 2012 2013 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n", 2014 gl->gl_name.ln_type, 2015 (unsigned long long)gl->gl_name.ln_number, 2016 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], 2017 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], 2018 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], 2019 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], 2020 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], 2021 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], 2022 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], 2023 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); 2024 return 0; 2025 } 2026 2027 static const char *gfs2_gltype[] = { 2028 "type", 2029 "reserved", 2030 "nondisk", 2031 "inode", 2032 "rgrp", 2033 "meta", 2034 "iopen", 2035 "flock", 2036 "plock", 2037 "quota", 2038 "journal", 2039 }; 2040 2041 static const char *gfs2_stype[] = { 2042 [GFS2_LKS_SRTT] = "srtt", 2043 [GFS2_LKS_SRTTVAR] = "srttvar", 2044 [GFS2_LKS_SRTTB] = "srttb", 2045 [GFS2_LKS_SRTTVARB] = "srttvarb", 2046 [GFS2_LKS_SIRT] = "sirt", 2047 [GFS2_LKS_SIRTVAR] = "sirtvar", 2048 [GFS2_LKS_DCOUNT] = "dlm", 2049 [GFS2_LKS_QCOUNT] = "queue", 2050 }; 2051 2052 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype)) 2053 2054 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) 2055 { 2056 struct gfs2_sbd *sdp = seq->private; 2057 loff_t pos = *(loff_t *)iter_ptr; 2058 unsigned index = pos >> 3; 2059 unsigned subindex = pos & 0x07; 2060 int i; 2061 2062 if (index == 0 && subindex != 0) 2063 return 0; 2064 2065 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], 2066 (index == 0) ? "cpu": gfs2_stype[subindex]); 2067 2068 for_each_possible_cpu(i) { 2069 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 2070 2071 if (index == 0) 2072 seq_printf(seq, " %15u", i); 2073 else 2074 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> 2075 lkstats[index - 1].stats[subindex]); 2076 } 2077 seq_putc(seq, '\n'); 2078 return 0; 2079 } 2080 2081 int __init gfs2_glock_init(void) 2082 { 2083 int i, ret; 2084 2085 ret = rhashtable_init(&gl_hash_table, &ht_parms); 2086 if (ret < 0) 2087 return ret; 2088 2089 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 2090 WQ_HIGHPRI | WQ_FREEZABLE, 0); 2091 if (!glock_workqueue) { 2092 rhashtable_destroy(&gl_hash_table); 2093 return -ENOMEM; 2094 } 2095 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 2096 WQ_MEM_RECLAIM | WQ_FREEZABLE, 2097 0); 2098 if (!gfs2_delete_workqueue) { 2099 destroy_workqueue(glock_workqueue); 2100 rhashtable_destroy(&gl_hash_table); 2101 return -ENOMEM; 2102 } 2103 2104 ret = register_shrinker(&glock_shrinker); 2105 if (ret) { 2106 destroy_workqueue(gfs2_delete_workqueue); 2107 destroy_workqueue(glock_workqueue); 2108 rhashtable_destroy(&gl_hash_table); 2109 return ret; 2110 } 2111 2112 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++) 2113 init_waitqueue_head(glock_wait_table + i); 2114 2115 return 0; 2116 } 2117 2118 void gfs2_glock_exit(void) 2119 { 2120 unregister_shrinker(&glock_shrinker); 2121 rhashtable_destroy(&gl_hash_table); 2122 destroy_workqueue(glock_workqueue); 2123 destroy_workqueue(gfs2_delete_workqueue); 2124 } 2125 2126 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) 2127 { 2128 struct gfs2_glock *gl = gi->gl; 2129 2130 if (gl) { 2131 if (n == 0) 2132 return; 2133 if (!lockref_put_not_zero(&gl->gl_lockref)) 2134 gfs2_glock_queue_put(gl); 2135 } 2136 for (;;) { 2137 gl = rhashtable_walk_next(&gi->hti); 2138 if (IS_ERR_OR_NULL(gl)) { 2139 if (gl == ERR_PTR(-EAGAIN)) { 2140 n = 1; 2141 continue; 2142 } 2143 gl = NULL; 2144 break; 2145 } 2146 if (gl->gl_name.ln_sbd != gi->sdp) 2147 continue; 2148 if (n <= 1) { 2149 if (!lockref_get_not_dead(&gl->gl_lockref)) 2150 continue; 2151 break; 2152 } else { 2153 if (__lockref_is_dead(&gl->gl_lockref)) 2154 continue; 2155 n--; 2156 } 2157 } 2158 gi->gl = gl; 2159 } 2160 2161 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 2162 __acquires(RCU) 2163 { 2164 struct gfs2_glock_iter *gi = seq->private; 2165 loff_t n; 2166 2167 /* 2168 * We can either stay where we are, skip to the next hash table 2169 * entry, or start from the beginning. 2170 */ 2171 if (*pos < gi->last_pos) { 2172 rhashtable_walk_exit(&gi->hti); 2173 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2174 n = *pos + 1; 2175 } else { 2176 n = *pos - gi->last_pos; 2177 } 2178 2179 rhashtable_walk_start(&gi->hti); 2180 2181 gfs2_glock_iter_next(gi, n); 2182 gi->last_pos = *pos; 2183 return gi->gl; 2184 } 2185 2186 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, 2187 loff_t *pos) 2188 { 2189 struct gfs2_glock_iter *gi = seq->private; 2190 2191 (*pos)++; 2192 gi->last_pos = *pos; 2193 gfs2_glock_iter_next(gi, 1); 2194 return gi->gl; 2195 } 2196 2197 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 2198 __releases(RCU) 2199 { 2200 struct gfs2_glock_iter *gi = seq->private; 2201 2202 rhashtable_walk_stop(&gi->hti); 2203 } 2204 2205 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 2206 { 2207 dump_glock(seq, iter_ptr, false); 2208 return 0; 2209 } 2210 2211 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 2212 { 2213 preempt_disable(); 2214 if (*pos >= GFS2_NR_SBSTATS) 2215 return NULL; 2216 return pos; 2217 } 2218 2219 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 2220 loff_t *pos) 2221 { 2222 (*pos)++; 2223 if (*pos >= GFS2_NR_SBSTATS) 2224 return NULL; 2225 return pos; 2226 } 2227 2228 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) 2229 { 2230 preempt_enable(); 2231 } 2232 2233 static const struct seq_operations gfs2_glock_seq_ops = { 2234 .start = gfs2_glock_seq_start, 2235 .next = gfs2_glock_seq_next, 2236 .stop = gfs2_glock_seq_stop, 2237 .show = gfs2_glock_seq_show, 2238 }; 2239 2240 static const struct seq_operations gfs2_glstats_seq_ops = { 2241 .start = gfs2_glock_seq_start, 2242 .next = gfs2_glock_seq_next, 2243 .stop = gfs2_glock_seq_stop, 2244 .show = gfs2_glstats_seq_show, 2245 }; 2246 2247 static const struct seq_operations gfs2_sbstats_seq_ops = { 2248 .start = gfs2_sbstats_seq_start, 2249 .next = gfs2_sbstats_seq_next, 2250 .stop = gfs2_sbstats_seq_stop, 2251 .show = gfs2_sbstats_seq_show, 2252 }; 2253 2254 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL) 2255 2256 static int __gfs2_glocks_open(struct inode *inode, struct file *file, 2257 const struct seq_operations *ops) 2258 { 2259 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter)); 2260 if (ret == 0) { 2261 struct seq_file *seq = file->private_data; 2262 struct gfs2_glock_iter *gi = seq->private; 2263 2264 gi->sdp = inode->i_private; 2265 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 2266 if (seq->buf) 2267 seq->size = GFS2_SEQ_GOODSIZE; 2268 /* 2269 * Initially, we are "before" the first hash table entry; the 2270 * first call to rhashtable_walk_next gets us the first entry. 2271 */ 2272 gi->last_pos = -1; 2273 gi->gl = NULL; 2274 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2275 } 2276 return ret; 2277 } 2278 2279 static int gfs2_glocks_open(struct inode *inode, struct file *file) 2280 { 2281 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops); 2282 } 2283 2284 static int gfs2_glocks_release(struct inode *inode, struct file *file) 2285 { 2286 struct seq_file *seq = file->private_data; 2287 struct gfs2_glock_iter *gi = seq->private; 2288 2289 if (gi->gl) 2290 gfs2_glock_put(gi->gl); 2291 rhashtable_walk_exit(&gi->hti); 2292 return seq_release_private(inode, file); 2293 } 2294 2295 static int gfs2_glstats_open(struct inode *inode, struct file *file) 2296 { 2297 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops); 2298 } 2299 2300 static int gfs2_sbstats_open(struct inode *inode, struct file *file) 2301 { 2302 int ret = seq_open(file, &gfs2_sbstats_seq_ops); 2303 if (ret == 0) { 2304 struct seq_file *seq = file->private_data; 2305 seq->private = inode->i_private; /* sdp */ 2306 } 2307 return ret; 2308 } 2309 2310 static const struct file_operations gfs2_glocks_fops = { 2311 .owner = THIS_MODULE, 2312 .open = gfs2_glocks_open, 2313 .read = seq_read, 2314 .llseek = seq_lseek, 2315 .release = gfs2_glocks_release, 2316 }; 2317 2318 static const struct file_operations gfs2_glstats_fops = { 2319 .owner = THIS_MODULE, 2320 .open = gfs2_glstats_open, 2321 .read = seq_read, 2322 .llseek = seq_lseek, 2323 .release = gfs2_glocks_release, 2324 }; 2325 2326 static const struct file_operations gfs2_sbstats_fops = { 2327 .owner = THIS_MODULE, 2328 .open = gfs2_sbstats_open, 2329 .read = seq_read, 2330 .llseek = seq_lseek, 2331 .release = seq_release, 2332 }; 2333 2334 void gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 2335 { 2336 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); 2337 2338 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2339 &gfs2_glocks_fops); 2340 2341 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2342 &gfs2_glstats_fops); 2343 2344 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2345 &gfs2_sbstats_fops); 2346 } 2347 2348 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) 2349 { 2350 debugfs_remove_recursive(sdp->debugfs_dir); 2351 sdp->debugfs_dir = NULL; 2352 } 2353 2354 void gfs2_register_debugfs(void) 2355 { 2356 gfs2_root = debugfs_create_dir("gfs2", NULL); 2357 } 2358 2359 void gfs2_unregister_debugfs(void) 2360 { 2361 debugfs_remove(gfs2_root); 2362 gfs2_root = NULL; 2363 } 2364