1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/spinlock.h> 12 #include <linux/buffer_head.h> 13 #include <linux/delay.h> 14 #include <linux/sort.h> 15 #include <linux/hash.h> 16 #include <linux/jhash.h> 17 #include <linux/kallsyms.h> 18 #include <linux/gfs2_ondisk.h> 19 #include <linux/list.h> 20 #include <linux/wait.h> 21 #include <linux/module.h> 22 #include <linux/uaccess.h> 23 #include <linux/seq_file.h> 24 #include <linux/debugfs.h> 25 #include <linux/kthread.h> 26 #include <linux/freezer.h> 27 #include <linux/workqueue.h> 28 #include <linux/jiffies.h> 29 #include <linux/rcupdate.h> 30 #include <linux/rculist_bl.h> 31 #include <linux/bit_spinlock.h> 32 #include <linux/percpu.h> 33 #include <linux/list_sort.h> 34 #include <linux/lockref.h> 35 #include <linux/rhashtable.h> 36 #include <linux/pid_namespace.h> 37 #include <linux/file.h> 38 39 #include "gfs2.h" 40 #include "incore.h" 41 #include "glock.h" 42 #include "glops.h" 43 #include "inode.h" 44 #include "lops.h" 45 #include "meta_io.h" 46 #include "quota.h" 47 #include "super.h" 48 #include "util.h" 49 #include "bmap.h" 50 #define CREATE_TRACE_POINTS 51 #include "trace_gfs2.h" 52 53 struct gfs2_glock_iter { 54 struct gfs2_sbd *sdp; /* incore superblock */ 55 struct rhashtable_iter hti; /* rhashtable iterator */ 56 struct gfs2_glock *gl; /* current glock struct */ 57 loff_t last_pos; /* last position */ 58 }; 59 60 typedef void (*glock_examiner) (struct gfs2_glock * gl); 61 62 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 63 static void request_demote(struct gfs2_glock *gl, unsigned int state, 64 unsigned long delay, bool remote); 65 66 static struct dentry *gfs2_root; 67 static LIST_HEAD(lru_list); 68 static atomic_t lru_count = ATOMIC_INIT(0); 69 static DEFINE_SPINLOCK(lru_lock); 70 71 #define GFS2_GL_HASH_SHIFT 15 72 #define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT) 73 74 static const struct rhashtable_params ht_parms = { 75 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4, 76 .key_len = offsetofend(struct lm_lockname, ln_type), 77 .key_offset = offsetof(struct gfs2_glock, gl_name), 78 .head_offset = offsetof(struct gfs2_glock, gl_node), 79 }; 80 81 static struct rhashtable gl_hash_table; 82 83 #define GLOCK_WAIT_TABLE_BITS 12 84 #define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS) 85 static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned; 86 87 struct wait_glock_queue { 88 struct lm_lockname *name; 89 wait_queue_entry_t wait; 90 }; 91 92 static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode, 93 int sync, void *key) 94 { 95 struct wait_glock_queue *wait_glock = 96 container_of(wait, struct wait_glock_queue, wait); 97 struct lm_lockname *wait_name = wait_glock->name; 98 struct lm_lockname *wake_name = key; 99 100 if (wake_name->ln_sbd != wait_name->ln_sbd || 101 wake_name->ln_number != wait_name->ln_number || 102 wake_name->ln_type != wait_name->ln_type) 103 return 0; 104 return autoremove_wake_function(wait, mode, sync, key); 105 } 106 107 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name) 108 { 109 u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0); 110 111 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS); 112 } 113 114 /** 115 * wake_up_glock - 
Wake up waiters on a glock 116 * @gl: the glock 117 */ 118 static void wake_up_glock(struct gfs2_glock *gl) 119 { 120 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); 121 122 if (waitqueue_active(wq)) 123 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); 124 } 125 126 static void gfs2_glock_dealloc(struct rcu_head *rcu) 127 { 128 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 129 130 kfree(gl->gl_lksb.sb_lvbptr); 131 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 132 struct gfs2_glock_aspace *gla = 133 container_of(gl, struct gfs2_glock_aspace, glock); 134 kmem_cache_free(gfs2_glock_aspace_cachep, gla); 135 } else 136 kmem_cache_free(gfs2_glock_cachep, gl); 137 } 138 139 /** 140 * glock_blocked_by_withdraw - determine if we can still use a glock 141 * @gl: the glock 142 * 143 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted 144 * when we're withdrawn. For example, to maintain metadata integrity, we should 145 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like 146 * the iopen or freeze glock may be safely used because none of their 147 * metadata goes through the journal. So in general, we should disallow all 148 * glocks that are journaled, and allow all the others. One exception is: 149 * we need to allow our active journal to be promoted and demoted so others 150 * may recover it and we can reacquire it when they're done. 151 */ 152 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) 153 { 154 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 155 156 if (!gfs2_withdrawing_or_withdrawn(sdp)) 157 return false; 158 if (gl->gl_ops->go_flags & GLOF_NONDISK) 159 return false; 160 if (!sdp->sd_jdesc || 161 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) 162 return false; 163 return true; 164 } 165 166 static void __gfs2_glock_free(struct gfs2_glock *gl) 167 { 168 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 169 smp_mb(); 170 wake_up_glock(gl); 171 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 172 } 173 174 void gfs2_glock_free(struct gfs2_glock *gl) { 175 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 176 177 __gfs2_glock_free(gl); 178 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 179 wake_up(&sdp->sd_kill_wait); 180 } 181 182 void gfs2_glock_free_later(struct gfs2_glock *gl) { 183 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 184 185 spin_lock(&lru_lock); 186 list_add(&gl->gl_lru, &sdp->sd_dead_glocks); 187 spin_unlock(&lru_lock); 188 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 189 wake_up(&sdp->sd_kill_wait); 190 } 191 192 static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp) 193 { 194 struct list_head *list = &sdp->sd_dead_glocks; 195 196 while(!list_empty(list)) { 197 struct gfs2_glock *gl; 198 199 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 200 list_del_init(&gl->gl_lru); 201 __gfs2_glock_free(gl); 202 } 203 } 204 205 /** 206 * gfs2_glock_hold() - increment reference count on glock 207 * @gl: The glock to hold 208 * 209 */ 210 211 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl) 212 { 213 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 214 lockref_get(&gl->gl_lockref); 215 return gl; 216 } 217 218 static void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 219 { 220 spin_lock(&lru_lock); 221 list_move_tail(&gl->gl_lru, &lru_list); 222 223 if (!test_bit(GLF_LRU, &gl->gl_flags)) { 224 set_bit(GLF_LRU, &gl->gl_flags); 225 atomic_inc(&lru_count); 226 } 227 228 spin_unlock(&lru_lock); 229 } 230 231 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 232 { 233 
spin_lock(&lru_lock); 234 if (test_bit(GLF_LRU, &gl->gl_flags)) { 235 list_del_init(&gl->gl_lru); 236 atomic_dec(&lru_count); 237 clear_bit(GLF_LRU, &gl->gl_flags); 238 } 239 spin_unlock(&lru_lock); 240 } 241 242 /* 243 * Enqueue the glock on the work queue. Passes one glock reference on to the 244 * work queue. 245 */ 246 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 247 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 248 249 if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) { 250 /* 251 * We are holding the lockref spinlock, and the work was still 252 * queued above. The queued work (glock_work_func) takes that 253 * spinlock before dropping its glock reference(s), so it 254 * cannot have dropped them in the meantime. 255 */ 256 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); 257 gl->gl_lockref.count--; 258 } 259 } 260 261 static void __gfs2_glock_put(struct gfs2_glock *gl) 262 { 263 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 264 struct address_space *mapping = gfs2_glock2aspace(gl); 265 266 lockref_mark_dead(&gl->gl_lockref); 267 spin_unlock(&gl->gl_lockref.lock); 268 gfs2_glock_remove_from_lru(gl); 269 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 270 if (mapping) { 271 truncate_inode_pages_final(mapping); 272 if (!gfs2_withdrawing_or_withdrawn(sdp)) 273 GLOCK_BUG_ON(gl, !mapping_empty(mapping)); 274 } 275 trace_gfs2_glock_put(gl); 276 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); 277 } 278 279 static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl) 280 { 281 if (lockref_put_or_lock(&gl->gl_lockref)) 282 return true; 283 GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1); 284 if (gl->gl_state != LM_ST_UNLOCKED) { 285 gl->gl_lockref.count--; 286 gfs2_glock_add_to_lru(gl); 287 spin_unlock(&gl->gl_lockref.lock); 288 return true; 289 } 290 return false; 291 } 292 293 /** 294 * gfs2_glock_put() - Decrement reference count on glock 295 * @gl: The glock to put 296 * 297 */ 298 299 void gfs2_glock_put(struct gfs2_glock *gl) 300 { 301 if (__gfs2_glock_put_or_lock(gl)) 302 return; 303 304 __gfs2_glock_put(gl); 305 } 306 307 /* 308 * gfs2_glock_put_async - Decrement reference count without sleeping 309 * @gl: The glock to put 310 * 311 * Decrement the reference count on glock immediately unless it is the last 312 * reference. Defer putting the last reference to work queue context. 313 */ 314 void gfs2_glock_put_async(struct gfs2_glock *gl) 315 { 316 if (__gfs2_glock_put_or_lock(gl)) 317 return; 318 319 gfs2_glock_queue_work(gl, 0); 320 spin_unlock(&gl->gl_lockref.lock); 321 } 322 323 /** 324 * may_grant - check if it's ok to grant a new lock 325 * @gl: The glock 326 * @current_gh: One of the current holders of @gl 327 * @gh: The lock request which we wish to grant 328 * 329 * With our current compatibility rules, if a glock has one or more active 330 * holders (HIF_HOLDER flag set), any of those holders can be passed in as 331 * @current_gh; they are all the same as far as compatibility with the new @gh 332 * goes. 333 * 334 * Returns true if it's ok to grant the lock. 335 */ 336 337 static inline bool may_grant(struct gfs2_glock *gl, 338 struct gfs2_holder *current_gh, 339 struct gfs2_holder *gh) 340 { 341 if (current_gh) { 342 GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags)); 343 344 switch(current_gh->gh_state) { 345 case LM_ST_EXCLUSIVE: 346 /* 347 * Here we make a special exception to grant holders 348 * who agree to share the EX lock with other holders 349 * who also have the bit set. 
If the original holder 350 * has the LM_FLAG_NODE_SCOPE bit set, we grant more 351 * holders with the bit set. 352 */ 353 return gh->gh_state == LM_ST_EXCLUSIVE && 354 (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) && 355 (gh->gh_flags & LM_FLAG_NODE_SCOPE); 356 357 case LM_ST_SHARED: 358 case LM_ST_DEFERRED: 359 return gh->gh_state == current_gh->gh_state; 360 361 default: 362 return false; 363 } 364 } 365 366 if (gl->gl_state == gh->gh_state) 367 return true; 368 if (gh->gh_flags & GL_EXACT) 369 return false; 370 if (gl->gl_state == LM_ST_EXCLUSIVE) { 371 return gh->gh_state == LM_ST_SHARED || 372 gh->gh_state == LM_ST_DEFERRED; 373 } 374 if (gh->gh_flags & LM_FLAG_ANY) 375 return gl->gl_state != LM_ST_UNLOCKED; 376 return false; 377 } 378 379 static void gfs2_holder_wake(struct gfs2_holder *gh) 380 { 381 clear_bit(HIF_WAIT, &gh->gh_iflags); 382 smp_mb__after_atomic(); 383 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 384 if (gh->gh_flags & GL_ASYNC) { 385 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; 386 387 wake_up(&sdp->sd_async_glock_wait); 388 } 389 } 390 391 /** 392 * do_error - Something unexpected has happened during a lock request 393 * @gl: The glock 394 * @ret: The status from the DLM 395 */ 396 397 static void do_error(struct gfs2_glock *gl, const int ret) 398 { 399 struct gfs2_holder *gh, *tmp; 400 401 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 402 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 403 continue; 404 if (ret & LM_OUT_ERROR) 405 gh->gh_error = -EIO; 406 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 407 gh->gh_error = GLR_TRYFAILED; 408 else 409 continue; 410 list_del_init(&gh->gh_list); 411 trace_gfs2_glock_queue(gh, 0); 412 gfs2_holder_wake(gh); 413 } 414 } 415 416 /** 417 * find_first_holder - find the first "holder" gh 418 * @gl: the glock 419 */ 420 421 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 422 { 423 struct gfs2_holder *gh; 424 425 if (!list_empty(&gl->gl_holders)) { 426 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, 427 gh_list); 428 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 429 return gh; 430 } 431 return NULL; 432 } 433 434 /* 435 * gfs2_instantiate - Call the glops instantiate function 436 * @gh: The glock holder 437 * 438 * Returns: 0 if instantiate was successful, or error. 439 */ 440 int gfs2_instantiate(struct gfs2_holder *gh) 441 { 442 struct gfs2_glock *gl = gh->gh_gl; 443 const struct gfs2_glock_operations *glops = gl->gl_ops; 444 int ret; 445 446 again: 447 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags)) 448 goto done; 449 450 /* 451 * Since we unlock the lockref lock, we set a flag to indicate 452 * instantiate is in progress. 453 */ 454 if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) { 455 wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG, 456 TASK_UNINTERRUPTIBLE); 457 /* 458 * Here we just waited for a different instantiate to finish. 459 * But that may not have been successful, as when a process 460 * locks an inode glock _before_ it has an actual inode to 461 * instantiate into. So we check again. This process might 462 * have an inode to instantiate, so might be successful. 
463 */ 464 goto again; 465 } 466 467 ret = glops->go_instantiate(gl); 468 if (!ret) 469 clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); 470 clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); 471 if (ret) 472 return ret; 473 474 done: 475 if (glops->go_held) 476 return glops->go_held(gh); 477 return 0; 478 } 479 480 /** 481 * do_promote - promote as many requests as possible on the current queue 482 * @gl: The glock 483 * 484 * Returns true on success (i.e., progress was made or there are no waiters). 485 */ 486 487 static bool do_promote(struct gfs2_glock *gl) 488 { 489 struct gfs2_holder *gh, *current_gh; 490 491 current_gh = find_first_holder(gl); 492 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 493 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 494 continue; 495 if (!may_grant(gl, current_gh, gh)) { 496 /* 497 * If we get here, it means we may not grant this 498 * holder for some reason. If this holder is at the 499 * head of the list, it means we have a blocked holder 500 * at the head, so return false. 501 */ 502 if (list_is_first(&gh->gh_list, &gl->gl_holders)) 503 return false; 504 do_error(gl, 0); 505 break; 506 } 507 set_bit(HIF_HOLDER, &gh->gh_iflags); 508 trace_gfs2_promote(gh); 509 gfs2_holder_wake(gh); 510 if (!current_gh) 511 current_gh = gh; 512 } 513 return true; 514 } 515 516 /** 517 * find_first_waiter - find the first gh that's waiting for the glock 518 * @gl: the glock 519 */ 520 521 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 522 { 523 struct gfs2_holder *gh; 524 525 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 526 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 527 return gh; 528 } 529 return NULL; 530 } 531 532 /** 533 * find_last_waiter - find the last gh that's waiting for the glock 534 * @gl: the glock 535 * 536 * This also is a fast way of finding out if there are any waiters. 537 */ 538 539 static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl) 540 { 541 struct gfs2_holder *gh; 542 543 if (list_empty(&gl->gl_holders)) 544 return NULL; 545 gh = list_last_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 546 return test_bit(HIF_HOLDER, &gh->gh_iflags) ? 
NULL : gh; 547 } 548 549 /** 550 * state_change - record that the glock is now in a different state 551 * @gl: the glock 552 * @new_state: the new state 553 */ 554 555 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 556 { 557 if (new_state != gl->gl_target) 558 /* shorten our minimum hold time */ 559 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 560 GL_GLOCK_MIN_HOLD); 561 gl->gl_state = new_state; 562 gl->gl_tchange = jiffies; 563 } 564 565 static void gfs2_set_demote(struct gfs2_glock *gl) 566 { 567 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 568 569 set_bit(GLF_DEMOTE, &gl->gl_flags); 570 smp_mb(); 571 wake_up(&sdp->sd_async_glock_wait); 572 } 573 574 static void gfs2_demote_wake(struct gfs2_glock *gl) 575 { 576 gl->gl_demote_state = LM_ST_EXCLUSIVE; 577 clear_bit(GLF_DEMOTE, &gl->gl_flags); 578 smp_mb__after_atomic(); 579 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 580 } 581 582 /** 583 * finish_xmote - The DLM has replied to one of our lock requests 584 * @gl: The glock 585 * @ret: The status from the DLM 586 * 587 */ 588 589 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 590 { 591 const struct gfs2_glock_operations *glops = gl->gl_ops; 592 struct gfs2_holder *gh; 593 unsigned state = ret & LM_OUT_ST_MASK; 594 595 trace_gfs2_glock_state_change(gl, state); 596 state_change(gl, state); 597 gh = find_first_waiter(gl); 598 599 /* Demote to UN request arrived during demote to SH or DF */ 600 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 601 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 602 gl->gl_target = LM_ST_UNLOCKED; 603 604 /* Check for state != intended state */ 605 if (unlikely(state != gl->gl_target)) { 606 if (gh && (ret & LM_OUT_CANCELED)) 607 gfs2_holder_wake(gh); 608 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 609 /* move to back of queue and try next entry */ 610 if (ret & LM_OUT_CANCELED) { 611 list_move_tail(&gh->gh_list, &gl->gl_holders); 612 gh = find_first_waiter(gl); 613 gl->gl_target = gh->gh_state; 614 if (do_promote(gl)) 615 goto out; 616 goto retry; 617 } 618 /* Some error or failed "try lock" - report it */ 619 if ((ret & LM_OUT_ERROR) || 620 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 621 gl->gl_target = gl->gl_state; 622 do_error(gl, ret); 623 goto out; 624 } 625 } 626 switch(state) { 627 /* Unlocked due to conversion deadlock, try again */ 628 case LM_ST_UNLOCKED: 629 retry: 630 do_xmote(gl, gh, gl->gl_target); 631 break; 632 /* Conversion fails, unlock and try again */ 633 case LM_ST_SHARED: 634 case LM_ST_DEFERRED: 635 do_xmote(gl, gh, LM_ST_UNLOCKED); 636 break; 637 default: /* Everything else */ 638 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", 639 gl->gl_target, state); 640 GLOCK_BUG_ON(gl, 1); 641 } 642 return; 643 } 644 645 /* Fast path - we got what we asked for */ 646 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 647 gfs2_demote_wake(gl); 648 if (state != LM_ST_UNLOCKED) { 649 if (glops->go_xmote_bh) { 650 int rv; 651 652 spin_unlock(&gl->gl_lockref.lock); 653 rv = glops->go_xmote_bh(gl); 654 spin_lock(&gl->gl_lockref.lock); 655 if (rv) { 656 do_error(gl, rv); 657 goto out; 658 } 659 } 660 do_promote(gl); 661 } 662 out: 663 clear_bit(GLF_LOCK, &gl->gl_flags); 664 } 665 666 static bool is_system_glock(struct gfs2_glock *gl) 667 { 668 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 669 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 670 671 if (gl == m_ip->i_gl) 672 return true; 673 return false; 674 } 675 676 /** 677 * 
do_xmote - Calls the DLM to change the state of a lock 678 * @gl: The lock state 679 * @gh: The holder (only for promotes) 680 * @target: The target lock state 681 * 682 */ 683 684 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, 685 unsigned int target) 686 __releases(&gl->gl_lockref.lock) 687 __acquires(&gl->gl_lockref.lock) 688 { 689 const struct gfs2_glock_operations *glops = gl->gl_ops; 690 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 691 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 692 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); 693 int ret; 694 695 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && 696 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 697 goto skip_inval; 698 699 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP); 700 GLOCK_BUG_ON(gl, gl->gl_state == target); 701 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 702 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 703 glops->go_inval) { 704 /* 705 * If another process is already doing the invalidate, let that 706 * finish first. The glock state machine will get back to this 707 * holder again later. 708 */ 709 if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS, 710 &gl->gl_flags)) 711 return; 712 do_error(gl, 0); /* Fail queued try locks */ 713 } 714 gl->gl_req = target; 715 set_bit(GLF_BLOCKING, &gl->gl_flags); 716 if ((gl->gl_req == LM_ST_UNLOCKED) || 717 (gl->gl_state == LM_ST_EXCLUSIVE) || 718 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) 719 clear_bit(GLF_BLOCKING, &gl->gl_flags); 720 if (!glops->go_inval && !glops->go_sync) 721 goto skip_inval; 722 723 spin_unlock(&gl->gl_lockref.lock); 724 if (glops->go_sync) { 725 ret = glops->go_sync(gl); 726 /* If we had a problem syncing (due to io errors or whatever), 727 * we should not invalidate the metadata or tell dlm to 728 * release the glock to other nodes. 729 */ 730 if (ret) { 731 if (cmpxchg(&sdp->sd_log_error, 0, ret)) { 732 fs_err(sdp, "Error %d syncing glock\n", ret); 733 gfs2_dump_glock(NULL, gl, true); 734 } 735 spin_lock(&gl->gl_lockref.lock); 736 goto skip_inval; 737 } 738 } 739 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { 740 /* 741 * The call to go_sync should have cleared out the ail list. 742 * If there are still items, we have a problem. We ought to 743 * withdraw, but we can't because the withdraw code also uses 744 * glocks. Warn about the error, dump the glock, then fall 745 * through and wait for logd to do the withdraw for us. 746 */ 747 if ((atomic_read(&gl->gl_ail_count) != 0) && 748 (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) { 749 gfs2_glock_assert_warn(gl, 750 !atomic_read(&gl->gl_ail_count)); 751 gfs2_dump_glock(NULL, gl, true); 752 } 753 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); 754 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 755 } 756 spin_lock(&gl->gl_lockref.lock); 757 758 skip_inval: 759 gl->gl_lockref.count++; 760 /* 761 * Check for an error encountered since we called go_sync and go_inval. 762 * If so, we can't withdraw from the glock code because the withdraw 763 * code itself uses glocks (see function signal_our_withdraw) to 764 * change the mount to read-only. Most importantly, we must not call 765 * dlm to unlock the glock until the journal is in a known good state 766 * (after journal replay) otherwise other nodes may use the object 767 * (rgrp or dinode) and then later, journal replay will corrupt the 768 * file system. 
The best we can do here is wait for the logd daemon 769 * to see sd_log_error and withdraw, and in the meantime, requeue the 770 * work for later. 771 * 772 * We make a special exception for some system glocks, such as the 773 * system statfs inode glock, which needs to be granted before the 774 * gfs2_quotad daemon can exit, and that exit needs to finish before 775 * we can unmount the withdrawn file system. 776 * 777 * However, if we're just unlocking the lock (say, for unmount, when 778 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete 779 * then it's okay to tell dlm to unlock it. 780 */ 781 if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp)) 782 gfs2_withdraw_delayed(sdp); 783 if (glock_blocked_by_withdraw(gl) && 784 (target != LM_ST_UNLOCKED || 785 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) { 786 if (!is_system_glock(gl)) { 787 request_demote(gl, LM_ST_UNLOCKED, 0, false); 788 /* 789 * Ordinarily, we would call dlm and its callback would call 790 * finish_xmote, which would call state_change() to the new state. 791 * Since we withdrew, we won't call dlm, so call state_change 792 * manually, but to the UNLOCKED state we desire. 793 */ 794 state_change(gl, LM_ST_UNLOCKED); 795 /* 796 * We skip telling dlm to do the locking, so we won't get a 797 * reply that would otherwise clear GLF_LOCK. So we clear it here. 798 */ 799 clear_bit(GLF_LOCK, &gl->gl_flags); 800 clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 801 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); 802 return; 803 } else { 804 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 805 } 806 } 807 808 if (ls->ls_ops->lm_lock) { 809 spin_unlock(&gl->gl_lockref.lock); 810 ret = ls->ls_ops->lm_lock(gl, target, lck_flags); 811 spin_lock(&gl->gl_lockref.lock); 812 813 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && 814 target == LM_ST_UNLOCKED && 815 test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { 816 /* 817 * The lockspace has been released and the lock has 818 * been unlocked implicitly. 819 */ 820 } else if (ret) { 821 fs_err(sdp, "lm_lock ret %d\n", ret); 822 target = gl->gl_state | LM_OUT_ERROR; 823 } else { 824 /* The operation will be completed asynchronously. */ 825 return; 826 } 827 } 828 829 /* Complete the operation now. 
*/ 830 finish_xmote(gl, target); 831 gfs2_glock_queue_work(gl, 0); 832 } 833 834 /** 835 * run_queue - do all outstanding tasks related to a glock 836 * @gl: The glock in question 837 * @nonblock: True if we must not block in run_queue 838 * 839 */ 840 841 static void run_queue(struct gfs2_glock *gl, const int nonblock) 842 __releases(&gl->gl_lockref.lock) 843 __acquires(&gl->gl_lockref.lock) 844 { 845 struct gfs2_holder *gh = NULL; 846 847 if (test_bit(GLF_LOCK, &gl->gl_flags)) 848 return; 849 set_bit(GLF_LOCK, &gl->gl_flags); 850 851 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 852 853 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 854 gl->gl_demote_state != gl->gl_state) { 855 if (find_first_holder(gl)) 856 goto out_unlock; 857 if (nonblock) 858 goto out_sched; 859 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 860 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 861 gl->gl_target = gl->gl_demote_state; 862 } else { 863 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 864 gfs2_demote_wake(gl); 865 if (do_promote(gl)) 866 goto out_unlock; 867 gh = find_first_waiter(gl); 868 gl->gl_target = gh->gh_state; 869 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 870 do_error(gl, 0); /* Fail queued try locks */ 871 } 872 do_xmote(gl, gh, gl->gl_target); 873 return; 874 875 out_sched: 876 clear_bit(GLF_LOCK, &gl->gl_flags); 877 smp_mb__after_atomic(); 878 gl->gl_lockref.count++; 879 gfs2_glock_queue_work(gl, 0); 880 return; 881 882 out_unlock: 883 clear_bit(GLF_LOCK, &gl->gl_flags); 884 smp_mb__after_atomic(); 885 } 886 887 /** 888 * glock_set_object - set the gl_object field of a glock 889 * @gl: the glock 890 * @object: the object 891 */ 892 void glock_set_object(struct gfs2_glock *gl, void *object) 893 { 894 void *prev_object; 895 896 spin_lock(&gl->gl_lockref.lock); 897 prev_object = gl->gl_object; 898 gl->gl_object = object; 899 spin_unlock(&gl->gl_lockref.lock); 900 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) { 901 pr_warn("glock=%u/%llx\n", 902 gl->gl_name.ln_type, 903 (unsigned long long)gl->gl_name.ln_number); 904 gfs2_dump_glock(NULL, gl, true); 905 } 906 } 907 908 /** 909 * glock_clear_object - clear the gl_object field of a glock 910 * @gl: the glock 911 * @object: object the glock currently points at 912 */ 913 void glock_clear_object(struct gfs2_glock *gl, void *object) 914 { 915 void *prev_object; 916 917 spin_lock(&gl->gl_lockref.lock); 918 prev_object = gl->gl_object; 919 gl->gl_object = NULL; 920 spin_unlock(&gl->gl_lockref.lock); 921 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) { 922 pr_warn("glock=%u/%llx\n", 923 gl->gl_name.ln_type, 924 (unsigned long long)gl->gl_name.ln_number); 925 gfs2_dump_glock(NULL, gl, true); 926 } 927 } 928 929 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) 930 { 931 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 932 933 if (ri->ri_magic == 0) 934 ri->ri_magic = cpu_to_be32(GFS2_MAGIC); 935 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC)) 936 ri->ri_generation_deleted = cpu_to_be64(generation); 937 } 938 939 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) 940 { 941 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 942 943 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC)) 944 return false; 945 return generation <= be64_to_cpu(ri->ri_generation_deleted); 946 } 947 948 static void gfs2_glock_poke(struct gfs2_glock *gl) 949 { 950 int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP; 951 struct gfs2_holder gh; 952 int error; 953 954 
__gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_); 955 error = gfs2_glock_nq(&gh); 956 if (!error) 957 gfs2_glock_dq(&gh); 958 gfs2_holder_uninit(&gh); 959 } 960 961 static bool gfs2_try_evict(struct gfs2_glock *gl) 962 { 963 struct gfs2_inode *ip; 964 bool evicted = false; 965 966 /* 967 * If there is contention on the iopen glock and we have an inode, try 968 * to grab and release the inode so that it can be evicted. This will 969 * allow the remote node to go ahead and delete the inode without us 970 * having to do it, which will avoid rgrp glock thrashing. 971 * 972 * The remote node is likely still holding the corresponding inode 973 * glock, so it will run before we get to verify that the delete has 974 * happened below. 975 */ 976 spin_lock(&gl->gl_lockref.lock); 977 ip = gl->gl_object; 978 if (ip && !igrab(&ip->i_inode)) 979 ip = NULL; 980 spin_unlock(&gl->gl_lockref.lock); 981 if (ip) { 982 gl->gl_no_formal_ino = ip->i_no_formal_ino; 983 set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 984 d_prune_aliases(&ip->i_inode); 985 iput(&ip->i_inode); 986 987 /* If the inode was evicted, gl->gl_object will now be NULL. */ 988 spin_lock(&gl->gl_lockref.lock); 989 ip = gl->gl_object; 990 if (ip) { 991 clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 992 if (!igrab(&ip->i_inode)) 993 ip = NULL; 994 } 995 spin_unlock(&gl->gl_lockref.lock); 996 if (ip) { 997 gfs2_glock_poke(ip->i_gl); 998 iput(&ip->i_inode); 999 } 1000 evicted = !ip; 1001 } 1002 return evicted; 1003 } 1004 1005 bool gfs2_queue_try_to_evict(struct gfs2_glock *gl) 1006 { 1007 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1008 1009 if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) 1010 return false; 1011 return queue_delayed_work(sdp->sd_delete_wq, 1012 &gl->gl_delete, 0); 1013 } 1014 1015 static bool gfs2_queue_verify_evict(struct gfs2_glock *gl) 1016 { 1017 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1018 1019 if (test_and_set_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) 1020 return false; 1021 return queue_delayed_work(sdp->sd_delete_wq, 1022 &gl->gl_delete, 5 * HZ); 1023 } 1024 1025 static void delete_work_func(struct work_struct *work) 1026 { 1027 struct delayed_work *dwork = to_delayed_work(work); 1028 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); 1029 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1030 struct inode *inode; 1031 u64 no_addr = gl->gl_name.ln_number; 1032 1033 if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) { 1034 /* 1035 * If we can evict the inode, give the remote node trying to 1036 * delete the inode some time before verifying that the delete 1037 * has happened. Otherwise, if we cause contention on the inode glock 1038 * immediately, the remote node will think that we still have 1039 * the inode in use, and so it will give up waiting. 1040 * 1041 * If we can't evict the inode, signal to the remote node that 1042 * the inode is still in use. We'll later try to delete the 1043 * inode locally in gfs2_evict_inode. 1044 * 1045 * FIXME: We only need to verify that the remote node has 1046 * deleted the inode because nodes before this remote delete 1047 * rework won't cooperate. At a later time, when we no longer 1048 * care about compatibility with such nodes, we can skip this 1049 * step entirely. 
1050 */ 1051 if (gfs2_try_evict(gl)) { 1052 if (test_bit(SDF_KILL, &sdp->sd_flags)) 1053 goto out; 1054 if (gfs2_queue_verify_evict(gl)) 1055 return; 1056 } 1057 goto out; 1058 } 1059 1060 if (test_and_clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) { 1061 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, 1062 GFS2_BLKST_UNLINKED); 1063 if (IS_ERR(inode)) { 1064 if (PTR_ERR(inode) == -EAGAIN && 1065 !test_bit(SDF_KILL, &sdp->sd_flags) && 1066 gfs2_queue_verify_evict(gl)) 1067 return; 1068 } else { 1069 d_prune_aliases(inode); 1070 iput(inode); 1071 } 1072 } 1073 1074 out: 1075 gfs2_glock_put(gl); 1076 } 1077 1078 static void glock_work_func(struct work_struct *work) 1079 { 1080 unsigned long delay = 0; 1081 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 1082 unsigned int drop_refs = 1; 1083 1084 spin_lock(&gl->gl_lockref.lock); 1085 if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) { 1086 clear_bit(GLF_HAVE_REPLY, &gl->gl_flags); 1087 finish_xmote(gl, gl->gl_reply); 1088 drop_refs++; 1089 } 1090 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1091 gl->gl_state != LM_ST_UNLOCKED && 1092 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 1093 if (gl->gl_name.ln_type == LM_TYPE_INODE) { 1094 unsigned long holdtime, now = jiffies; 1095 1096 holdtime = gl->gl_tchange + gl->gl_hold_time; 1097 if (time_before(now, holdtime)) 1098 delay = holdtime - now; 1099 } 1100 1101 if (!delay) { 1102 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 1103 gfs2_set_demote(gl); 1104 } 1105 } 1106 run_queue(gl, 0); 1107 if (delay) { 1108 /* Keep one glock reference for the work we requeue. */ 1109 drop_refs--; 1110 gfs2_glock_queue_work(gl, delay); 1111 } 1112 1113 /* Drop the remaining glock references manually. */ 1114 GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs); 1115 gl->gl_lockref.count -= drop_refs; 1116 if (!gl->gl_lockref.count) { 1117 if (gl->gl_state == LM_ST_UNLOCKED) { 1118 __gfs2_glock_put(gl); 1119 return; 1120 } 1121 gfs2_glock_add_to_lru(gl); 1122 } 1123 spin_unlock(&gl->gl_lockref.lock); 1124 } 1125 1126 static struct gfs2_glock *find_insert_glock(struct lm_lockname *name, 1127 struct gfs2_glock *new) 1128 { 1129 struct wait_glock_queue wait; 1130 wait_queue_head_t *wq = glock_waitqueue(name); 1131 struct gfs2_glock *gl; 1132 1133 wait.name = name; 1134 init_wait(&wait.wait); 1135 wait.wait.func = glock_wake_function; 1136 1137 again: 1138 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 1139 rcu_read_lock(); 1140 if (new) { 1141 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, 1142 &new->gl_node, ht_parms); 1143 if (IS_ERR(gl)) 1144 goto out; 1145 } else { 1146 gl = rhashtable_lookup_fast(&gl_hash_table, 1147 name, ht_parms); 1148 } 1149 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { 1150 rcu_read_unlock(); 1151 schedule(); 1152 goto again; 1153 } 1154 out: 1155 rcu_read_unlock(); 1156 finish_wait(wq, &wait.wait); 1157 if (gl) 1158 gfs2_glock_remove_from_lru(gl); 1159 return gl; 1160 } 1161 1162 /** 1163 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 1164 * @sdp: The GFS2 superblock 1165 * @number: the lock number 1166 * @glops: The glock_operations to use 1167 * @create: If 0, don't create the glock if it doesn't exist 1168 * @glp: the glock is returned here 1169 * 1170 * This does not lock a glock, just finds/creates structures for one. 
1171 * 1172 * Returns: errno 1173 */ 1174 1175 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 1176 const struct gfs2_glock_operations *glops, int create, 1177 struct gfs2_glock **glp) 1178 { 1179 struct super_block *s = sdp->sd_vfs; 1180 struct lm_lockname name = { .ln_number = number, 1181 .ln_type = glops->go_type, 1182 .ln_sbd = sdp }; 1183 struct gfs2_glock *gl, *tmp; 1184 struct address_space *mapping; 1185 1186 gl = find_insert_glock(&name, NULL); 1187 if (gl) 1188 goto found; 1189 if (!create) 1190 return -ENOENT; 1191 1192 if (glops->go_flags & GLOF_ASPACE) { 1193 struct gfs2_glock_aspace *gla = 1194 kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS); 1195 if (!gla) 1196 return -ENOMEM; 1197 gl = &gla->glock; 1198 } else { 1199 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS); 1200 if (!gl) 1201 return -ENOMEM; 1202 } 1203 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 1204 gl->gl_ops = glops; 1205 1206 if (glops->go_flags & GLOF_LVB) { 1207 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 1208 if (!gl->gl_lksb.sb_lvbptr) { 1209 gfs2_glock_dealloc(&gl->gl_rcu); 1210 return -ENOMEM; 1211 } 1212 } 1213 1214 atomic_inc(&sdp->sd_glock_disposal); 1215 gl->gl_node.next = NULL; 1216 gl->gl_flags = BIT(GLF_INITIAL); 1217 if (glops->go_instantiate) 1218 gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED); 1219 gl->gl_name = name; 1220 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); 1221 gl->gl_lockref.count = 1; 1222 gl->gl_state = LM_ST_UNLOCKED; 1223 gl->gl_target = LM_ST_UNLOCKED; 1224 gl->gl_demote_state = LM_ST_EXCLUSIVE; 1225 gl->gl_dstamp = 0; 1226 preempt_disable(); 1227 /* We use the global stats to estimate the initial per-glock stats */ 1228 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 1229 preempt_enable(); 1230 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 1231 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 1232 gl->gl_tchange = jiffies; 1233 gl->gl_object = NULL; 1234 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 1235 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 1236 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) 1237 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); 1238 1239 mapping = gfs2_glock2aspace(gl); 1240 if (mapping) { 1241 mapping->a_ops = &gfs2_meta_aops; 1242 mapping->host = s->s_bdev->bd_mapping->host; 1243 mapping->flags = 0; 1244 mapping_set_gfp_mask(mapping, GFP_NOFS); 1245 mapping->i_private_data = NULL; 1246 mapping->writeback_index = 0; 1247 } 1248 1249 tmp = find_insert_glock(&name, gl); 1250 if (tmp) { 1251 gfs2_glock_dealloc(&gl->gl_rcu); 1252 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 1253 wake_up(&sdp->sd_kill_wait); 1254 1255 if (IS_ERR(tmp)) 1256 return PTR_ERR(tmp); 1257 gl = tmp; 1258 } 1259 1260 found: 1261 *glp = gl; 1262 return 0; 1263 } 1264 1265 /** 1266 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way 1267 * @gl: the glock 1268 * @state: the state we're requesting 1269 * @flags: the modifier flags 1270 * @gh: the holder structure 1271 * 1272 */ 1273 1274 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, 1275 struct gfs2_holder *gh, unsigned long ip) 1276 { 1277 INIT_LIST_HEAD(&gh->gh_list); 1278 gh->gh_gl = gfs2_glock_hold(gl); 1279 gh->gh_ip = ip; 1280 gh->gh_owner_pid = get_pid(task_pid(current)); 1281 gh->gh_state = state; 1282 gh->gh_flags = flags; 1283 gh->gh_iflags = 0; 1284 } 1285 1286 /** 1287 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 1288 * @state: the state we're requesting 1289 * @flags: the 
modifier flags 1290 * @gh: the holder structure 1291 * 1292 * Don't mess with the glock. 1293 * 1294 */ 1295 1296 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) 1297 { 1298 gh->gh_state = state; 1299 gh->gh_flags = flags; 1300 gh->gh_iflags = 0; 1301 gh->gh_ip = _RET_IP_; 1302 put_pid(gh->gh_owner_pid); 1303 gh->gh_owner_pid = get_pid(task_pid(current)); 1304 } 1305 1306 /** 1307 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference) 1308 * @gh: the holder structure 1309 * 1310 */ 1311 1312 void gfs2_holder_uninit(struct gfs2_holder *gh) 1313 { 1314 put_pid(gh->gh_owner_pid); 1315 gfs2_glock_put(gh->gh_gl); 1316 gfs2_holder_mark_uninitialized(gh); 1317 gh->gh_ip = 0; 1318 } 1319 1320 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, 1321 unsigned long start_time) 1322 { 1323 /* Have we waited longer than a second? */ 1324 if (time_after(jiffies, start_time + HZ)) { 1325 /* Lengthen the minimum hold time. */ 1326 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, 1327 GL_GLOCK_MAX_HOLD); 1328 } 1329 } 1330 1331 /** 1332 * gfs2_glock_holder_ready - holder is ready and its error code can be collected 1333 * @gh: the glock holder 1334 * 1335 * Called when a glock holder no longer needs to be waited for because it is 1336 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has 1337 * failed (gh_error != 0). 1338 */ 1339 1340 int gfs2_glock_holder_ready(struct gfs2_holder *gh) 1341 { 1342 if (gh->gh_error || (gh->gh_flags & GL_SKIP)) 1343 return gh->gh_error; 1344 gh->gh_error = gfs2_instantiate(gh); 1345 if (gh->gh_error) 1346 gfs2_glock_dq(gh); 1347 return gh->gh_error; 1348 } 1349 1350 /** 1351 * gfs2_glock_wait - wait on a glock acquisition 1352 * @gh: the glock holder 1353 * 1354 * Returns: 0 on success 1355 */ 1356 1357 int gfs2_glock_wait(struct gfs2_holder *gh) 1358 { 1359 unsigned long start_time = jiffies; 1360 1361 might_sleep(); 1362 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1363 gfs2_glock_update_hold_time(gh->gh_gl, start_time); 1364 return gfs2_glock_holder_ready(gh); 1365 } 1366 1367 static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) 1368 { 1369 int i; 1370 1371 for (i = 0; i < num_gh; i++) 1372 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) 1373 return 1; 1374 return 0; 1375 } 1376 1377 /** 1378 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions 1379 * @num_gh: the number of holders in the array 1380 * @ghs: the glock holder array 1381 * 1382 * Returns: 0 on success, meaning all glocks have been granted and are held. 1383 * -ESTALE if the request timed out, meaning all glocks were released, 1384 * and the caller should retry the operation. 1385 */ 1386 1387 int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) 1388 { 1389 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; 1390 int i, ret = 0, timeout = 0; 1391 unsigned long start_time = jiffies; 1392 1393 might_sleep(); 1394 /* 1395 * Total up the (minimum hold time * 2) of all glocks and use that to 1396 * determine the max amount of time we should wait. 1397 */ 1398 for (i = 0; i < num_gh; i++) 1399 timeout += ghs[i].gh_gl->gl_hold_time << 1; 1400 1401 if (!wait_event_timeout(sdp->sd_async_glock_wait, 1402 !glocks_pending(num_gh, ghs), timeout)) { 1403 ret = -ESTALE; /* request timed out. 
*/ 1404 goto out; 1405 } 1406 1407 for (i = 0; i < num_gh; i++) { 1408 struct gfs2_holder *gh = &ghs[i]; 1409 int ret2; 1410 1411 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) { 1412 gfs2_glock_update_hold_time(gh->gh_gl, 1413 start_time); 1414 } 1415 ret2 = gfs2_glock_holder_ready(gh); 1416 if (!ret) 1417 ret = ret2; 1418 } 1419 1420 out: 1421 if (ret) { 1422 for (i = 0; i < num_gh; i++) { 1423 struct gfs2_holder *gh = &ghs[i]; 1424 1425 gfs2_glock_dq(gh); 1426 } 1427 } 1428 return ret; 1429 } 1430 1431 /** 1432 * request_demote - process a demote request 1433 * @gl: the glock 1434 * @state: the state the caller wants us to change to 1435 * @delay: zero to demote immediately; otherwise pending demote 1436 * @remote: true if this came from a different cluster node 1437 * 1438 * There are only two requests that we are going to see in actual 1439 * practise: LM_ST_SHARED and LM_ST_UNLOCKED 1440 */ 1441 1442 static void request_demote(struct gfs2_glock *gl, unsigned int state, 1443 unsigned long delay, bool remote) 1444 { 1445 if (delay) 1446 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 1447 else 1448 gfs2_set_demote(gl); 1449 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 1450 gl->gl_demote_state = state; 1451 gl->gl_demote_time = jiffies; 1452 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 1453 gl->gl_demote_state != state) { 1454 gl->gl_demote_state = LM_ST_UNLOCKED; 1455 } 1456 if (gl->gl_ops->go_callback) 1457 gl->gl_ops->go_callback(gl, remote); 1458 trace_gfs2_demote_rq(gl, remote); 1459 } 1460 1461 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 1462 { 1463 struct va_format vaf; 1464 va_list args; 1465 1466 va_start(args, fmt); 1467 1468 if (seq) { 1469 seq_vprintf(seq, fmt, args); 1470 } else { 1471 vaf.fmt = fmt; 1472 vaf.va = &args; 1473 1474 pr_err("%pV", &vaf); 1475 } 1476 1477 va_end(args); 1478 } 1479 1480 static inline bool pid_is_meaningful(const struct gfs2_holder *gh) 1481 { 1482 if (!(gh->gh_flags & GL_NOPID)) 1483 return true; 1484 if (gh->gh_state == LM_ST_UNLOCKED) 1485 return true; 1486 return false; 1487 } 1488 1489 /** 1490 * add_to_queue - Add a holder to the wait queue (but look for recursion) 1491 * @gh: the holder structure to add 1492 * 1493 * Eventually we should move the recursive locking trap to a 1494 * debugging option or something like that. This is the fast 1495 * path and needs to have the minimum number of distractions. 
1496 * 1497 */ 1498 1499 static inline void add_to_queue(struct gfs2_holder *gh) 1500 __releases(&gl->gl_lockref.lock) 1501 __acquires(&gl->gl_lockref.lock) 1502 { 1503 struct gfs2_glock *gl = gh->gh_gl; 1504 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1505 struct list_head *insert_pt = NULL; 1506 struct gfs2_holder *gh2; 1507 int try_futile = 0; 1508 1509 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); 1510 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1511 GLOCK_BUG_ON(gl, true); 1512 1513 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 1514 if (test_bit(GLF_LOCK, &gl->gl_flags)) { 1515 struct gfs2_holder *current_gh; 1516 1517 current_gh = find_first_holder(gl); 1518 try_futile = !may_grant(gl, current_gh, gh); 1519 } 1520 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 1521 goto fail; 1522 } 1523 1524 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1525 if (likely(gh2->gh_owner_pid != gh->gh_owner_pid)) 1526 continue; 1527 if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK) 1528 continue; 1529 if (!pid_is_meaningful(gh2)) 1530 continue; 1531 goto trap_recursive; 1532 } 1533 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1534 if (try_futile && 1535 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 1536 fail: 1537 gh->gh_error = GLR_TRYFAILED; 1538 gfs2_holder_wake(gh); 1539 return; 1540 } 1541 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1542 continue; 1543 } 1544 trace_gfs2_glock_queue(gh, 1); 1545 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1546 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1547 if (likely(insert_pt == NULL)) { 1548 list_add_tail(&gh->gh_list, &gl->gl_holders); 1549 return; 1550 } 1551 list_add_tail(&gh->gh_list, insert_pt); 1552 spin_unlock(&gl->gl_lockref.lock); 1553 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1554 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1555 spin_lock(&gl->gl_lockref.lock); 1556 return; 1557 1558 trap_recursive: 1559 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); 1560 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); 1561 fs_err(sdp, "lock type: %d req lock state : %d\n", 1562 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 1563 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); 1564 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); 1565 fs_err(sdp, "lock type: %d req lock state : %d\n", 1566 gh->gh_gl->gl_name.ln_type, gh->gh_state); 1567 gfs2_dump_glock(NULL, gl, true); 1568 BUG(); 1569 } 1570 1571 /** 1572 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 1573 * @gh: the holder structure 1574 * 1575 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1576 * 1577 * Returns: 0, GLR_TRYFAILED, or errno on failure 1578 */ 1579 1580 int gfs2_glock_nq(struct gfs2_holder *gh) 1581 { 1582 struct gfs2_glock *gl = gh->gh_gl; 1583 int error; 1584 1585 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) 1586 return -EIO; 1587 1588 if (gh->gh_flags & GL_NOBLOCK) { 1589 struct gfs2_holder *current_gh; 1590 1591 error = -ECHILD; 1592 spin_lock(&gl->gl_lockref.lock); 1593 if (find_last_waiter(gl)) 1594 goto unlock; 1595 current_gh = find_first_holder(gl); 1596 if (!may_grant(gl, current_gh, gh)) 1597 goto unlock; 1598 set_bit(HIF_HOLDER, &gh->gh_iflags); 1599 list_add_tail(&gh->gh_list, &gl->gl_holders); 1600 trace_gfs2_promote(gh); 1601 error = 0; 1602 unlock: 1603 spin_unlock(&gl->gl_lockref.lock); 1604 return error; 1605 } 1606 1607 gh->gh_error = 0; 1608 spin_lock(&gl->gl_lockref.lock); 1609 add_to_queue(gh); 1610 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1611 
test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) { 1612 set_bit(GLF_HAVE_REPLY, &gl->gl_flags); 1613 gl->gl_lockref.count++; 1614 gfs2_glock_queue_work(gl, 0); 1615 } 1616 run_queue(gl, 1); 1617 spin_unlock(&gl->gl_lockref.lock); 1618 1619 error = 0; 1620 if (!(gh->gh_flags & GL_ASYNC)) 1621 error = gfs2_glock_wait(gh); 1622 1623 return error; 1624 } 1625 1626 /** 1627 * gfs2_glock_poll - poll to see if an async request has been completed 1628 * @gh: the holder 1629 * 1630 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1631 */ 1632 1633 int gfs2_glock_poll(struct gfs2_holder *gh) 1634 { 1635 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1636 } 1637 1638 static inline bool needs_demote(struct gfs2_glock *gl) 1639 { 1640 return (test_bit(GLF_DEMOTE, &gl->gl_flags) || 1641 test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)); 1642 } 1643 1644 static void __gfs2_glock_dq(struct gfs2_holder *gh) 1645 { 1646 struct gfs2_glock *gl = gh->gh_gl; 1647 unsigned delay = 0; 1648 int fast_path = 0; 1649 1650 /* 1651 * This holder should not be cached, so mark it for demote. 1652 * Note: this should be done before the check for needs_demote 1653 * below. 1654 */ 1655 if (gh->gh_flags & GL_NOCACHE) 1656 request_demote(gl, LM_ST_UNLOCKED, 0, false); 1657 1658 list_del_init(&gh->gh_list); 1659 clear_bit(HIF_HOLDER, &gh->gh_iflags); 1660 trace_gfs2_glock_queue(gh, 0); 1661 1662 /* 1663 * If there hasn't been a demote request we are done. 1664 * (Let the remaining holders, if any, keep holding it.) 1665 */ 1666 if (!needs_demote(gl)) { 1667 if (list_empty(&gl->gl_holders)) 1668 fast_path = 1; 1669 } 1670 1671 if (unlikely(!fast_path)) { 1672 gl->gl_lockref.count++; 1673 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1674 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1675 gl->gl_name.ln_type == LM_TYPE_INODE) 1676 delay = gl->gl_hold_time; 1677 gfs2_glock_queue_work(gl, delay); 1678 } 1679 } 1680 1681 /** 1682 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1683 * @gh: the glock holder 1684 * 1685 */ 1686 void gfs2_glock_dq(struct gfs2_holder *gh) 1687 { 1688 struct gfs2_glock *gl = gh->gh_gl; 1689 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1690 1691 spin_lock(&gl->gl_lockref.lock); 1692 if (!gfs2_holder_queued(gh)) { 1693 /* 1694 * May have already been dequeued because the locking request 1695 * was GL_ASYNC and it has failed in the meantime. 1696 */ 1697 goto out; 1698 } 1699 1700 if (list_is_first(&gh->gh_list, &gl->gl_holders) && 1701 !test_bit(HIF_HOLDER, &gh->gh_iflags)) { 1702 spin_unlock(&gl->gl_lockref.lock); 1703 gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl); 1704 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1705 spin_lock(&gl->gl_lockref.lock); 1706 } 1707 1708 /* 1709 * If we're in the process of file system withdraw, we cannot just 1710 * dequeue any glocks until our journal is recovered, lest we introduce 1711 * file system corruption. We need two exceptions to this rule: We need 1712 * to allow unlocking of nondisk glocks and the glock for our own 1713 * journal that needs recovery. 
1714 */ 1715 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && 1716 glock_blocked_by_withdraw(gl) && 1717 gh->gh_gl != sdp->sd_jinode_gl) { 1718 sdp->sd_glock_dqs_held++; 1719 spin_unlock(&gl->gl_lockref.lock); 1720 might_sleep(); 1721 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, 1722 TASK_UNINTERRUPTIBLE); 1723 spin_lock(&gl->gl_lockref.lock); 1724 } 1725 1726 __gfs2_glock_dq(gh); 1727 out: 1728 spin_unlock(&gl->gl_lockref.lock); 1729 } 1730 1731 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1732 { 1733 struct gfs2_glock *gl = gh->gh_gl; 1734 gfs2_glock_dq(gh); 1735 might_sleep(); 1736 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1737 } 1738 1739 /** 1740 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1741 * @gh: the holder structure 1742 * 1743 */ 1744 1745 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1746 { 1747 gfs2_glock_dq(gh); 1748 gfs2_holder_uninit(gh); 1749 } 1750 1751 /** 1752 * gfs2_glock_nq_num - acquire a glock based on lock number 1753 * @sdp: the filesystem 1754 * @number: the lock number 1755 * @glops: the glock operations for the type of glock 1756 * @state: the state to acquire the glock in 1757 * @flags: modifier flags for the acquisition 1758 * @gh: the struct gfs2_holder 1759 * 1760 * Returns: errno 1761 */ 1762 1763 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1764 const struct gfs2_glock_operations *glops, 1765 unsigned int state, u16 flags, struct gfs2_holder *gh) 1766 { 1767 struct gfs2_glock *gl; 1768 int error; 1769 1770 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1771 if (!error) { 1772 error = gfs2_glock_nq_init(gl, state, flags, gh); 1773 gfs2_glock_put(gl); 1774 } 1775 1776 return error; 1777 } 1778 1779 /** 1780 * glock_compare - Compare two struct gfs2_glock structures for sorting 1781 * @arg_a: the first structure 1782 * @arg_b: the second structure 1783 * 1784 */ 1785 1786 static int glock_compare(const void *arg_a, const void *arg_b) 1787 { 1788 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; 1789 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; 1790 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; 1791 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; 1792 1793 if (a->ln_number > b->ln_number) 1794 return 1; 1795 if (a->ln_number < b->ln_number) 1796 return -1; 1797 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); 1798 return 0; 1799 } 1800 1801 /** 1802 * nq_m_sync - synchronously acquire more than one glock in deadlock free order 1803 * @num_gh: the number of structures 1804 * @ghs: an array of struct gfs2_holder structures 1805 * @p: placeholder for the holder structure to pass back 1806 * 1807 * Returns: 0 on success (all glocks acquired), 1808 * errno on failure (no glocks acquired) 1809 */ 1810 1811 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, 1812 struct gfs2_holder **p) 1813 { 1814 unsigned int x; 1815 int error = 0; 1816 1817 for (x = 0; x < num_gh; x++) 1818 p[x] = &ghs[x]; 1819 1820 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); 1821 1822 for (x = 0; x < num_gh; x++) { 1823 error = gfs2_glock_nq(p[x]); 1824 if (error) { 1825 while (x--) 1826 gfs2_glock_dq(p[x]); 1827 break; 1828 } 1829 } 1830 1831 return error; 1832 } 1833 1834 /** 1835 * gfs2_glock_nq_m - acquire multiple glocks 1836 * @num_gh: the number of structures 1837 * @ghs: an array of struct gfs2_holder structures 1838 * 1839 * Returns: 0 on success (all glocks acquired), 1840 * errno 
on failure (no glocks acquired) 1841 */ 1842 1843 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1844 { 1845 struct gfs2_holder *tmp[4]; 1846 struct gfs2_holder **pph = tmp; 1847 int error = 0; 1848 1849 switch(num_gh) { 1850 case 0: 1851 return 0; 1852 case 1: 1853 return gfs2_glock_nq(ghs); 1854 default: 1855 if (num_gh <= 4) 1856 break; 1857 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *), 1858 GFP_NOFS); 1859 if (!pph) 1860 return -ENOMEM; 1861 } 1862 1863 error = nq_m_sync(num_gh, ghs, pph); 1864 1865 if (pph != tmp) 1866 kfree(pph); 1867 1868 return error; 1869 } 1870 1871 /** 1872 * gfs2_glock_dq_m - release multiple glocks 1873 * @num_gh: the number of structures 1874 * @ghs: an array of struct gfs2_holder structures 1875 * 1876 */ 1877 1878 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1879 { 1880 while (num_gh--) 1881 gfs2_glock_dq(&ghs[num_gh]); 1882 } 1883 1884 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1885 { 1886 unsigned long delay = 0; 1887 1888 gfs2_glock_hold(gl); 1889 spin_lock(&gl->gl_lockref.lock); 1890 if (!list_empty(&gl->gl_holders) && 1891 gl->gl_name.ln_type == LM_TYPE_INODE) { 1892 unsigned long now = jiffies; 1893 unsigned long holdtime; 1894 1895 holdtime = gl->gl_tchange + gl->gl_hold_time; 1896 1897 if (time_before(now, holdtime)) 1898 delay = holdtime - now; 1899 if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) 1900 delay = gl->gl_hold_time; 1901 } 1902 request_demote(gl, state, delay, true); 1903 gfs2_glock_queue_work(gl, delay); 1904 spin_unlock(&gl->gl_lockref.lock); 1905 } 1906 1907 /** 1908 * gfs2_should_freeze - Figure out if glock should be frozen 1909 * @gl: The glock in question 1910 * 1911 * Glocks are not frozen if (a) the result of the dlm operation is 1912 * an error, (b) the locking operation was an unlock operation or 1913 * (c) if there is a "noexp" flagged request anywhere in the queue 1914 * 1915 * Returns: 1 if freezing should occur, 0 otherwise 1916 */ 1917 1918 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1919 { 1920 const struct gfs2_holder *gh; 1921 1922 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1923 return 0; 1924 if (gl->gl_target == LM_ST_UNLOCKED) 1925 return 0; 1926 1927 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1928 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1929 continue; 1930 if (LM_FLAG_NOEXP & gh->gh_flags) 1931 return 0; 1932 } 1933 1934 return 1; 1935 } 1936 1937 /** 1938 * gfs2_glock_complete - Callback used by locking 1939 * @gl: Pointer to the glock 1940 * @ret: The return value from the dlm 1941 * 1942 * The gl_reply field is under the gl_lockref.lock lock so that it is ok 1943 * to use a bitfield shared with other glock state fields. 
1944 */ 1945 1946 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1947 { 1948 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1949 1950 spin_lock(&gl->gl_lockref.lock); 1951 gl->gl_reply = ret; 1952 1953 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1954 if (gfs2_should_freeze(gl)) { 1955 set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags); 1956 spin_unlock(&gl->gl_lockref.lock); 1957 return; 1958 } 1959 } 1960 1961 gl->gl_lockref.count++; 1962 set_bit(GLF_HAVE_REPLY, &gl->gl_flags); 1963 gfs2_glock_queue_work(gl, 0); 1964 spin_unlock(&gl->gl_lockref.lock); 1965 } 1966 1967 static int glock_cmp(void *priv, const struct list_head *a, 1968 const struct list_head *b) 1969 { 1970 struct gfs2_glock *gla, *glb; 1971 1972 gla = list_entry(a, struct gfs2_glock, gl_lru); 1973 glb = list_entry(b, struct gfs2_glock, gl_lru); 1974 1975 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1976 return 1; 1977 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1978 return -1; 1979 1980 return 0; 1981 } 1982 1983 static bool can_free_glock(struct gfs2_glock *gl) 1984 { 1985 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1986 1987 return !test_bit(GLF_LOCK, &gl->gl_flags) && 1988 !gl->gl_lockref.count && 1989 (!test_bit(GLF_LFLUSH, &gl->gl_flags) || 1990 test_bit(SDF_KILL, &sdp->sd_flags)); 1991 } 1992 1993 /** 1994 * gfs2_dispose_glock_lru - Demote a list of glocks 1995 * @list: The list to dispose of 1996 * 1997 * Disposing of glocks may involve disk accesses, so that here we sort 1998 * the glocks by number (i.e. disk location of the inodes) so that if 1999 * there are any such accesses, they'll be sent in order (mostly). 2000 * 2001 * Must be called under the lru_lock, but may drop and retake this 2002 * lock. While the lru_lock is dropped, entries may vanish from the 2003 * list, but no new entries will appear on the list (since it is 2004 * private) 2005 */ 2006 2007 static unsigned long gfs2_dispose_glock_lru(struct list_head *list) 2008 __releases(&lru_lock) 2009 __acquires(&lru_lock) 2010 { 2011 struct gfs2_glock *gl; 2012 unsigned long freed = 0; 2013 2014 list_sort(NULL, list, glock_cmp); 2015 2016 while(!list_empty(list)) { 2017 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 2018 if (!spin_trylock(&gl->gl_lockref.lock)) { 2019 add_back_to_lru: 2020 list_move(&gl->gl_lru, &lru_list); 2021 continue; 2022 } 2023 if (!can_free_glock(gl)) { 2024 spin_unlock(&gl->gl_lockref.lock); 2025 goto add_back_to_lru; 2026 } 2027 list_del_init(&gl->gl_lru); 2028 atomic_dec(&lru_count); 2029 clear_bit(GLF_LRU, &gl->gl_flags); 2030 freed++; 2031 gl->gl_lockref.count++; 2032 if (gl->gl_state != LM_ST_UNLOCKED) 2033 request_demote(gl, LM_ST_UNLOCKED, 0, false); 2034 gfs2_glock_queue_work(gl, 0); 2035 spin_unlock(&gl->gl_lockref.lock); 2036 cond_resched_lock(&lru_lock); 2037 } 2038 return freed; 2039 } 2040 2041 /** 2042 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 2043 * @nr: The number of entries to scan 2044 * 2045 * This function selects the entries on the LRU which are able to 2046 * be demoted, and then kicks off the process by calling 2047 * gfs2_dispose_glock_lru() above. 
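 *
 * This is called from the registered glock shrinker via
 * gfs2_glock_shrink_scan() below, with @nr taken from sc->nr_to_scan.
 *
 * Returns: the number of glocks for which demotion work was queued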
2048 */ 2049 2050 static unsigned long gfs2_scan_glock_lru(unsigned long nr) 2051 { 2052 struct gfs2_glock *gl, *next; 2053 LIST_HEAD(dispose); 2054 unsigned long freed = 0; 2055 2056 spin_lock(&lru_lock); 2057 list_for_each_entry_safe(gl, next, &lru_list, gl_lru) { 2058 if (!nr--) 2059 break; 2060 if (can_free_glock(gl)) 2061 list_move(&gl->gl_lru, &dispose); 2062 } 2063 if (!list_empty(&dispose)) 2064 freed = gfs2_dispose_glock_lru(&dispose); 2065 spin_unlock(&lru_lock); 2066 2067 return freed; 2068 } 2069 2070 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 2071 struct shrink_control *sc) 2072 { 2073 if (!(sc->gfp_mask & __GFP_FS)) 2074 return SHRINK_STOP; 2075 return gfs2_scan_glock_lru(sc->nr_to_scan); 2076 } 2077 2078 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 2079 struct shrink_control *sc) 2080 { 2081 return vfs_pressure_ratio(atomic_read(&lru_count)); 2082 } 2083 2084 static struct shrinker *glock_shrinker; 2085 2086 /** 2087 * glock_hash_walk - Call a function for glock in a hash bucket 2088 * @examiner: the function 2089 * @sdp: the filesystem 2090 * 2091 * Note that the function can be called multiple times on the same 2092 * object. So the user must ensure that the function can cope with 2093 * that. 2094 */ 2095 2096 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 2097 { 2098 struct gfs2_glock *gl; 2099 struct rhashtable_iter iter; 2100 2101 rhashtable_walk_enter(&gl_hash_table, &iter); 2102 2103 do { 2104 rhashtable_walk_start(&iter); 2105 2106 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) { 2107 if (gl->gl_name.ln_sbd == sdp) 2108 examiner(gl); 2109 } 2110 2111 rhashtable_walk_stop(&iter); 2112 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); 2113 2114 rhashtable_walk_exit(&iter); 2115 } 2116 2117 void gfs2_cancel_delete_work(struct gfs2_glock *gl) 2118 { 2119 clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags); 2120 clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags); 2121 if (cancel_delayed_work(&gl->gl_delete)) 2122 gfs2_glock_put(gl); 2123 } 2124 2125 static void flush_delete_work(struct gfs2_glock *gl) 2126 { 2127 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { 2128 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 2129 2130 if (cancel_delayed_work(&gl->gl_delete)) { 2131 queue_delayed_work(sdp->sd_delete_wq, 2132 &gl->gl_delete, 0); 2133 } 2134 } 2135 } 2136 2137 void gfs2_flush_delete_work(struct gfs2_sbd *sdp) 2138 { 2139 glock_hash_walk(flush_delete_work, sdp); 2140 flush_workqueue(sdp->sd_delete_wq); 2141 } 2142 2143 /** 2144 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 2145 * @gl: The glock to thaw 2146 * 2147 */ 2148 2149 static void thaw_glock(struct gfs2_glock *gl) 2150 { 2151 if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags)) 2152 return; 2153 if (!lockref_get_not_dead(&gl->gl_lockref)) 2154 return; 2155 2156 gfs2_glock_remove_from_lru(gl); 2157 spin_lock(&gl->gl_lockref.lock); 2158 set_bit(GLF_HAVE_REPLY, &gl->gl_flags); 2159 gfs2_glock_queue_work(gl, 0); 2160 spin_unlock(&gl->gl_lockref.lock); 2161 } 2162 2163 /** 2164 * clear_glock - look at a glock and see if we can free it from glock cache 2165 * @gl: the glock to look at 2166 * 2167 */ 2168 2169 static void clear_glock(struct gfs2_glock *gl) 2170 { 2171 gfs2_glock_remove_from_lru(gl); 2172 2173 spin_lock(&gl->gl_lockref.lock); 2174 if (!__lockref_is_dead(&gl->gl_lockref)) { 2175 gl->gl_lockref.count++; 2176 if (gl->gl_state != LM_ST_UNLOCKED) 2177 request_demote(gl, LM_ST_UNLOCKED, 0, false); 2178 
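		/*
		 * The reference taken above is handed over to the glock
		 * workqueue; the queued work carries out the demote and
		 * drops that reference when it has finished.
		 */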
gfs2_glock_queue_work(gl, 0); 2179 } 2180 spin_unlock(&gl->gl_lockref.lock); 2181 } 2182 2183 /** 2184 * gfs2_glock_thaw - Thaw any frozen glocks 2185 * @sdp: The super block 2186 * 2187 */ 2188 2189 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 2190 { 2191 glock_hash_walk(thaw_glock, sdp); 2192 } 2193 2194 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 2195 { 2196 spin_lock(&gl->gl_lockref.lock); 2197 gfs2_dump_glock(seq, gl, fsid); 2198 spin_unlock(&gl->gl_lockref.lock); 2199 } 2200 2201 static void dump_glock_func(struct gfs2_glock *gl) 2202 { 2203 dump_glock(NULL, gl, true); 2204 } 2205 2206 static void withdraw_dq(struct gfs2_glock *gl) 2207 { 2208 spin_lock(&gl->gl_lockref.lock); 2209 if (!__lockref_is_dead(&gl->gl_lockref) && 2210 glock_blocked_by_withdraw(gl)) 2211 do_error(gl, LM_OUT_ERROR); /* remove pending waiters */ 2212 spin_unlock(&gl->gl_lockref.lock); 2213 } 2214 2215 void gfs2_gl_dq_holders(struct gfs2_sbd *sdp) 2216 { 2217 glock_hash_walk(withdraw_dq, sdp); 2218 } 2219 2220 /** 2221 * gfs2_gl_hash_clear - Empty out the glock hash table 2222 * @sdp: the filesystem 2223 * 2224 * Called when unmounting the filesystem. 2225 */ 2226 2227 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 2228 { 2229 unsigned long start = jiffies; 2230 bool timed_out = false; 2231 2232 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); 2233 flush_workqueue(sdp->sd_glock_wq); 2234 glock_hash_walk(clear_glock, sdp); 2235 flush_workqueue(sdp->sd_glock_wq); 2236 2237 while (!timed_out) { 2238 wait_event_timeout(sdp->sd_kill_wait, 2239 !atomic_read(&sdp->sd_glock_disposal), 2240 HZ * 60); 2241 if (!atomic_read(&sdp->sd_glock_disposal)) 2242 break; 2243 timed_out = time_after(jiffies, start + (HZ * 600)); 2244 fs_warn(sdp, "%u glocks left after %u seconds%s\n", 2245 atomic_read(&sdp->sd_glock_disposal), 2246 jiffies_to_msecs(jiffies - start) / 1000, 2247 timed_out ? 
":" : "; still waiting"); 2248 } 2249 gfs2_lm_unmount(sdp); 2250 gfs2_free_dead_glocks(sdp); 2251 glock_hash_walk(dump_glock_func, sdp); 2252 destroy_workqueue(sdp->sd_glock_wq); 2253 sdp->sd_glock_wq = NULL; 2254 } 2255 2256 static const char *state2str(unsigned state) 2257 { 2258 switch(state) { 2259 case LM_ST_UNLOCKED: 2260 return "UN"; 2261 case LM_ST_SHARED: 2262 return "SH"; 2263 case LM_ST_DEFERRED: 2264 return "DF"; 2265 case LM_ST_EXCLUSIVE: 2266 return "EX"; 2267 } 2268 return "??"; 2269 } 2270 2271 static const char *hflags2str(char *buf, u16 flags, unsigned long iflags) 2272 { 2273 char *p = buf; 2274 if (flags & LM_FLAG_TRY) 2275 *p++ = 't'; 2276 if (flags & LM_FLAG_TRY_1CB) 2277 *p++ = 'T'; 2278 if (flags & LM_FLAG_NOEXP) 2279 *p++ = 'e'; 2280 if (flags & LM_FLAG_ANY) 2281 *p++ = 'A'; 2282 if (flags & LM_FLAG_NODE_SCOPE) 2283 *p++ = 'n'; 2284 if (flags & GL_ASYNC) 2285 *p++ = 'a'; 2286 if (flags & GL_EXACT) 2287 *p++ = 'E'; 2288 if (flags & GL_NOCACHE) 2289 *p++ = 'c'; 2290 if (test_bit(HIF_HOLDER, &iflags)) 2291 *p++ = 'H'; 2292 if (test_bit(HIF_WAIT, &iflags)) 2293 *p++ = 'W'; 2294 if (flags & GL_SKIP) 2295 *p++ = 's'; 2296 *p = 0; 2297 return buf; 2298 } 2299 2300 /** 2301 * dump_holder - print information about a glock holder 2302 * @seq: the seq_file struct 2303 * @gh: the glock holder 2304 * @fs_id_buf: pointer to file system id (if requested) 2305 * 2306 */ 2307 2308 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh, 2309 const char *fs_id_buf) 2310 { 2311 const char *comm = "(none)"; 2312 pid_t owner_pid = 0; 2313 char flags_buf[32]; 2314 2315 rcu_read_lock(); 2316 if (pid_is_meaningful(gh)) { 2317 struct task_struct *gh_owner; 2318 2319 comm = "(ended)"; 2320 owner_pid = pid_nr(gh->gh_owner_pid); 2321 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 2322 if (gh_owner) 2323 comm = gh_owner->comm; 2324 } 2325 gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n", 2326 fs_id_buf, state2str(gh->gh_state), 2327 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), 2328 gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip); 2329 rcu_read_unlock(); 2330 } 2331 2332 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) 2333 { 2334 const unsigned long *gflags = &gl->gl_flags; 2335 char *p = buf; 2336 2337 if (test_bit(GLF_LOCK, gflags)) 2338 *p++ = 'l'; 2339 if (test_bit(GLF_DEMOTE, gflags)) 2340 *p++ = 'D'; 2341 if (test_bit(GLF_PENDING_DEMOTE, gflags)) 2342 *p++ = 'd'; 2343 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags)) 2344 *p++ = 'p'; 2345 if (test_bit(GLF_DIRTY, gflags)) 2346 *p++ = 'y'; 2347 if (test_bit(GLF_LFLUSH, gflags)) 2348 *p++ = 'f'; 2349 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) 2350 *p++ = 'i'; 2351 if (test_bit(GLF_HAVE_REPLY, gflags)) 2352 *p++ = 'r'; 2353 if (test_bit(GLF_INITIAL, gflags)) 2354 *p++ = 'a'; 2355 if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags)) 2356 *p++ = 'F'; 2357 if (!list_empty(&gl->gl_holders)) 2358 *p++ = 'q'; 2359 if (test_bit(GLF_LRU, gflags)) 2360 *p++ = 'L'; 2361 if (gl->gl_object) 2362 *p++ = 'o'; 2363 if (test_bit(GLF_BLOCKING, gflags)) 2364 *p++ = 'b'; 2365 if (test_bit(GLF_UNLOCKED, gflags)) 2366 *p++ = 'x'; 2367 if (test_bit(GLF_INSTANTIATE_NEEDED, gflags)) 2368 *p++ = 'n'; 2369 if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags)) 2370 *p++ = 'N'; 2371 if (test_bit(GLF_TRY_TO_EVICT, gflags)) 2372 *p++ = 'e'; 2373 if (test_bit(GLF_VERIFY_EVICT, gflags)) 2374 *p++ = 'E'; 2375 *p = 0; 2376 return buf; 2377 } 2378 2379 /** 2380 * gfs2_dump_glock - print information about a glock 2381 
 * @seq: The seq_file struct
2382  * @gl: the glock
2383  * @fsid: If true, also dump the file system id
2384  *
2385  * The file format is as follows:
2386  * One line per object; capital letters are used to indicate objects:
2387  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
2388  * other objects are indented by a single space and follow the glock to
2389  * which they are related. Fields are indicated by lower case letters
2390  * followed by a colon and the field value, except for strings, which are in
2391  * [] so that it's possible to see if they are composed of spaces, for
2392  * example. The fields are n = number (id of the object), f = flags,
2393  * t = type, s = state, r = refcount, e = error, p = pid.
2394  *
2395  */
2396
2397 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
2398 {
2399 	const struct gfs2_glock_operations *glops = gl->gl_ops;
2400 	unsigned long long dtime;
2401 	const struct gfs2_holder *gh;
2402 	char gflags_buf[32];
2403 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
2404 	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
2405 	unsigned long nrpages = 0;
2406
2407 	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
2408 		struct address_space *mapping = gfs2_glock2aspace(gl);
2409
2410 		nrpages = mapping->nrpages;
2411 	}
2412 	memset(fs_id_buf, 0, sizeof(fs_id_buf));
2413 	if (fsid && sdp) /* safety precaution */
2414 		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
2415 	dtime = jiffies - gl->gl_demote_time;
2416 	dtime *= 1000000/HZ; /* demote time in uSec */
2417 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
2418 		dtime = 0;
2419 	gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
2420 		       "v:%d r:%d m:%ld p:%lu\n",
2421 		       fs_id_buf, state2str(gl->gl_state),
2422 		       gl->gl_name.ln_type,
2423 		       (unsigned long long)gl->gl_name.ln_number,
2424 		       gflags2str(gflags_buf, gl),
2425 		       state2str(gl->gl_target),
2426 		       state2str(gl->gl_demote_state), dtime,
2427 		       atomic_read(&gl->gl_ail_count),
2428 		       atomic_read(&gl->gl_revokes),
2429 		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
2430
2431 	list_for_each_entry(gh, &gl->gl_holders, gh_list)
2432 		dump_holder(seq, gh, fs_id_buf);
2433
2434 	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
2435 		glops->go_dump(seq, gl, fs_id_buf);
2436 }
2437
2438 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
2439 {
2440 	struct gfs2_glock *gl = iter_ptr;
2441
2442 	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
2443 		   gl->gl_name.ln_type,
2444 		   (unsigned long long)gl->gl_name.ln_number,
2445 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
2446 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
2447 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
2448 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
2449 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
2450 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
2451 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
2452 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
2453 	return 0;
2454 }
2455
2456 static const char *gfs2_gltype[] = {
2457 	"type",
2458 	"reserved",
2459 	"nondisk",
2460 	"inode",
2461 	"rgrp",
2462 	"meta",
2463 	"iopen",
2464 	"flock",
2465 	"plock",
2466 	"quota",
2467 	"journal",
2468 };
2469
2470 static const char *gfs2_stype[] = {
2471 	[GFS2_LKS_SRTT]		= "srtt",
2472 	[GFS2_LKS_SRTTVAR]	= "srttvar",
2473 	[GFS2_LKS_SRTTB]	= "srttb",
2474 	[GFS2_LKS_SRTTVARB]	= "srttvarb",
2475 	[GFS2_LKS_SIRT]		= "sirt",
2476 	[GFS2_LKS_SIRTVAR]	= "sirtvar",
2477 [GFS2_LKS_DCOUNT] = "dlm", 2478 [GFS2_LKS_QCOUNT] = "queue", 2479 }; 2480 2481 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype)) 2482 2483 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) 2484 { 2485 struct gfs2_sbd *sdp = seq->private; 2486 loff_t pos = *(loff_t *)iter_ptr; 2487 unsigned index = pos >> 3; 2488 unsigned subindex = pos & 0x07; 2489 int i; 2490 2491 if (index == 0 && subindex != 0) 2492 return 0; 2493 2494 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], 2495 (index == 0) ? "cpu": gfs2_stype[subindex]); 2496 2497 for_each_possible_cpu(i) { 2498 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 2499 2500 if (index == 0) 2501 seq_printf(seq, " %15u", i); 2502 else 2503 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> 2504 lkstats[index - 1].stats[subindex]); 2505 } 2506 seq_putc(seq, '\n'); 2507 return 0; 2508 } 2509 2510 int __init gfs2_glock_init(void) 2511 { 2512 int i, ret; 2513 2514 ret = rhashtable_init(&gl_hash_table, &ht_parms); 2515 if (ret < 0) 2516 return ret; 2517 2518 glock_shrinker = shrinker_alloc(0, "gfs2-glock"); 2519 if (!glock_shrinker) { 2520 rhashtable_destroy(&gl_hash_table); 2521 return -ENOMEM; 2522 } 2523 2524 glock_shrinker->count_objects = gfs2_glock_shrink_count; 2525 glock_shrinker->scan_objects = gfs2_glock_shrink_scan; 2526 2527 shrinker_register(glock_shrinker); 2528 2529 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++) 2530 init_waitqueue_head(glock_wait_table + i); 2531 2532 return 0; 2533 } 2534 2535 void gfs2_glock_exit(void) 2536 { 2537 shrinker_free(glock_shrinker); 2538 rhashtable_destroy(&gl_hash_table); 2539 } 2540 2541 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) 2542 { 2543 struct gfs2_glock *gl = gi->gl; 2544 2545 if (gl) { 2546 if (n == 0) 2547 return; 2548 gfs2_glock_put_async(gl); 2549 } 2550 for (;;) { 2551 gl = rhashtable_walk_next(&gi->hti); 2552 if (IS_ERR_OR_NULL(gl)) { 2553 if (gl == ERR_PTR(-EAGAIN)) { 2554 n = 1; 2555 continue; 2556 } 2557 gl = NULL; 2558 break; 2559 } 2560 if (gl->gl_name.ln_sbd != gi->sdp) 2561 continue; 2562 if (n <= 1) { 2563 if (!lockref_get_not_dead(&gl->gl_lockref)) 2564 continue; 2565 break; 2566 } else { 2567 if (__lockref_is_dead(&gl->gl_lockref)) 2568 continue; 2569 n--; 2570 } 2571 } 2572 gi->gl = gl; 2573 } 2574 2575 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 2576 __acquires(RCU) 2577 { 2578 struct gfs2_glock_iter *gi = seq->private; 2579 loff_t n; 2580 2581 /* 2582 * We can either stay where we are, skip to the next hash table 2583 * entry, or start from the beginning. 
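 * An rhashtable walk can only move forward, so when the requested *pos
 * is behind the last position we returned, we tear the walk down,
 * re-enter it and advance *pos + 1 entries from the start; otherwise we
 * only advance by the difference between *pos and last_pos.  For
 * example, after returning the glock at position 10, a seek back to
 * position 3 restarts the walk and takes the fourth glock of this
 * filesystem that the walk yields (n = 3 + 1).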
2584 */ 2585 if (*pos < gi->last_pos) { 2586 rhashtable_walk_exit(&gi->hti); 2587 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2588 n = *pos + 1; 2589 } else { 2590 n = *pos - gi->last_pos; 2591 } 2592 2593 rhashtable_walk_start(&gi->hti); 2594 2595 gfs2_glock_iter_next(gi, n); 2596 gi->last_pos = *pos; 2597 return gi->gl; 2598 } 2599 2600 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, 2601 loff_t *pos) 2602 { 2603 struct gfs2_glock_iter *gi = seq->private; 2604 2605 (*pos)++; 2606 gi->last_pos = *pos; 2607 gfs2_glock_iter_next(gi, 1); 2608 return gi->gl; 2609 } 2610 2611 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 2612 __releases(RCU) 2613 { 2614 struct gfs2_glock_iter *gi = seq->private; 2615 2616 rhashtable_walk_stop(&gi->hti); 2617 } 2618 2619 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 2620 { 2621 dump_glock(seq, iter_ptr, false); 2622 return 0; 2623 } 2624 2625 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 2626 { 2627 preempt_disable(); 2628 if (*pos >= GFS2_NR_SBSTATS) 2629 return NULL; 2630 return pos; 2631 } 2632 2633 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 2634 loff_t *pos) 2635 { 2636 (*pos)++; 2637 if (*pos >= GFS2_NR_SBSTATS) 2638 return NULL; 2639 return pos; 2640 } 2641 2642 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) 2643 { 2644 preempt_enable(); 2645 } 2646 2647 static const struct seq_operations gfs2_glock_seq_ops = { 2648 .start = gfs2_glock_seq_start, 2649 .next = gfs2_glock_seq_next, 2650 .stop = gfs2_glock_seq_stop, 2651 .show = gfs2_glock_seq_show, 2652 }; 2653 2654 static const struct seq_operations gfs2_glstats_seq_ops = { 2655 .start = gfs2_glock_seq_start, 2656 .next = gfs2_glock_seq_next, 2657 .stop = gfs2_glock_seq_stop, 2658 .show = gfs2_glstats_seq_show, 2659 }; 2660 2661 static const struct seq_operations gfs2_sbstats_sops = { 2662 .start = gfs2_sbstats_seq_start, 2663 .next = gfs2_sbstats_seq_next, 2664 .stop = gfs2_sbstats_seq_stop, 2665 .show = gfs2_sbstats_seq_show, 2666 }; 2667 2668 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL) 2669 2670 static int __gfs2_glocks_open(struct inode *inode, struct file *file, 2671 const struct seq_operations *ops) 2672 { 2673 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter)); 2674 if (ret == 0) { 2675 struct seq_file *seq = file->private_data; 2676 struct gfs2_glock_iter *gi = seq->private; 2677 2678 gi->sdp = inode->i_private; 2679 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 2680 if (seq->buf) 2681 seq->size = GFS2_SEQ_GOODSIZE; 2682 /* 2683 * Initially, we are "before" the first hash table entry; the 2684 * first call to rhashtable_walk_next gets us the first entry. 
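 * With last_pos set to -1 below, the first ->start() call (where *pos
 * is 0) computes n = *pos - last_pos = 1, so gfs2_glock_iter_next()
 * returns the first glock of this filesystem that the walk finds.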
2685 */ 2686 gi->last_pos = -1; 2687 gi->gl = NULL; 2688 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2689 } 2690 return ret; 2691 } 2692 2693 static int gfs2_glocks_open(struct inode *inode, struct file *file) 2694 { 2695 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops); 2696 } 2697 2698 static int gfs2_glocks_release(struct inode *inode, struct file *file) 2699 { 2700 struct seq_file *seq = file->private_data; 2701 struct gfs2_glock_iter *gi = seq->private; 2702 2703 if (gi->gl) 2704 gfs2_glock_put(gi->gl); 2705 rhashtable_walk_exit(&gi->hti); 2706 return seq_release_private(inode, file); 2707 } 2708 2709 static int gfs2_glstats_open(struct inode *inode, struct file *file) 2710 { 2711 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops); 2712 } 2713 2714 static const struct file_operations gfs2_glocks_fops = { 2715 .owner = THIS_MODULE, 2716 .open = gfs2_glocks_open, 2717 .read = seq_read, 2718 .llseek = seq_lseek, 2719 .release = gfs2_glocks_release, 2720 }; 2721 2722 static const struct file_operations gfs2_glstats_fops = { 2723 .owner = THIS_MODULE, 2724 .open = gfs2_glstats_open, 2725 .read = seq_read, 2726 .llseek = seq_lseek, 2727 .release = gfs2_glocks_release, 2728 }; 2729 2730 struct gfs2_glockfd_iter { 2731 struct super_block *sb; 2732 unsigned int tgid; 2733 struct task_struct *task; 2734 unsigned int fd; 2735 struct file *file; 2736 }; 2737 2738 static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i) 2739 { 2740 struct pid_namespace *ns = task_active_pid_ns(current); 2741 struct pid *pid; 2742 2743 if (i->task) 2744 put_task_struct(i->task); 2745 2746 rcu_read_lock(); 2747 retry: 2748 i->task = NULL; 2749 pid = find_ge_pid(i->tgid, ns); 2750 if (pid) { 2751 i->tgid = pid_nr_ns(pid, ns); 2752 i->task = pid_task(pid, PIDTYPE_TGID); 2753 if (!i->task) { 2754 i->tgid++; 2755 goto retry; 2756 } 2757 get_task_struct(i->task); 2758 } 2759 rcu_read_unlock(); 2760 return i->task; 2761 } 2762 2763 static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i) 2764 { 2765 if (i->file) { 2766 fput(i->file); 2767 i->file = NULL; 2768 } 2769 2770 for(;; i->fd++) { 2771 i->file = fget_task_next(i->task, &i->fd); 2772 if (!i->file) { 2773 i->fd = 0; 2774 break; 2775 } 2776 2777 if (file_inode(i->file)->i_sb == i->sb) 2778 break; 2779 2780 fput(i->file); 2781 } 2782 return i->file; 2783 } 2784 2785 static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos) 2786 { 2787 struct gfs2_glockfd_iter *i = seq->private; 2788 2789 if (*pos) 2790 return NULL; 2791 while (gfs2_glockfd_next_task(i)) { 2792 if (gfs2_glockfd_next_file(i)) 2793 return i; 2794 i->tgid++; 2795 } 2796 return NULL; 2797 } 2798 2799 static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr, 2800 loff_t *pos) 2801 { 2802 struct gfs2_glockfd_iter *i = seq->private; 2803 2804 (*pos)++; 2805 i->fd++; 2806 do { 2807 if (gfs2_glockfd_next_file(i)) 2808 return i; 2809 i->tgid++; 2810 } while (gfs2_glockfd_next_task(i)); 2811 return NULL; 2812 } 2813 2814 static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr) 2815 { 2816 struct gfs2_glockfd_iter *i = seq->private; 2817 2818 if (i->file) 2819 fput(i->file); 2820 if (i->task) 2821 put_task_struct(i->task); 2822 } 2823 2824 static void gfs2_glockfd_seq_show_flock(struct seq_file *seq, 2825 struct gfs2_glockfd_iter *i) 2826 { 2827 struct gfs2_file *fp = i->file->private_data; 2828 struct gfs2_holder *fl_gh = &fp->f_fl_gh; 2829 struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED }; 2830 
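	/*
	 * No real glock ever uses LM_TYPE_RESERVED, so it doubles as a
	 * "no flock glock attached" sentinel for the check below.
	 */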
2831 if (!READ_ONCE(fl_gh->gh_gl)) 2832 return; 2833 2834 spin_lock(&i->file->f_lock); 2835 if (gfs2_holder_initialized(fl_gh)) 2836 gl_name = fl_gh->gh_gl->gl_name; 2837 spin_unlock(&i->file->f_lock); 2838 2839 if (gl_name.ln_type != LM_TYPE_RESERVED) { 2840 seq_printf(seq, "%d %u %u/%llx\n", 2841 i->tgid, i->fd, gl_name.ln_type, 2842 (unsigned long long)gl_name.ln_number); 2843 } 2844 } 2845 2846 static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr) 2847 { 2848 struct gfs2_glockfd_iter *i = seq->private; 2849 struct inode *inode = file_inode(i->file); 2850 struct gfs2_glock *gl; 2851 2852 inode_lock_shared(inode); 2853 gl = GFS2_I(inode)->i_iopen_gh.gh_gl; 2854 if (gl) { 2855 seq_printf(seq, "%d %u %u/%llx\n", 2856 i->tgid, i->fd, gl->gl_name.ln_type, 2857 (unsigned long long)gl->gl_name.ln_number); 2858 } 2859 gfs2_glockfd_seq_show_flock(seq, i); 2860 inode_unlock_shared(inode); 2861 return 0; 2862 } 2863 2864 static const struct seq_operations gfs2_glockfd_seq_ops = { 2865 .start = gfs2_glockfd_seq_start, 2866 .next = gfs2_glockfd_seq_next, 2867 .stop = gfs2_glockfd_seq_stop, 2868 .show = gfs2_glockfd_seq_show, 2869 }; 2870 2871 static int gfs2_glockfd_open(struct inode *inode, struct file *file) 2872 { 2873 struct gfs2_glockfd_iter *i; 2874 struct gfs2_sbd *sdp = inode->i_private; 2875 2876 i = __seq_open_private(file, &gfs2_glockfd_seq_ops, 2877 sizeof(struct gfs2_glockfd_iter)); 2878 if (!i) 2879 return -ENOMEM; 2880 i->sb = sdp->sd_vfs; 2881 return 0; 2882 } 2883 2884 static const struct file_operations gfs2_glockfd_fops = { 2885 .owner = THIS_MODULE, 2886 .open = gfs2_glockfd_open, 2887 .read = seq_read, 2888 .llseek = seq_lseek, 2889 .release = seq_release_private, 2890 }; 2891 2892 DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats); 2893 2894 void gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 2895 { 2896 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); 2897 2898 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2899 &gfs2_glocks_fops); 2900 2901 debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2902 &gfs2_glockfd_fops); 2903 2904 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2905 &gfs2_glstats_fops); 2906 2907 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2908 &gfs2_sbstats_fops); 2909 } 2910 2911 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) 2912 { 2913 debugfs_remove_recursive(sdp->debugfs_dir); 2914 sdp->debugfs_dir = NULL; 2915 } 2916 2917 void gfs2_register_debugfs(void) 2918 { 2919 gfs2_root = debugfs_create_dir("gfs2", NULL); 2920 } 2921 2922 void gfs2_unregister_debugfs(void) 2923 { 2924 debugfs_remove(gfs2_root); 2925 gfs2_root = NULL; 2926 } 2927
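
/*
 * Illustrative usage sketch (not compiled in): how a caller might take
 * two glocks in deadlock-free order with gfs2_glock_nq_m() and release
 * them again.  "gl1" and "gl2" stand for glocks the caller has already
 * looked up; gfs2_holder_init() and gfs2_holder_uninit() are assumed to
 * behave as declared in glock.h.
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_SHARED, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		(use the objects protected by the two glocks)
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 *
 * gfs2_glock_nq_m() acquires the holders in lock-number order (via
 * nq_m_sync()), and gfs2_glock_dq_m() drops them in reverse array order.
 */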