// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>
#include <linux/pid_namespace.h>
#include <linux/file.h>
#include <linux/random.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
		     unsigned int target, bool may_cancel);
static void request_demote(struct gfs2_glock *gl, unsigned int state,
			   unsigned long delay, bool remote);

static struct dentry *gfs2_root;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}
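
/*
 * Glock waiters are spread across GLOCK_WAIT_TABLE_SIZE hashed wait queues
 * rather than one wait queue per glock.  Because a bucket can be shared by
 * unrelated glocks, glock_wake_function() above compares the full
 * lm_lockname of each waiter against the name passed as the wake-up key,
 * so only waiters for the glock in question are woken.  See
 * find_insert_glock() below for the wait side of this protocol.
 */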

/**
 * wake_up_glock  -  Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	kfree(gl->gl_lksb.sb_lvbptr);
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		kmem_cache_free(gfs2_glock_aspace_cachep, gla);
	} else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

static void __gfs2_glock_free(struct gfs2_glock *gl)
{
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);

	__gfs2_glock_free(gl);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_kill_wait);
}

void gfs2_glock_free_later(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);

	spin_lock(&lru_lock);
	list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
	spin_unlock(&lru_lock);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_kill_wait);
}

static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
{
	struct list_head *list = &sdp->sd_dead_glocks;

	while (!list_empty(list)) {
		struct gfs2_glock *gl;

		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		__gfs2_glock_free(gl);
	}
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
{
	if (!lockref_get_not_dead(&gl->gl_lockref))
		GLOCK_BUG_ON(gl, 1);
	return gl;
}

static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	list_move_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);

	if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above.  The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}
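
/*
 * The usual calling pattern (illustrative; see run_queue() and
 * gfs2_glock_complete() for real instances) is to take an extra reference
 * under the lockref spinlock and hand it to the work queue:
 *
 *	gl->gl_lockref.count++;
 *	gfs2_glock_queue_work(gl, 0);
 *
 * If the work turns out to be queued already, gfs2_glock_queue_work()
 * drops that extra reference again.
 */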

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);
	spin_unlock(&gl->gl_lockref.lock);
	gfs2_glock_remove_from_lru(gl);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	if (mapping) {
		truncate_inode_pages_final(mapping);
		if (!gfs2_withdrawn(sdp))
			GLOCK_BUG_ON(gl, !mapping_empty(mapping));
	}
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return true;
	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
	if (gl->gl_state != LM_ST_UNLOCKED) {
		gl->gl_lockref.count--;
		gfs2_glock_add_to_lru(gl);
		spin_unlock(&gl->gl_lockref.lock);
		return true;
	}
	return false;
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (__gfs2_glock_put_or_lock(gl))
		return;

	__gfs2_glock_put(gl);
}

/*
 * gfs2_glock_put_async - Decrement reference count without sleeping
 * @gl: The glock to put
 *
 * Decrement the reference count on glock immediately unless it is the last
 * reference.  Defer putting the last reference to work queue context.
 */
void gfs2_glock_put_async(struct gfs2_glock *gl)
{
	if (__gfs2_glock_put_or_lock(gl))
		return;

	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @current_gh: One of the current holders of @gl
 * @gh: The lock request which we wish to grant
 *
 * With our current compatibility rules, if a glock has one or more active
 * holders (HIF_HOLDER flag set), any of those holders can be passed in as
 * @current_gh; they are all the same as far as compatibility with the new @gh
 * goes.
 *
 * Returns true if it's ok to grant the lock.
 */

static inline bool may_grant(struct gfs2_glock *gl,
			     struct gfs2_holder *current_gh,
			     struct gfs2_holder *gh)
{
	if (current_gh) {
		GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));

		switch(current_gh->gh_state) {
		case LM_ST_EXCLUSIVE:
			/*
			 * Here we make a special exception to grant holders
			 * who agree to share the EX lock with other holders
			 * who also have the bit set.  If the original holder
			 * has the LM_FLAG_NODE_SCOPE bit set, we grant more
			 * holders with the bit set.
			 */
			return gh->gh_state == LM_ST_EXCLUSIVE &&
			       (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
			       (gh->gh_flags & LM_FLAG_NODE_SCOPE);

		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			return gh->gh_state == current_gh->gh_state;

		default:
			return false;
		}
	}

	if (gl->gl_state == gh->gh_state)
		return true;
	if (gh->gh_flags & GL_EXACT)
		return false;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		return gh->gh_state == LM_ST_SHARED ||
		       gh->gh_state == LM_ST_DEFERRED;
	}
	if (gh->gh_flags & LM_FLAG_ANY)
		return gl->gl_state != LM_ST_UNLOCKED;
	return false;
}
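
/*
 * Summary of the holder compatibility rules implemented above (left
 * column: state of an existing holder; top row: state of the request;
 * LM_FLAG_ANY and GL_EXACT only come into play when there is no current
 * holder):
 *
 *		SH	DF	EX
 *	SH	yes	no	no
 *	DF	no	yes	no
 *	EX	no	no	no (*)
 *
 * (*) except when both the current holder and the request carry
 *     LM_FLAG_NODE_SCOPE, in which case EX may be shared locally.
 */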

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = glock_sbd(gh->gh_gl);

		wake_up(&sdp->sd_async_glock_wait);
	}
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
				      gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/*
 * gfs2_instantiate - Call the glops instantiate function
 * @gh: The glock holder
 *
 * Returns: 0 if instantiate was successful, or error.
 */
int gfs2_instantiate(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int ret;

again:
	if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
		goto done;

	/*
	 * Since we unlock the lockref lock, we set a flag to indicate
	 * instantiate is in progress.
	 */
	if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
		wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
			    TASK_UNINTERRUPTIBLE);
		/*
		 * Here we just waited for a different instantiate to finish.
		 * But that may not have been successful, as when a process
		 * locks an inode glock _before_ it has an actual inode to
		 * instantiate into.  So we check again.  This process might
		 * have an inode to instantiate, so might be successful.
		 */
		goto again;
	}

	ret = glops->go_instantiate(gl);
	if (!ret)
		clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
	clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
	if (ret)
		return ret;

done:
	if (glops->go_held)
		return glops->go_held(gh);
	return 0;
}
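
/*
 * Note the bit-lock idiom above: GLF_INSTANTIATE_IN_PROG is claimed with
 * test_and_set_bit(), losers of the race sleep in wait_on_bit(), and the
 * winner releases the bit with clear_and_wake_up_bit().  Losers must then
 * re-check GLF_INSTANTIATE_NEEDED because the winning instantiate may
 * have failed.
 */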

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 */

static void do_promote(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct gfs2_holder *gh, *current_gh;

	if (gfs2_withdrawn(sdp)) {
		do_error(gl, LM_OUT_ERROR);
		return;
	}

	current_gh = find_first_holder(gl);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (!may_grant(gl, current_gh, gh)) {
			/*
			 * If we get here, it means we may not grant this
			 * holder for some reason.
			 */
			if (current_gh)
				do_error(gl, 0); /* Fail queued try locks */
			break;
		}
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		trace_gfs2_promote(gh);
		gfs2_holder_wake(gh);
		if (!current_gh)
			current_gh = gh;
	}
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * find_last_waiter - find the last gh that's waiting for the glock
 * @gl: the glock
 *
 * This also is a fast way of finding out if there are any waiters.
 */

static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (list_empty(&gl->gl_holders))
		return NULL;
	gh = list_last_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
	return test_bit(HIF_HOLDER, &gh->gh_iflags) ? NULL : gh;
}
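
/*
 * The gl_holders list is kept in two segments: granted holders (with
 * HIF_HOLDER set) at the front, waiters at the back.  do_promote() only
 * ever grants a prefix of the waiters, so this invariant is preserved,
 * and it is what makes the list_first_entry() check in find_first_holder()
 * and the list_last_entry() check in find_last_waiter() sufficient.
 */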

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);

	set_bit(nr, &gl->gl_flags);
	smp_mb();
	wake_up(&sdp->sd_async_glock_wait);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!(ret & ~LM_OUT_ST_MASK)) {
		unsigned state = ret & LM_OUT_ST_MASK;

		trace_gfs2_glock_state_change(gl, state);
		state_change(gl, state);
	}

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(gl->gl_state != gl->gl_target)) {
		struct gfs2_holder *gh = find_first_waiter(gl);

		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			if (ret & LM_OUT_CANCELED) {
				list_del_init(&gh->gh_list);
				trace_gfs2_glock_queue(gh, 0);
				gfs2_holder_wake(gh);
				gl->gl_target = gl->gl_state;
				goto out;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(gl->gl_state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
			do_xmote(gl, gh, gl->gl_target,
				 !test_bit(GLF_DEMOTE_IN_PROGRESS,
					   &gl->gl_flags));
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED, false);
			break;
		default: /* Everything else */
			fs_err(glock_sbd(gl),
			       "glock %u:%llu requested=%u ret=%u\n",
			       glock_type(gl), glock_number(gl),
			       gl->gl_req, ret);
			GLOCK_BUG_ON(gl, 1);
		}
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
		clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		gfs2_demote_wake(gl);
	}
	if (gl->gl_state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			int rv;

			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		do_promote(gl);
	}
out:
	if (!test_bit(GLF_CANCELING, &gl->gl_flags))
		clear_and_wake_up_bit(GLF_LOCK, &gl->gl_flags);
}
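
/*
 * State changes happen in two halves: do_xmote() below issues the request
 * to the lock manager, and the reply arrives asynchronously through
 * gfs2_glock_complete(), which schedules glock_work_func() to call
 * finish_xmote() above.  finish_xmote() is only called synchronously from
 * do_xmote() when no lm_lock operation exists (lock_nolock) or when the
 * request could not be handed to the DLM.
 */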

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 * @may_cancel: Operation may be canceled
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
		     unsigned int target, bool may_cancel)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int ret;

	/*
	 * When a filesystem is withdrawing, the remaining cluster nodes will
	 * take care of recovering the withdrawing node's journal.  We only
	 * need to make sure that once we trigger remote recovery, we won't
	 * write to the shared block device anymore.  This means that here,
	 *
	 * - no new writes to the filesystem must be triggered (->go_sync()).
	 *
	 * - any cached data should be discarded by calling ->go_inval(),
	 *   dirty or not and journaled or unjournaled.
	 *
	 * - no more dlm locking operations should be issued (->lm_lock()).
	 */

	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);

	if (!glops->go_inval || !glops->go_sync)
		goto skip_inval;

	spin_unlock(&gl->gl_lockref.lock);
	if (!gfs2_withdrawn(sdp)) {
		ret = glops->go_sync(gl);
		if (ret) {
			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
				fs_err(sdp, "Error %d syncing glock\n", ret);
				gfs2_dump_glock(NULL, gl, true);
				gfs2_withdraw(sdp);
			}
		}
	}

	if (target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED)
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	spin_lock(&gl->gl_lockref.lock);

skip_inval:
	if (gfs2_withdrawn(sdp)) {
		if (target != LM_ST_UNLOCKED)
			target = LM_OUT_ERROR;
		goto out;
	}

	if (ls->ls_ops->lm_lock) {
		spin_unlock(&gl->gl_lockref.lock);
		ret = ls->ls_ops->lm_lock(gl, target, gh ? gh->gh_flags : 0);
		spin_lock(&gl->gl_lockref.lock);

		if (!ret) {
			if (may_cancel) {
				set_bit(GLF_MAY_CANCEL, &gl->gl_flags);
				smp_mb__after_atomic();
				wake_up_bit(&gl->gl_flags, GLF_LOCK);
			}
			/* The operation will be completed asynchronously. */
			gl->gl_lockref.count++;
			return;
		}

		if (ret == -ENODEV) {
			/*
			 * The lockspace has been released and the lock has
			 * been unlocked implicitly.
			 */
			if (target != LM_ST_UNLOCKED) {
				target = LM_OUT_ERROR;
				goto out;
			}
		} else {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
			return;
		}
	}

out:
	/* Complete the operation now. */
	finish_xmote(gl, target);
	gl->gl_lockref.count++;
	gfs2_glock_queue_work(gl, 0);
}
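
/*
 * GLF_MAY_CANCEL life cycle: set above once a cancelable request has been
 * handed to the lock manager, tested in gfs2_glock_dq() to decide whether
 * a pending request can be canceled via lm_cancel, and cleared in
 * gfs2_glock_complete() when the reply comes in.
 */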

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh;

	if (test_bit(GLF_LOCK, &gl->gl_flags))
		return;

	/*
	 * The GLF_DEMOTE_IN_PROGRESS flag must only be set when the GLF_LOCK
	 * flag is set as well.
	 */
	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
		if (gl->gl_demote_state == gl->gl_state) {
			gfs2_demote_wake(gl);
			goto promote;
		}

		if (find_first_holder(gl))
			return;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
		set_bit(GLF_LOCK, &gl->gl_flags);
		do_xmote(gl, NULL, gl->gl_target, false);
		return;
	}

promote:
	do_promote(gl);
	if (find_first_holder(gl))
		return;
	gh = find_first_waiter(gl);
	if (!gh)
		return;
	if (nonblock)
		goto out_sched;
	gl->gl_target = gh->gh_state;
	if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		do_error(gl, 0); /* Fail queued try locks */
	set_bit(GLF_LOCK, &gl->gl_flags);
	do_xmote(gl, gh, gl->gl_target, true);
	return;

out_sched:
	gl->gl_lockref.count++;
	gfs2_glock_queue_work(gl, 0);
}

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
void glock_set_object(struct gfs2_glock *gl, void *object)
{
	void *prev_object;

	spin_lock(&gl->gl_lockref.lock);
	prev_object = gl->gl_object;
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(glock_sbd(gl), prev_object == NULL))
		gfs2_dump_glock(NULL, gl, true);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: object the glock currently points at
 */
void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	void *prev_object;

	spin_lock(&gl->gl_lockref.lock);
	prev_object = gl->gl_object;
	gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(glock_sbd(gl), prev_object == object))
		gfs2_dump_glock(NULL, gl, true);
}
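
/*
 * Illustrative pairing (the inode code is the main user): while an object
 * is protected by a glock, the glock carries a back-pointer to it:
 *
 *	glock_set_object(ip->i_gl, ip);
 *	...
 *	glock_clear_object(ip->i_gl, ip);
 *
 * The assertions above catch set/clear calls that are not properly paired.
 */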

void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
{
	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

	if (ri->ri_magic == 0)
		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
		ri->ri_generation_deleted = cpu_to_be64(generation);
}

bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
{
	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
		return false;
	return generation <= be64_to_cpu(ri->ri_generation_deleted);
}

static void gfs2_glock_poke(struct gfs2_glock *gl)
{
	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
	struct gfs2_holder gh;
	int error;

	__gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
	error = gfs2_glock_nq(&gh);
	if (!error)
		gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
}

static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip && !igrab(&ip->i_inode))
		ip = NULL;
	spin_unlock(&gl->gl_lockref.lock);
	if (ip) {
		wait_on_new_inode(&ip->i_inode);
		if (is_bad_inode(&ip->i_inode)) {
			iput(&ip->i_inode);
			ip = NULL;
		}
	}
	return ip;
}

static void gfs2_try_to_evict(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	/*
	 * If there is contention on the iopen glock and we have an inode, try
	 * to grab and release the inode so that it can be evicted.  The
	 * GLF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the
	 * inode should not be deleted locally.  This will allow the remote
	 * node to go ahead and delete the inode without us having to do it,
	 * which will avoid rgrp glock thrashing.
	 *
	 * The remote node is likely still holding the corresponding inode
	 * glock, so it will run before we get to verify that the delete has
	 * happened below.  (Verification is triggered by the call to
	 * gfs2_queue_verify_delete() in gfs2_evict_inode().)
	 */
	ip = gfs2_grab_existing_inode(gl);
	if (ip) {
		set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
		d_prune_aliases(&ip->i_inode);
		iput(&ip->i_inode);
		clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);

		/* If the inode was evicted, gl->gl_object will now be NULL. */
		ip = gfs2_grab_existing_inode(gl);
		if (ip) {
			gfs2_glock_poke(ip->i_gl);
			iput(&ip->i_inode);
		}
	}
}

bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);

	if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
		return false;
	return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0);
}

bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	unsigned long delay;

	if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
		return false;
	delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
	return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
}
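
/*
 * When @later is set, gfs2_queue_verify_delete() delays the verification
 * by a random interval in the range [HZ, 10 * HZ), which spreads out
 * retries instead of letting all interested nodes re-check the unlinked
 * inode at the same time.
 */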

static void delete_work_func(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = glock_sbd(gl);
	bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);

	/*
	 * Check for the GLF_VERIFY_DELETE above: this ensures that we won't
	 * immediately process GLF_VERIFY_DELETE work that the below call to
	 * gfs2_try_to_evict() queues.
	 */

	if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
		gfs2_try_to_evict(gl);

	if (verify_delete) {
		u64 no_addr = glock_number(gl);
		struct inode *inode;

		inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
					    GFS2_BLKST_UNLINKED);
		if (IS_ERR(inode)) {
			if (PTR_ERR(inode) == -EAGAIN &&
			    !test_bit(SDF_KILL, &sdp->sd_flags) &&
			    gfs2_queue_verify_delete(gl, true))
				return;
		} else {
			d_prune_aliases(inode);
			iput(inode);
		}
	}

	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) {
		clear_bit(GLF_HAVE_REPLY, &gl->gl_flags);
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		if (glock_type(gl) == LM_TYPE_INODE) {
			unsigned long holdtime, now = jiffies;

			holdtime = gl->gl_tchange + gl->gl_hold_time;
			if (time_before(now, holdtime))
				delay = holdtime - now;
		}

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			gfs2_set_demote(GLF_DEMOTE, gl);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		gfs2_glock_queue_work(gl, delay);
	}

	/* Drop the remaining glock references manually. */
	GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		if (gl->gl_state == LM_ST_UNLOCKED) {
			__gfs2_glock_put(gl);
			return;
		}
		gfs2_glock_add_to_lru(gl);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
						       &new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
					    name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	if (gl)
		gfs2_glock_remove_from_lru(gl);
	return gl;
}
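
/*
 * find_insert_glock() above implements the wait side of the glock wait
 * table: if the lookup finds a glock whose lockref is already marked dead,
 * the caller sleeps on the hashed wait queue until __gfs2_glock_free()
 * removes the dead glock from the hash table and calls wake_up_glock(),
 * then retries the lookup.
 */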

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;

	gl = find_insert_glock(&name, NULL);
	if (gl)
		goto found;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
		if (!gla)
			return -ENOMEM;
		gl = &gla->glock;
	} else {
		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
		if (!gl)
			return -ENOMEM;
	}
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_ops = glops;

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			gfs2_glock_dealloc(&gl->gl_rcu);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = BIT(GLF_INITIAL);
	if (glops->go_instantiate)
		gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
	gl->gl_name = name;
	lockref_init(&gl->gl_lockref);
	lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	if (glock_type(gl) == LM_TYPE_IOPEN)
		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		gfp_t gfp_mask;

		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = sdp->sd_inode;
		mapping->flags = 0;
		gfp_mask = mapping_gfp_mask(sdp->sd_inode->i_mapping);
		mapping_set_gfp_mask(mapping, gfp_mask);
		mapping->i_private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (tmp) {
		gfs2_glock_dealloc(&gl->gl_rcu);
		if (atomic_dec_and_test(&sdp->sd_glock_disposal))
			wake_up(&sdp->sd_kill_wait);

		if (IS_ERR(tmp))
			return PTR_ERR(tmp);
		gl = tmp;
	}

found:
	*glp = gl;
	return 0;
}

/**
 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 * @ip: caller's return address for debugging
 */

void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
			struct gfs2_holder *gh, unsigned long ip)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gfs2_glock_hold(gl);
	gh->gh_ip = ip;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
}
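
/*
 * Illustrative use of a holder (gfs2_holder_init() and
 * gfs2_glock_nq_init() are wrappers in glock.h):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access the resource protected by gl ...
 *	gfs2_glock_dq_uninit(&gh);
 */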

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}

/**
 * gfs2_glock_holder_ready - holder is ready and its error code can be collected
 * @gh: the glock holder
 *
 * Called when a glock holder no longer needs to be waited for because it is
 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
 * failed (gh_error != 0).
 */

int gfs2_glock_holder_ready(struct gfs2_holder *gh)
{
	if (gh->gh_error || (gh->gh_flags & GL_SKIP))
		return gh->gh_error;
	gh->gh_error = gfs2_instantiate(gh);
	if (gh->gh_error)
		gfs2_glock_dq(gh);
	return gh->gh_error;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gfs2_glock_holder_ready(gh);
}

static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}
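
/*
 * The minimum hold time adapts to observed contention: waiting longer than
 * a second for a glock lengthens it (gfs2_glock_update_hold_time() above),
 * while a state change we didn't ask for shortens it again (see
 * state_change()).
 */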

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 * @retries: number of retries attempted so far
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */

int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs,
			  unsigned int retries)
{
	struct gfs2_sbd *sdp = glock_sbd(ghs[0].gh_gl);
	unsigned long start_time = jiffies;
	int i, ret = 0;
	long timeout;

	might_sleep();

	timeout = GL_GLOCK_MIN_HOLD;
	if (retries) {
		unsigned int max_shift;
		long incr;

		/* Add a random delay and increase the timeout exponentially. */
		max_shift = BITS_PER_LONG - 2 - __fls(GL_GLOCK_HOLD_INCR);
		incr = min(GL_GLOCK_HOLD_INCR << min(retries - 1, max_shift),
			   10 * HZ - GL_GLOCK_MIN_HOLD);
		schedule_timeout_interruptible(get_random_long() % (incr / 3));
		if (signal_pending(current))
			goto interrupted;
		timeout += (incr / 3) + get_random_long() % (incr / 3);
	}

	if (!wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
					      !glocks_pending(num_gh, ghs),
					      timeout)) {
		ret = -ESTALE; /* request timed out. */
		goto out;
	}
	if (signal_pending(current))
		goto interrupted;

	for (i = 0; i < num_gh; i++) {
		struct gfs2_holder *gh = &ghs[i];
		int ret2;

		if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
			gfs2_glock_update_hold_time(gh->gh_gl,
						    start_time);
		}
		ret2 = gfs2_glock_holder_ready(gh);
		if (!ret)
			ret = ret2;
	}

out:
	if (ret) {
		for (i = 0; i < num_gh; i++) {
			struct gfs2_holder *gh = &ghs[i];

			gfs2_glock_dq(gh);
		}
	}
	return ret;

interrupted:
	ret = -EINTR;
	goto out;
}

/**
 * request_demote - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise pending demote
 * @remote: true if this came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void request_demote(struct gfs2_glock *gl, unsigned int state,
			   unsigned long delay, bool remote)
{
	gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

static bool gfs2_should_queue_trylock(struct gfs2_glock *gl,
				      struct gfs2_holder *gh)
{
	struct gfs2_holder *current_gh, *gh2;

	current_gh = find_first_holder(gl);
	if (current_gh && !may_grant(gl, current_gh, gh))
		return false;

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			return false;
	}
	return true;
}

static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{
	if (!(gh->gh_flags & GL_NOPID))
		return true;
	return !test_bit(HIF_HOLDER, &gh->gh_iflags);
}
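
/*
 * Try locks (LM_FLAG_TRY / LM_FLAG_TRY_1CB) are only queued when
 * gfs2_should_queue_trylock() says they have a chance of being granted;
 * otherwise add_to_queue() below fails them immediately with GLR_TRYFAILED
 * instead of blocking behind incompatible holders or waiters.
 */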

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that.  This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct gfs2_holder *gh2;

	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		GLOCK_BUG_ON(gl, true);

	if ((gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
	    !gfs2_should_queue_trylock(gl, gh)) {
		gh->gh_error = GLR_TRYFAILED;
		gfs2_holder_wake(gh);
		return;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (likely(gh2->gh_owner_pid != gh->gh_owner_pid))
			continue;
		if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK)
			continue;
		if (!pid_is_meaningful(gh2))
			continue;
		goto trap_recursive;
	}
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	list_add_tail(&gh->gh_list, &gl->gl_holders);
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       glock_type(gh2->gh_gl), gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       glock_type(gh->gh_gl), gh->gh_state);
	gfs2_dump_glock(NULL, gl, true);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = glock_sbd(gl);
	int error;

	if (gfs2_withdrawn(sdp))
		return -EIO;

	if (gh->gh_flags & GL_NOBLOCK) {
		struct gfs2_holder *current_gh;

		error = -ECHILD;
		spin_lock(&gl->gl_lockref.lock);
		if (find_last_waiter(gl))
			goto unlock;
		current_gh = find_first_holder(gl);
		if (!may_grant(gl, current_gh, gh))
			goto unlock;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		trace_gfs2_promote(gh);
		error = 0;
unlock:
		spin_unlock(&gl->gl_lockref.lock);
		return error;
	}

	gh->gh_error = 0;
	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_RECOVER & gh->gh_flags) &&
		     test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
		set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
		gl->gl_lockref.count++;
		gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	error = 0;
	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
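
/*
 * Asynchronous acquisition (illustrative): queue the request with
 * GL_ASYNC, do other work, then collect the result:
 *
 *	gfs2_holder_init(gl, state, flags | GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);	(returns without waiting)
 *	...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */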

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

static void __gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	unsigned delay = 0;
	int fast_path = 0;

	/*
	 * This holder should not be cached, so mark it for demote.
	 * Note: this should be done before the glock_needs_demote
	 * check below.
	 */
	if (gh->gh_flags & GL_NOCACHE)
		request_demote(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	trace_gfs2_glock_queue(gh, 0);
	if (test_bit(HIF_WAIT, &gh->gh_iflags))
		gfs2_holder_wake(gh);

	/*
	 * If there hasn't been a demote request we are done.
	 * (Let the remaining holders, if any, keep holding it.)
	 */
	if (!glock_needs_demote(gl)) {
		if (list_empty(&gl->gl_holders))
			fast_path = 1;
	}

	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    glock_type(gl) == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		gfs2_glock_queue_work(gl, delay);
	}
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */
void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

again:
	spin_lock(&gl->gl_lockref.lock);
	if (!gfs2_holder_queued(gh)) {
		/*
		 * May have already been dequeued because the locking request
		 * was GL_ASYNC and it has failed in the meantime.
		 */
		goto out;
	}

	if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
	    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	    test_bit(GLF_LOCK, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    !test_bit(GLF_CANCELING, &gl->gl_flags)) {
		if (!test_bit(GLF_MAY_CANCEL, &gl->gl_flags)) {
			struct wait_queue_head *wq;
			DEFINE_WAIT(wait);

			wq = bit_waitqueue(&gl->gl_flags, GLF_LOCK);
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			spin_unlock(&gl->gl_lockref.lock);
			schedule();
			finish_wait(wq, &wait);
			goto again;
		}

		set_bit(GLF_CANCELING, &gl->gl_flags);
		spin_unlock(&gl->gl_lockref.lock);
		glock_sbd(gl)->sd_lockstruct.ls_ops->lm_cancel(gl);
		wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
		spin_lock(&gl->gl_lockref.lock);
		clear_bit(GLF_CANCELING, &gl->gl_flags);
		clear_and_wake_up_bit(GLF_LOCK, &gl->gl_flags);
		if (!gfs2_holder_queued(gh))
			goto out;
	}

	__gfs2_glock_dq(gh);
out:
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc_objs(struct gfs2_holder *, num_gh, GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}
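
/*
 * Illustrative multi-glock acquisition: nq_m_sync() sorts the requests by
 * lock number before acquiring them, so all nodes take the same glocks in
 * the same order and ABBA deadlocks are avoided.  Callers need not sort
 * ghs[] themselves:
 *
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_dq_m(2, ghs);
 */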

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;

	gfs2_glock_hold(gl);
	spin_lock(&gl->gl_lockref.lock);
	if (!list_empty(&gl->gl_holders) &&
	    glock_type(gl) == LM_TYPE_INODE) {
		unsigned long now = jiffies;
		unsigned long holdtime;

		holdtime = gl->gl_tchange + gl->gl_hold_time;

		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}
	request_demote(gl, state, delay, true);
	gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "recover" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_RECOVER & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_MAY_CANCEL, &gl->gl_flags);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}
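
/*
 * While the DLM lockspace is being recovered (DFL_BLOCK_LOCKS set),
 * replies that pass gfs2_should_freeze() are parked by setting
 * GLF_HAVE_FROZEN_REPLY instead of being processed.  Once recovery
 * finishes, gfs2_glock_thaw() walks the hash table and thaw_glock()
 * re-queues the frozen replies for processing.
 */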

static int glock_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (glock_number(gla) > glock_number(glb))
		return 1;
	if (glock_number(gla) < glock_number(glb))
		return -1;

	return 0;
}

static bool can_free_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);

	return !test_bit(GLF_LOCK, &gl->gl_flags) &&
	       !gl->gl_lockref.count &&
	       (!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
		test_bit(SDF_KILL, &sdp->sd_flags));
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock.  While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;
	unsigned long freed = 0;

	list_sort(NULL, list, glock_cmp);

	while (!list_empty(list)) {
		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_move(&gl->gl_lru, &lru_list);
			continue;
		}
		if (!can_free_glock(gl)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
		freed++;
		gl->gl_lockref.count++;
		if (gl->gl_state != LM_ST_UNLOCKED)
			request_demote(gl, LM_ST_UNLOCKED, 0, false);
		gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
	return freed;
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static unsigned long gfs2_scan_glock_lru(unsigned long nr)
{
	struct gfs2_glock *gl, *next;
	LIST_HEAD(dispose);
	unsigned long freed = 0;

	spin_lock(&lru_lock);
	list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
		if (!nr--)
			break;
		if (can_free_glock(gl))
			list_move(&gl->gl_lru, &dispose);
	}
	if (!list_empty(&dispose))
		freed = gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker *glock_shrinker;
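
/*
 * The shrinker above lets memory pressure trim the glock LRU:
 * gfs2_glock_shrink_count() reports the LRU size, and
 * gfs2_glock_shrink_scan() demotes up to sc->nr_to_scan unused glocks to
 * LM_ST_UNLOCKED.  Reclaim without __GFP_FS is skipped because demotion
 * may recurse into the filesystem.
 */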

/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object. So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct rhashtable_iter iter;

        rhashtable_walk_enter(&gl_hash_table, &iter);

        do {
                rhashtable_walk_start(&iter);

                while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
                        if (glock_sbd(gl) == sdp)
                                examiner(gl);
                }

                rhashtable_walk_stop(&iter);
        } while (cond_resched(), gl == ERR_PTR(-EAGAIN));

        rhashtable_walk_exit(&iter);
}
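
/*
 * Example examiner (an illustrative sketch only): because the walk can
 * restart after -EAGAIN, an examiner may see the same glock more than
 * once and must be idempotent.  A test-and-set pattern copes with that
 * naturally; GLF_EXAMPLE_SEEN is a made-up flag name:
 *
 *	static void example_examiner(struct gfs2_glock *gl)
 *	{
 *		if (test_and_set_bit(GLF_EXAMPLE_SEEN, &gl->gl_flags))
 *			return;	// already handled on an earlier pass
 *		// ... do the per-glock work here ...
 *	}
 */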

void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{
        clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
        clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
        if (cancel_delayed_work(&gl->gl_delete))
                gfs2_glock_put(gl);
}

static void flush_delete_work(struct gfs2_glock *gl)
{
        if (glock_type(gl) == LM_TYPE_IOPEN) {
                struct gfs2_sbd *sdp = glock_sbd(gl);

                if (cancel_delayed_work(&gl->gl_delete)) {
                        queue_delayed_work(sdp->sd_delete_wq,
                                           &gl->gl_delete, 0);
                }
        }
}

void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
{
        glock_hash_walk(flush_delete_work, sdp);
        flush_workqueue(sdp->sd_delete_wq);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 */

static void thaw_glock(struct gfs2_glock *gl)
{
        if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))
                return;
        if (!lockref_get_not_dead(&gl->gl_lockref))
                return;

        gfs2_glock_remove_from_lru(gl);
        spin_lock(&gl->gl_lockref.lock);
        set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
        gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
        gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        if (!__lockref_is_dead(&gl->gl_lockref)) {
                gl->gl_lockref.count++;
                if (gl->gl_state != LM_ST_UNLOCKED)
                        request_demote(gl, LM_ST_UNLOCKED, 0, false);
                gfs2_glock_queue_work(gl, 0);
        }
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
        glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
        spin_lock(&gl->gl_lockref.lock);
        gfs2_dump_glock(seq, gl, fsid);
        spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
        dump_glock(NULL, gl, true);
}

static void withdraw_glock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_lockref.lock);
        if (!__lockref_is_dead(&gl->gl_lockref)) {
                /*
                 * We don't want to write back any more dirty data. Unlock the
                 * remaining inode and resource group glocks; this will cause
                 * their ->go_inval() hooks to toss out all the remaining
                 * cached data, dirty or not.
                 */
                if (gl->gl_ops->go_inval && gl->gl_state != LM_ST_UNLOCKED)
                        request_demote(gl, LM_ST_UNLOCKED, 0, false);
                do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
        }
        spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_withdraw_glocks(struct gfs2_sbd *sdp)
{
        glock_hash_walk(withdraw_glock, sdp);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        unsigned long start = jiffies;
        bool timed_out = false;

        set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
        flush_workqueue(sdp->sd_glock_wq);
        glock_hash_walk(clear_glock, sdp);
        flush_workqueue(sdp->sd_glock_wq);

        while (!timed_out) {
                wait_event_timeout(sdp->sd_kill_wait,
                                   !atomic_read(&sdp->sd_glock_disposal),
                                   HZ * 60);
                if (!atomic_read(&sdp->sd_glock_disposal))
                        break;
                timed_out = time_after(jiffies, start + (HZ * 600));
                fs_warn(sdp, "%u glocks left after %u seconds%s\n",
                        atomic_read(&sdp->sd_glock_disposal),
                        jiffies_to_msecs(jiffies - start) / 1000,
                        timed_out ? ":" : "; still waiting");
        }
        gfs2_lm_unmount(sdp);
        gfs2_free_dead_glocks(sdp);
        glock_hash_walk(dump_glock_func, sdp);
        destroy_workqueue(sdp->sd_glock_wq);
        sdp->sd_glock_wq = NULL;
}

static const char *state2str(unsigned state)
{
        switch (state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
        char *p = buf;

        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_RECOVER)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
        if (flags & LM_FLAG_NODE_SCOPE)
                *p++ = 'n';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (flags & GL_SKIP)
                *p++ = 's';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
                        const char *fs_id_buf)
{
        const char *comm = "(none)";
        pid_t owner_pid = 0;
        char flags_buf[32];

        rcu_read_lock();
        if (pid_is_meaningful(gh)) {
                struct task_struct *gh_owner;

                comm = "(ended)";
                owner_pid = pid_nr(gh->gh_owner_pid);
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
                if (gh_owner)
                        comm = gh_owner->comm;
        }
        gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
                       fs_id_buf, state2str(gh->gh_state),
                       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                       gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip);
        rcu_read_unlock();
}
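
/*
 * For reference, a holder line emitted by dump_holder() looks like this
 * (values illustrative):
 *
 *	 H: s:EX f:W e:0 p:1340 [bash] gfs2_create_inode+0x2b1/0x6a0
 *
 * i.e. requested state, holder flags, error, owner pid, command name,
 * and the code address recorded when the holder was initialized.
 */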

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
        const unsigned long *gflags = &gl->gl_flags;
        char *p = buf;

        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_MAY_CANCEL, gflags))
                *p++ = 'c';
        if (test_bit(GLF_HAVE_REPLY, gflags))
                *p++ = 'r';
        if (test_bit(GLF_INITIAL, gflags))
                *p++ = 'a';
        if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags))
                *p++ = 'F';
        if (!list_empty(&gl->gl_holders))
                *p++ = 'q';
        if (test_bit(GLF_LRU, gflags))
                *p++ = 'L';
        if (gl->gl_object)
                *p++ = 'o';
        if (test_bit(GLF_BLOCKING, gflags))
                *p++ = 'b';
        if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
                *p++ = 'n';
        if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
                *p++ = 'N';
        if (test_bit(GLF_TRY_TO_EVICT, gflags))
                *p++ = 'e';
        if (test_bit(GLF_VERIFY_DELETE, gflags))
                *p++ = 'E';
        if (test_bit(GLF_DEFER_DELETE, gflags))
                *p++ = 's';
        if (test_bit(GLF_CANCELING, gflags))
                *p++ = 'C';
        *p = 0;
        return buf;
}
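
/*
 * For reference, a glock line emitted by gfs2_dump_glock() below looks
 * like this (values illustrative):
 *
 *	G: s:EX n:2/6068 f:yfqob t:EX d:UN/0 a:3 v:0 r:4 m:20 p:1
 *
 * i.e. state, type/number, flags, target state, demote state/time,
 * AIL buffer count, revoke count, reference count, minimum hold time,
 * and number of cached pages.
 */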

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are
 * in [] so that it's possible to see if they are composed of spaces, for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 */

void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned long long dtime;
        const struct gfs2_holder *gh;
        char gflags_buf[32];
        struct gfs2_sbd *sdp = glock_sbd(gl);
        char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
        unsigned long nrpages = 0;

        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
                struct address_space *mapping = gfs2_glock2aspace(gl);

                nrpages = mapping->nrpages;
        }
        memset(fs_id_buf, 0, sizeof(fs_id_buf));
        if (fsid && sdp) /* safety precaution */
                sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
        dtime = jiffies - gl->gl_demote_time;
        dtime *= 1000000 / HZ; /* demote time in uSec */
        if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
                dtime = 0;
        gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
                       "v:%d r:%d m:%ld p:%lu\n",
                       fs_id_buf, state2str(gl->gl_state),
                       glock_type(gl),
                       (unsigned long long)glock_number(gl),
                       gflags2str(gflags_buf, gl),
                       state2str(gl->gl_target),
                       state2str(gl->gl_demote_state), dtime,
                       atomic_read(&gl->gl_ail_count),
                       atomic_read(&gl->gl_revokes),
                       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);

        list_for_each_entry(gh, &gl->gl_holders, gh_list)
                dump_holder(seq, gh, fs_id_buf);

        if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
                glops->go_dump(seq, gl, fs_id_buf);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glock *gl = iter_ptr;

        seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
                   glock_type(gl),
                   (unsigned long long)glock_number(gl),
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
        return 0;
}

static const char *gfs2_gltype[] = {
        "type",
        "reserved",
        "nondisk",
        "inode",
        "rgrp",
        "meta",
        "iopen",
        "flock",
        "plock",
        "quota",
        "journal",
};

static const char *gfs2_stype[] = {
        [GFS2_LKS_SRTT]         = "srtt",
        [GFS2_LKS_SRTTVAR]      = "srttvar",
        [GFS2_LKS_SRTTB]        = "srttb",
        [GFS2_LKS_SRTTVARB]     = "srttvarb",
        [GFS2_LKS_SIRT]         = "sirt",
        [GFS2_LKS_SIRTVAR]      = "sirtvar",
        [GFS2_LKS_DCOUNT]       = "dlm",
        [GFS2_LKS_QCOUNT]       = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
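
/*
 * Each sbstats line is addressed by a single seq_file position: the
 * glock type index lives in the upper bits and the stat index in the
 * low three bits.  Worked example: pos = 0x1a gives index = 0x1a >> 3
 * = 3 ("inode") and subindex = 0x1a & 7 = 2 ("srttb"), so that line
 * prints the srttb statistic of inode glocks for every CPU.
 */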
"cpu": gfs2_stype[subindex]); 2416 2417 for_each_possible_cpu(i) { 2418 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 2419 2420 if (index == 0) 2421 seq_printf(seq, " %15u", i); 2422 else 2423 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> 2424 lkstats[index - 1].stats[subindex]); 2425 } 2426 seq_putc(seq, '\n'); 2427 return 0; 2428 } 2429 2430 int __init gfs2_glock_init(void) 2431 { 2432 int i, ret; 2433 2434 ret = rhashtable_init(&gl_hash_table, &ht_parms); 2435 if (ret < 0) 2436 return ret; 2437 2438 glock_shrinker = shrinker_alloc(0, "gfs2-glock"); 2439 if (!glock_shrinker) { 2440 rhashtable_destroy(&gl_hash_table); 2441 return -ENOMEM; 2442 } 2443 2444 glock_shrinker->count_objects = gfs2_glock_shrink_count; 2445 glock_shrinker->scan_objects = gfs2_glock_shrink_scan; 2446 2447 shrinker_register(glock_shrinker); 2448 2449 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++) 2450 init_waitqueue_head(glock_wait_table + i); 2451 2452 return 0; 2453 } 2454 2455 void gfs2_glock_exit(void) 2456 { 2457 shrinker_free(glock_shrinker); 2458 rhashtable_destroy(&gl_hash_table); 2459 } 2460 2461 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) 2462 { 2463 struct gfs2_glock *gl = gi->gl; 2464 2465 if (gl) { 2466 if (n == 0) 2467 return; 2468 gfs2_glock_put_async(gl); 2469 } 2470 for (;;) { 2471 gl = rhashtable_walk_next(&gi->hti); 2472 if (IS_ERR_OR_NULL(gl)) { 2473 if (gl == ERR_PTR(-EAGAIN)) { 2474 n = 1; 2475 continue; 2476 } 2477 gl = NULL; 2478 break; 2479 } 2480 if (glock_sbd(gl) != gi->sdp) 2481 continue; 2482 if (n <= 1) { 2483 if (!lockref_get_not_dead(&gl->gl_lockref)) 2484 continue; 2485 break; 2486 } else { 2487 if (__lockref_is_dead(&gl->gl_lockref)) 2488 continue; 2489 n--; 2490 } 2491 } 2492 gi->gl = gl; 2493 } 2494 2495 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 2496 __acquires(RCU) 2497 { 2498 struct gfs2_glock_iter *gi = seq->private; 2499 loff_t n; 2500 2501 /* 2502 * We can either stay where we are, skip to the next hash table 2503 * entry, or start from the beginning. 

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n;

        /*
         * We can either stay where we are, skip to the next hash table
         * entry, or start from the beginning.
         */
        if (*pos < gi->last_pos) {
                rhashtable_walk_exit(&gi->hti);
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
                n = *pos + 1;
        } else {
                n = *pos - gi->last_pos;
        }

        rhashtable_walk_start(&gi->hti);

        gfs2_glock_iter_next(gi, n);
        gi->last_pos = *pos;
        return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
                                 loff_t *pos)
{
        struct gfs2_glock_iter *gi = seq->private;

        (*pos)++;
        gi->last_pos = *pos;
        gfs2_glock_iter_next(gi, 1);
        return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
        __releases(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;

        rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
        dump_glock(seq, iter_ptr, false);
        return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
        preempt_disable();
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
                                   loff_t *pos)
{
        (*pos)++;
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
        preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_sops = {
        .start = gfs2_sbstats_seq_start,
        .next  = gfs2_sbstats_seq_next,
        .stop  = gfs2_sbstats_seq_stop,
        .show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
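
/*
 * Worked example for the buffer size above: with 4k pages and
 * PAGE_ALLOC_COSTLY_ORDER == 3, PAGE_SIZE << 3 == 32768, so
 * GFS2_SEQ_GOODSIZE is min(32768, 65536) == 32768 bytes.  The intent
 * is to use the largest seq_file buffer that still avoids costly
 * high-order allocations, capped at 64k.
 */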

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
                              const struct seq_operations *ops)
{
        int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                struct gfs2_glock_iter *gi = seq->private;

                gi->sdp = inode->i_private;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                /*
                 * Initially, we are "before" the first hash table entry; the
                 * first call to rhashtable_walk_next gets us the first entry.
                 */
                gi->last_pos = -1;
                gi->gl = NULL;
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        }
        return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct gfs2_glock_iter *gi = seq->private;

        if (gi->gl)
                gfs2_glock_put(gi->gl);
        rhashtable_walk_exit(&gi->hti);
        return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static const struct file_operations gfs2_glocks_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glocks_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

struct gfs2_glockfd_iter {
        struct super_block *sb;
        unsigned int tgid;
        struct task_struct *task;
        unsigned int fd;
        struct file *file;
};

static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i)
{
        struct pid_namespace *ns = task_active_pid_ns(current);
        struct pid *pid;

        if (i->task)
                put_task_struct(i->task);

        rcu_read_lock();
retry:
        i->task = NULL;
        pid = find_ge_pid(i->tgid, ns);
        if (pid) {
                i->tgid = pid_nr_ns(pid, ns);
                i->task = pid_task(pid, PIDTYPE_TGID);
                if (!i->task) {
                        i->tgid++;
                        goto retry;
                }
                get_task_struct(i->task);
        }
        rcu_read_unlock();
        return i->task;
}

static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
{
        if (i->file) {
                fput(i->file);
                i->file = NULL;
        }

        for (;; i->fd++) {
                i->file = fget_task_next(i->task, &i->fd);
                if (!i->file) {
                        i->fd = 0;
                        break;
                }

                if (file_inode(i->file)->i_sb == i->sb)
                        break;

                fput(i->file);
        }
        return i->file;
}

static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct gfs2_glockfd_iter *i = seq->private;

        if (*pos)
                return NULL;
        while (gfs2_glockfd_next_task(i)) {
                if (gfs2_glockfd_next_file(i))
                        return i;
                i->tgid++;
        }
        return NULL;
}

static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr,
                                   loff_t *pos)
{
        struct gfs2_glockfd_iter *i = seq->private;

        (*pos)++;
        i->fd++;
        do {
                if (gfs2_glockfd_next_file(i))
                        return i;
                i->tgid++;
        } while (gfs2_glockfd_next_task(i));
        return NULL;
}

static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glockfd_iter *i = seq->private;

        if (i->file)
                fput(i->file);
        if (i->task)
                put_task_struct(i->task);
}
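
/*
 * The two show functions below print one line per glock held on behalf
 * of an open file, in the form "tgid fd type/number", for example
 * (values illustrative):
 *
 *	1601 3 5/806a
 *
 * where 5 is LM_TYPE_IOPEN and 806a is the disk address of the inode.
 */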

static void gfs2_glockfd_seq_show_flock(struct seq_file *seq,
                                        struct gfs2_glockfd_iter *i)
{
        struct gfs2_file *fp = i->file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;
        struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED };

        if (!READ_ONCE(fl_gh->gh_gl))
                return;

        spin_lock(&i->file->f_lock);
        if (gfs2_holder_initialized(fl_gh))
                gl_name = fl_gh->gh_gl->gl_name;
        spin_unlock(&i->file->f_lock);

        if (gl_name.ln_type != LM_TYPE_RESERVED) {
                seq_printf(seq, "%d %u %u/%llx\n",
                           i->tgid, i->fd, gl_name.ln_type,
                           (unsigned long long)gl_name.ln_number);
        }
}

static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glockfd_iter *i = seq->private;
        struct inode *inode = file_inode(i->file);
        struct gfs2_glock *gl;

        inode_lock_shared(inode);
        gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
        if (gl) {
                seq_printf(seq, "%d %u %u/%llx\n",
                           i->tgid, i->fd, glock_type(gl),
                           (unsigned long long)glock_number(gl));
        }
        gfs2_glockfd_seq_show_flock(seq, i);
        inode_unlock_shared(inode);
        return 0;
}

static const struct seq_operations gfs2_glockfd_seq_ops = {
        .start = gfs2_glockfd_seq_start,
        .next  = gfs2_glockfd_seq_next,
        .stop  = gfs2_glockfd_seq_stop,
        .show  = gfs2_glockfd_seq_show,
};

static int gfs2_glockfd_open(struct inode *inode, struct file *file)
{
        struct gfs2_glockfd_iter *i;
        struct gfs2_sbd *sdp = inode->i_private;

        i = __seq_open_private(file, &gfs2_glockfd_seq_ops,
                               sizeof(struct gfs2_glockfd_iter));
        if (!i)
                return -ENOMEM;
        i->sb = sdp->sd_vfs;
        return 0;
}

static const struct file_operations gfs2_glockfd_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glockfd_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

        debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_glocks_fops);

        debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_glockfd_fops);

        debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_glstats_fops);

        debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_sbstats_fops);
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        debugfs_remove_recursive(sdp->debugfs_dir);
        sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}