1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/spinlock.h> 12 #include <linux/buffer_head.h> 13 #include <linux/delay.h> 14 #include <linux/sort.h> 15 #include <linux/hash.h> 16 #include <linux/jhash.h> 17 #include <linux/kallsyms.h> 18 #include <linux/gfs2_ondisk.h> 19 #include <linux/list.h> 20 #include <linux/wait.h> 21 #include <linux/module.h> 22 #include <linux/uaccess.h> 23 #include <linux/seq_file.h> 24 #include <linux/debugfs.h> 25 #include <linux/kthread.h> 26 #include <linux/freezer.h> 27 #include <linux/workqueue.h> 28 #include <linux/jiffies.h> 29 #include <linux/rcupdate.h> 30 #include <linux/rculist_bl.h> 31 #include <linux/bit_spinlock.h> 32 #include <linux/percpu.h> 33 #include <linux/list_sort.h> 34 #include <linux/lockref.h> 35 #include <linux/rhashtable.h> 36 #include <linux/pid_namespace.h> 37 #include <linux/fdtable.h> 38 #include <linux/file.h> 39 40 #include "gfs2.h" 41 #include "incore.h" 42 #include "glock.h" 43 #include "glops.h" 44 #include "inode.h" 45 #include "lops.h" 46 #include "meta_io.h" 47 #include "quota.h" 48 #include "super.h" 49 #include "util.h" 50 #include "bmap.h" 51 #define CREATE_TRACE_POINTS 52 #include "trace_gfs2.h" 53 54 struct gfs2_glock_iter { 55 struct gfs2_sbd *sdp; /* incore superblock */ 56 struct rhashtable_iter hti; /* rhashtable iterator */ 57 struct gfs2_glock *gl; /* current glock struct */ 58 loff_t last_pos; /* last position */ 59 }; 60 61 typedef void (*glock_examiner) (struct gfs2_glock * gl); 62 63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 64 static void __gfs2_glock_dq(struct gfs2_holder *gh); 65 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 66 unsigned long delay, bool remote); 67 68 static struct dentry *gfs2_root; 69 static struct workqueue_struct *glock_workqueue; 70 struct workqueue_struct *gfs2_delete_workqueue; 71 static LIST_HEAD(lru_list); 72 static atomic_t lru_count = ATOMIC_INIT(0); 73 static DEFINE_SPINLOCK(lru_lock); 74 75 #define GFS2_GL_HASH_SHIFT 15 76 #define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT) 77 78 static const struct rhashtable_params ht_parms = { 79 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4, 80 .key_len = offsetofend(struct lm_lockname, ln_type), 81 .key_offset = offsetof(struct gfs2_glock, gl_name), 82 .head_offset = offsetof(struct gfs2_glock, gl_node), 83 }; 84 85 static struct rhashtable gl_hash_table; 86 87 #define GLOCK_WAIT_TABLE_BITS 12 88 #define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS) 89 static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned; 90 91 struct wait_glock_queue { 92 struct lm_lockname *name; 93 wait_queue_entry_t wait; 94 }; 95 96 static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode, 97 int sync, void *key) 98 { 99 struct wait_glock_queue *wait_glock = 100 container_of(wait, struct wait_glock_queue, wait); 101 struct lm_lockname *wait_name = wait_glock->name; 102 struct lm_lockname *wake_name = key; 103 104 if (wake_name->ln_sbd != wait_name->ln_sbd || 105 wake_name->ln_number != wait_name->ln_number || 106 wake_name->ln_type != wait_name->ln_type) 107 return 0; 108 return autoremove_wake_function(wait, mode, sync, key); 109 } 110 111 static wait_queue_head_t *glock_waitqueue(struct 
lm_lockname *name) 112 { 113 u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0); 114 115 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS); 116 } 117 118 /** 119 * wake_up_glock - Wake up waiters on a glock 120 * @gl: the glock 121 */ 122 static void wake_up_glock(struct gfs2_glock *gl) 123 { 124 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); 125 126 if (waitqueue_active(wq)) 127 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); 128 } 129 130 static void gfs2_glock_dealloc(struct rcu_head *rcu) 131 { 132 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 133 134 kfree(gl->gl_lksb.sb_lvbptr); 135 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 136 struct gfs2_glock_aspace *gla = 137 container_of(gl, struct gfs2_glock_aspace, glock); 138 kmem_cache_free(gfs2_glock_aspace_cachep, gla); 139 } else 140 kmem_cache_free(gfs2_glock_cachep, gl); 141 } 142 143 /** 144 * glock_blocked_by_withdraw - determine if we can still use a glock 145 * @gl: the glock 146 * 147 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted 148 * when we're withdrawn. For example, to maintain metadata integrity, we should 149 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like 150 * iopen or the transaction glocks may be safely used because none of their 151 * metadata goes through the journal. So in general, we should disallow all 152 * glocks that are journaled, and allow all the others. One exception is: 153 * we need to allow our active journal to be promoted and demoted so others 154 * may recover it and we can reacquire it when they're done. 155 */ 156 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) 157 { 158 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 159 160 if (likely(!gfs2_withdrawn(sdp))) 161 return false; 162 if (gl->gl_ops->go_flags & GLOF_NONDISK) 163 return false; 164 if (!sdp->sd_jdesc || 165 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) 166 return false; 167 return true; 168 } 169 170 void gfs2_glock_free(struct gfs2_glock *gl) 171 { 172 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 173 174 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); 175 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 176 smp_mb(); 177 wake_up_glock(gl); 178 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 179 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 180 wake_up(&sdp->sd_glock_wait); 181 } 182 183 /** 184 * gfs2_glock_hold() - increment reference count on glock 185 * @gl: The glock to hold 186 * 187 */ 188 189 void gfs2_glock_hold(struct gfs2_glock *gl) 190 { 191 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 192 lockref_get(&gl->gl_lockref); 193 } 194 195 /** 196 * demote_ok - Check to see if it's ok to unlock a glock 197 * @gl: the glock 198 * 199 * Returns: 1 if it's ok 200 */ 201 202 static int demote_ok(const struct gfs2_glock *gl) 203 { 204 const struct gfs2_glock_operations *glops = gl->gl_ops; 205 206 if (gl->gl_state == LM_ST_UNLOCKED) 207 return 0; 208 /* 209 * Note that demote_ok is used for the lru process of disposing of 210 * glocks. For this purpose, we don't care if the glock's holders 211 * have the HIF_MAY_DEMOTE flag set or not. If someone is using 212 * them, don't demote. 
213 */ 214 if (!list_empty(&gl->gl_holders)) 215 return 0; 216 if (glops->go_demote_ok) 217 return glops->go_demote_ok(gl); 218 return 1; 219 } 220 221 222 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 223 { 224 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 225 return; 226 227 spin_lock(&lru_lock); 228 229 list_move_tail(&gl->gl_lru, &lru_list); 230 231 if (!test_bit(GLF_LRU, &gl->gl_flags)) { 232 set_bit(GLF_LRU, &gl->gl_flags); 233 atomic_inc(&lru_count); 234 } 235 236 spin_unlock(&lru_lock); 237 } 238 239 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 240 { 241 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 242 return; 243 244 spin_lock(&lru_lock); 245 if (test_bit(GLF_LRU, &gl->gl_flags)) { 246 list_del_init(&gl->gl_lru); 247 atomic_dec(&lru_count); 248 clear_bit(GLF_LRU, &gl->gl_flags); 249 } 250 spin_unlock(&lru_lock); 251 } 252 253 /* 254 * Enqueue the glock on the work queue. Passes one glock reference on to the 255 * work queue. 256 */ 257 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 258 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { 259 /* 260 * We are holding the lockref spinlock, and the work was still 261 * queued above. The queued work (glock_work_func) takes that 262 * spinlock before dropping its glock reference(s), so it 263 * cannot have dropped them in the meantime. 264 */ 265 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); 266 gl->gl_lockref.count--; 267 } 268 } 269 270 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 271 spin_lock(&gl->gl_lockref.lock); 272 __gfs2_glock_queue_work(gl, delay); 273 spin_unlock(&gl->gl_lockref.lock); 274 } 275 276 static void __gfs2_glock_put(struct gfs2_glock *gl) 277 { 278 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 279 struct address_space *mapping = gfs2_glock2aspace(gl); 280 281 lockref_mark_dead(&gl->gl_lockref); 282 283 gfs2_glock_remove_from_lru(gl); 284 spin_unlock(&gl->gl_lockref.lock); 285 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 286 if (mapping) { 287 truncate_inode_pages_final(mapping); 288 if (!gfs2_withdrawn(sdp)) 289 GLOCK_BUG_ON(gl, !mapping_empty(mapping)); 290 } 291 trace_gfs2_glock_put(gl); 292 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); 293 } 294 295 /* 296 * Cause the glock to be put in work queue context. 297 */ 298 void gfs2_glock_queue_put(struct gfs2_glock *gl) 299 { 300 gfs2_glock_queue_work(gl, 0); 301 } 302 303 /** 304 * gfs2_glock_put() - Decrement reference count on glock 305 * @gl: The glock to put 306 * 307 */ 308 309 void gfs2_glock_put(struct gfs2_glock *gl) 310 { 311 if (lockref_put_or_lock(&gl->gl_lockref)) 312 return; 313 314 __gfs2_glock_put(gl); 315 } 316 317 /** 318 * may_grant - check if it's ok to grant a new lock 319 * @gl: The glock 320 * @current_gh: One of the current holders of @gl 321 * @gh: The lock request which we wish to grant 322 * 323 * With our current compatibility rules, if a glock has one or more active 324 * holders (HIF_HOLDER flag set), any of those holders can be passed in as 325 * @current_gh; they are all the same as far as compatibility with the new @gh 326 * goes. 327 * 328 * Returns true if it's ok to grant the lock. 
329 */ 330 331 static inline bool may_grant(struct gfs2_glock *gl, 332 struct gfs2_holder *current_gh, 333 struct gfs2_holder *gh) 334 { 335 if (current_gh) { 336 GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, ¤t_gh->gh_iflags)); 337 338 switch(current_gh->gh_state) { 339 case LM_ST_EXCLUSIVE: 340 /* 341 * Here we make a special exception to grant holders 342 * who agree to share the EX lock with other holders 343 * who also have the bit set. If the original holder 344 * has the LM_FLAG_NODE_SCOPE bit set, we grant more 345 * holders with the bit set. 346 */ 347 return gh->gh_state == LM_ST_EXCLUSIVE && 348 (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) && 349 (gh->gh_flags & LM_FLAG_NODE_SCOPE); 350 351 case LM_ST_SHARED: 352 case LM_ST_DEFERRED: 353 return gh->gh_state == current_gh->gh_state; 354 355 default: 356 return false; 357 } 358 } 359 360 if (gl->gl_state == gh->gh_state) 361 return true; 362 if (gh->gh_flags & GL_EXACT) 363 return false; 364 if (gl->gl_state == LM_ST_EXCLUSIVE) { 365 return gh->gh_state == LM_ST_SHARED || 366 gh->gh_state == LM_ST_DEFERRED; 367 } 368 if (gh->gh_flags & LM_FLAG_ANY) 369 return gl->gl_state != LM_ST_UNLOCKED; 370 return false; 371 } 372 373 static void gfs2_holder_wake(struct gfs2_holder *gh) 374 { 375 clear_bit(HIF_WAIT, &gh->gh_iflags); 376 smp_mb__after_atomic(); 377 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 378 if (gh->gh_flags & GL_ASYNC) { 379 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; 380 381 wake_up(&sdp->sd_async_glock_wait); 382 } 383 } 384 385 /** 386 * do_error - Something unexpected has happened during a lock request 387 * @gl: The glock 388 * @ret: The status from the DLM 389 */ 390 391 static void do_error(struct gfs2_glock *gl, const int ret) 392 { 393 struct gfs2_holder *gh, *tmp; 394 395 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 396 if (!test_bit(HIF_WAIT, &gh->gh_iflags)) 397 continue; 398 if (ret & LM_OUT_ERROR) 399 gh->gh_error = -EIO; 400 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 401 gh->gh_error = GLR_TRYFAILED; 402 else 403 continue; 404 list_del_init(&gh->gh_list); 405 trace_gfs2_glock_queue(gh, 0); 406 gfs2_holder_wake(gh); 407 } 408 } 409 410 /** 411 * demote_incompat_holders - demote incompatible demoteable holders 412 * @gl: the glock we want to promote 413 * @current_gh: the newly promoted holder 414 * 415 * We're passing the newly promoted holder in @current_gh, but actually, any of 416 * the strong holders would do. 417 */ 418 static void demote_incompat_holders(struct gfs2_glock *gl, 419 struct gfs2_holder *current_gh) 420 { 421 struct gfs2_holder *gh, *tmp; 422 423 /* 424 * Demote incompatible holders before we make ourselves eligible. 425 * (This holder may or may not allow auto-demoting, but we don't want 426 * to demote the new holder before it's even granted.) 427 */ 428 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 429 /* 430 * Since holders are at the front of the list, we stop when we 431 * find the first non-holder. 432 */ 433 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 434 return; 435 if (gh == current_gh) 436 continue; 437 if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) && 438 !may_grant(gl, current_gh, gh)) { 439 /* 440 * We should not recurse into do_promote because 441 * __gfs2_glock_dq only calls handle_callback, 442 * gfs2_glock_add_to_lru and __gfs2_glock_queue_work. 
443 */ 444 __gfs2_glock_dq(gh); 445 } 446 } 447 } 448 449 /** 450 * find_first_holder - find the first "holder" gh 451 * @gl: the glock 452 */ 453 454 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 455 { 456 struct gfs2_holder *gh; 457 458 if (!list_empty(&gl->gl_holders)) { 459 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, 460 gh_list); 461 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 462 return gh; 463 } 464 return NULL; 465 } 466 467 /** 468 * find_first_strong_holder - find the first non-demoteable holder 469 * @gl: the glock 470 * 471 * Find the first holder that doesn't have the HIF_MAY_DEMOTE flag set. 472 */ 473 static inline struct gfs2_holder * 474 find_first_strong_holder(struct gfs2_glock *gl) 475 { 476 struct gfs2_holder *gh; 477 478 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 479 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 480 return NULL; 481 if (!test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags)) 482 return gh; 483 } 484 return NULL; 485 } 486 487 /* 488 * gfs2_instantiate - Call the glops instantiate function 489 * @gh: The glock holder 490 * 491 * Returns: 0 if instantiate was successful, or error. 492 */ 493 int gfs2_instantiate(struct gfs2_holder *gh) 494 { 495 struct gfs2_glock *gl = gh->gh_gl; 496 const struct gfs2_glock_operations *glops = gl->gl_ops; 497 int ret; 498 499 again: 500 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags)) 501 goto done; 502 503 /* 504 * Since we unlock the lockref lock, we set a flag to indicate 505 * instantiate is in progress. 506 */ 507 if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) { 508 wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG, 509 TASK_UNINTERRUPTIBLE); 510 /* 511 * Here we just waited for a different instantiate to finish. 512 * But that may not have been successful, as when a process 513 * locks an inode glock _before_ it has an actual inode to 514 * instantiate into. So we check again. This process might 515 * have an inode to instantiate, so might be successful. 516 */ 517 goto again; 518 } 519 520 ret = glops->go_instantiate(gl); 521 if (!ret) 522 clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); 523 clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); 524 if (ret) 525 return ret; 526 527 done: 528 if (glops->go_held) 529 return glops->go_held(gh); 530 return 0; 531 } 532 533 /** 534 * do_promote - promote as many requests as possible on the current queue 535 * @gl: The glock 536 * 537 * Returns: 1 if there is a blocked holder at the head of the list 538 */ 539 540 static int do_promote(struct gfs2_glock *gl) 541 { 542 struct gfs2_holder *gh, *current_gh; 543 bool incompat_holders_demoted = false; 544 545 current_gh = find_first_strong_holder(gl); 546 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 547 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 548 continue; 549 if (!may_grant(gl, current_gh, gh)) { 550 /* 551 * If we get here, it means we may not grant this 552 * holder for some reason. If this holder is at the 553 * head of the list, it means we have a blocked holder 554 * at the head, so return 1. 
555 */ 556 if (list_is_first(&gh->gh_list, &gl->gl_holders)) 557 return 1; 558 do_error(gl, 0); 559 break; 560 } 561 set_bit(HIF_HOLDER, &gh->gh_iflags); 562 trace_gfs2_promote(gh); 563 gfs2_holder_wake(gh); 564 if (!incompat_holders_demoted) { 565 current_gh = gh; 566 demote_incompat_holders(gl, current_gh); 567 incompat_holders_demoted = true; 568 } 569 } 570 return 0; 571 } 572 573 /** 574 * find_first_waiter - find the first gh that's waiting for the glock 575 * @gl: the glock 576 */ 577 578 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 579 { 580 struct gfs2_holder *gh; 581 582 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 583 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 584 return gh; 585 } 586 return NULL; 587 } 588 589 /** 590 * state_change - record that the glock is now in a different state 591 * @gl: the glock 592 * @new_state: the new state 593 */ 594 595 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 596 { 597 int held1, held2; 598 599 held1 = (gl->gl_state != LM_ST_UNLOCKED); 600 held2 = (new_state != LM_ST_UNLOCKED); 601 602 if (held1 != held2) { 603 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 604 if (held2) 605 gl->gl_lockref.count++; 606 else 607 gl->gl_lockref.count--; 608 } 609 if (new_state != gl->gl_target) 610 /* shorten our minimum hold time */ 611 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 612 GL_GLOCK_MIN_HOLD); 613 gl->gl_state = new_state; 614 gl->gl_tchange = jiffies; 615 } 616 617 static void gfs2_set_demote(struct gfs2_glock *gl) 618 { 619 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 620 621 set_bit(GLF_DEMOTE, &gl->gl_flags); 622 smp_mb(); 623 wake_up(&sdp->sd_async_glock_wait); 624 } 625 626 static void gfs2_demote_wake(struct gfs2_glock *gl) 627 { 628 gl->gl_demote_state = LM_ST_EXCLUSIVE; 629 clear_bit(GLF_DEMOTE, &gl->gl_flags); 630 smp_mb__after_atomic(); 631 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 632 } 633 634 /** 635 * finish_xmote - The DLM has replied to one of our lock requests 636 * @gl: The glock 637 * @ret: The status from the DLM 638 * 639 */ 640 641 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 642 { 643 const struct gfs2_glock_operations *glops = gl->gl_ops; 644 struct gfs2_holder *gh; 645 unsigned state = ret & LM_OUT_ST_MASK; 646 647 spin_lock(&gl->gl_lockref.lock); 648 trace_gfs2_glock_state_change(gl, state); 649 state_change(gl, state); 650 gh = find_first_waiter(gl); 651 652 /* Demote to UN request arrived during demote to SH or DF */ 653 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 654 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 655 gl->gl_target = LM_ST_UNLOCKED; 656 657 /* Check for state != intended state */ 658 if (unlikely(state != gl->gl_target)) { 659 if (gh && (ret & LM_OUT_CANCELED)) 660 gfs2_holder_wake(gh); 661 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 662 /* move to back of queue and try next entry */ 663 if (ret & LM_OUT_CANCELED) { 664 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 665 list_move_tail(&gh->gh_list, &gl->gl_holders); 666 gh = find_first_waiter(gl); 667 gl->gl_target = gh->gh_state; 668 goto retry; 669 } 670 /* Some error or failed "try lock" - report it */ 671 if ((ret & LM_OUT_ERROR) || 672 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 673 gl->gl_target = gl->gl_state; 674 do_error(gl, ret); 675 goto out; 676 } 677 } 678 switch(state) { 679 /* Unlocked due to conversion deadlock, try again */ 680 case LM_ST_UNLOCKED: 681 retry: 682 do_xmote(gl, 
gh, gl->gl_target); 683 break; 684 /* Conversion fails, unlock and try again */ 685 case LM_ST_SHARED: 686 case LM_ST_DEFERRED: 687 do_xmote(gl, gh, LM_ST_UNLOCKED); 688 break; 689 default: /* Everything else */ 690 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", 691 gl->gl_target, state); 692 GLOCK_BUG_ON(gl, 1); 693 } 694 spin_unlock(&gl->gl_lockref.lock); 695 return; 696 } 697 698 /* Fast path - we got what we asked for */ 699 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 700 gfs2_demote_wake(gl); 701 if (state != LM_ST_UNLOCKED) { 702 if (glops->go_xmote_bh) { 703 int rv; 704 705 spin_unlock(&gl->gl_lockref.lock); 706 rv = glops->go_xmote_bh(gl); 707 spin_lock(&gl->gl_lockref.lock); 708 if (rv) { 709 do_error(gl, rv); 710 goto out; 711 } 712 } 713 do_promote(gl); 714 } 715 out: 716 clear_bit(GLF_LOCK, &gl->gl_flags); 717 spin_unlock(&gl->gl_lockref.lock); 718 } 719 720 static bool is_system_glock(struct gfs2_glock *gl) 721 { 722 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 723 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 724 725 if (gl == m_ip->i_gl) 726 return true; 727 return false; 728 } 729 730 /** 731 * do_xmote - Calls the DLM to change the state of a lock 732 * @gl: The lock state 733 * @gh: The holder (only for promotes) 734 * @target: The target lock state 735 * 736 */ 737 738 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, 739 unsigned int target) 740 __releases(&gl->gl_lockref.lock) 741 __acquires(&gl->gl_lockref.lock) 742 { 743 const struct gfs2_glock_operations *glops = gl->gl_ops; 744 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 745 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); 746 int ret; 747 748 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && 749 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 750 goto skip_inval; 751 752 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 753 LM_FLAG_PRIORITY); 754 GLOCK_BUG_ON(gl, gl->gl_state == target); 755 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 756 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 757 glops->go_inval) { 758 /* 759 * If another process is already doing the invalidate, let that 760 * finish first. The glock state machine will get back to this 761 * holder again later. 762 */ 763 if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS, 764 &gl->gl_flags)) 765 return; 766 do_error(gl, 0); /* Fail queued try locks */ 767 } 768 gl->gl_req = target; 769 set_bit(GLF_BLOCKING, &gl->gl_flags); 770 if ((gl->gl_req == LM_ST_UNLOCKED) || 771 (gl->gl_state == LM_ST_EXCLUSIVE) || 772 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) 773 clear_bit(GLF_BLOCKING, &gl->gl_flags); 774 spin_unlock(&gl->gl_lockref.lock); 775 if (glops->go_sync) { 776 ret = glops->go_sync(gl); 777 /* If we had a problem syncing (due to io errors or whatever, 778 * we should not invalidate the metadata or tell dlm to 779 * release the glock to other nodes. 780 */ 781 if (ret) { 782 if (cmpxchg(&sdp->sd_log_error, 0, ret)) { 783 fs_err(sdp, "Error %d syncing glock \n", ret); 784 gfs2_dump_glock(NULL, gl, true); 785 } 786 goto skip_inval; 787 } 788 } 789 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { 790 /* 791 * The call to go_sync should have cleared out the ail list. 792 * If there are still items, we have a problem. We ought to 793 * withdraw, but we can't because the withdraw code also uses 794 * glocks. Warn about the error, dump the glock, then fall 795 * through and wait for logd to do the withdraw for us. 
796 */ 797 if ((atomic_read(&gl->gl_ail_count) != 0) && 798 (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) { 799 gfs2_glock_assert_warn(gl, 800 !atomic_read(&gl->gl_ail_count)); 801 gfs2_dump_glock(NULL, gl, true); 802 } 803 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); 804 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 805 } 806 807 skip_inval: 808 gfs2_glock_hold(gl); 809 /* 810 * Check for an error encountered since we called go_sync and go_inval. 811 * If so, we can't withdraw from the glock code because the withdraw 812 * code itself uses glocks (see function signal_our_withdraw) to 813 * change the mount to read-only. Most importantly, we must not call 814 * dlm to unlock the glock until the journal is in a known good state 815 * (after journal replay) otherwise other nodes may use the object 816 * (rgrp or dinode) and then later, journal replay will corrupt the 817 * file system. The best we can do here is wait for the logd daemon 818 * to see sd_log_error and withdraw, and in the meantime, requeue the 819 * work for later. 820 * 821 * We make a special exception for some system glocks, such as the 822 * system statfs inode glock, which needs to be granted before the 823 * gfs2_quotad daemon can exit, and that exit needs to finish before 824 * we can unmount the withdrawn file system. 825 * 826 * However, if we're just unlocking the lock (say, for unmount, when 827 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete 828 * then it's okay to tell dlm to unlock it. 829 */ 830 if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) 831 gfs2_withdraw_delayed(sdp); 832 if (glock_blocked_by_withdraw(gl) && 833 (target != LM_ST_UNLOCKED || 834 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) { 835 if (!is_system_glock(gl)) { 836 handle_callback(gl, LM_ST_UNLOCKED, 0, false); /* sets demote */ 837 /* 838 * Ordinarily, we would call dlm and its callback would call 839 * finish_xmote, which would call state_change() to the new state. 840 * Since we withdrew, we won't call dlm, so call state_change 841 * manually, but to the UNLOCKED state we desire. 842 */ 843 state_change(gl, LM_ST_UNLOCKED); 844 /* 845 * We skip telling dlm to do the locking, so we won't get a 846 * reply that would otherwise clear GLF_LOCK. So we clear it here. 
847 */ 848 clear_bit(GLF_LOCK, &gl->gl_flags); 849 clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 850 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); 851 goto out; 852 } else { 853 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 854 } 855 } 856 857 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 858 /* lock_dlm */ 859 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 860 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && 861 target == LM_ST_UNLOCKED && 862 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { 863 finish_xmote(gl, target); 864 gfs2_glock_queue_work(gl, 0); 865 } else if (ret) { 866 fs_err(sdp, "lm_lock ret %d\n", ret); 867 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); 868 } 869 } else { /* lock_nolock */ 870 finish_xmote(gl, target); 871 gfs2_glock_queue_work(gl, 0); 872 } 873 out: 874 spin_lock(&gl->gl_lockref.lock); 875 } 876 877 /** 878 * run_queue - do all outstanding tasks related to a glock 879 * @gl: The glock in question 880 * @nonblock: True if we must not block in run_queue 881 * 882 */ 883 884 static void run_queue(struct gfs2_glock *gl, const int nonblock) 885 __releases(&gl->gl_lockref.lock) 886 __acquires(&gl->gl_lockref.lock) 887 { 888 struct gfs2_holder *gh = NULL; 889 890 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 891 return; 892 893 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 894 895 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 896 gl->gl_demote_state != gl->gl_state) { 897 if (find_first_holder(gl)) 898 goto out_unlock; 899 if (nonblock) 900 goto out_sched; 901 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 902 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 903 gl->gl_target = gl->gl_demote_state; 904 } else { 905 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 906 gfs2_demote_wake(gl); 907 if (do_promote(gl) == 0) 908 goto out_unlock; 909 gh = find_first_waiter(gl); 910 gl->gl_target = gh->gh_state; 911 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 912 do_error(gl, 0); /* Fail queued try locks */ 913 } 914 do_xmote(gl, gh, gl->gl_target); 915 return; 916 917 out_sched: 918 clear_bit(GLF_LOCK, &gl->gl_flags); 919 smp_mb__after_atomic(); 920 gl->gl_lockref.count++; 921 __gfs2_glock_queue_work(gl, 0); 922 return; 923 924 out_unlock: 925 clear_bit(GLF_LOCK, &gl->gl_flags); 926 smp_mb__after_atomic(); 927 return; 928 } 929 930 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) 931 { 932 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 933 934 if (ri->ri_magic == 0) 935 ri->ri_magic = cpu_to_be32(GFS2_MAGIC); 936 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC)) 937 ri->ri_generation_deleted = cpu_to_be64(generation); 938 } 939 940 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) 941 { 942 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 943 944 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC)) 945 return false; 946 return generation <= be64_to_cpu(ri->ri_generation_deleted); 947 } 948 949 static void gfs2_glock_poke(struct gfs2_glock *gl) 950 { 951 int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP; 952 struct gfs2_holder gh; 953 int error; 954 955 __gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_); 956 error = gfs2_glock_nq(&gh); 957 if (!error) 958 gfs2_glock_dq(&gh); 959 gfs2_holder_uninit(&gh); 960 } 961 962 static bool gfs2_try_evict(struct gfs2_glock *gl) 963 { 964 struct gfs2_inode *ip; 965 bool evicted = false; 966 967 /* 968 * If there is contention on the iopen glock and we have an inode, try 969 * to grab and release the inode so that it 
can be evicted. This will 970 * allow the remote node to go ahead and delete the inode without us 971 * having to do it, which will avoid rgrp glock thrashing. 972 * 973 * The remote node is likely still holding the corresponding inode 974 * glock, so it will run before we get to verify that the delete has 975 * happened below. 976 */ 977 spin_lock(&gl->gl_lockref.lock); 978 ip = gl->gl_object; 979 if (ip && !igrab(&ip->i_inode)) 980 ip = NULL; 981 spin_unlock(&gl->gl_lockref.lock); 982 if (ip) { 983 struct gfs2_glock *inode_gl = NULL; 984 985 gl->gl_no_formal_ino = ip->i_no_formal_ino; 986 set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 987 d_prune_aliases(&ip->i_inode); 988 iput(&ip->i_inode); 989 990 /* If the inode was evicted, gl->gl_object will now be NULL. */ 991 spin_lock(&gl->gl_lockref.lock); 992 ip = gl->gl_object; 993 if (ip) { 994 inode_gl = ip->i_gl; 995 lockref_get(&inode_gl->gl_lockref); 996 clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 997 } 998 spin_unlock(&gl->gl_lockref.lock); 999 if (inode_gl) { 1000 gfs2_glock_poke(inode_gl); 1001 gfs2_glock_put(inode_gl); 1002 } 1003 evicted = !ip; 1004 } 1005 return evicted; 1006 } 1007 1008 static void delete_work_func(struct work_struct *work) 1009 { 1010 struct delayed_work *dwork = to_delayed_work(work); 1011 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); 1012 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1013 struct inode *inode; 1014 u64 no_addr = gl->gl_name.ln_number; 1015 1016 spin_lock(&gl->gl_lockref.lock); 1017 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1018 spin_unlock(&gl->gl_lockref.lock); 1019 1020 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { 1021 /* 1022 * If we can evict the inode, give the remote node trying to 1023 * delete the inode some time before verifying that the delete 1024 * has happened. Otherwise, if we cause contention on the inode glock 1025 * immediately, the remote node will think that we still have 1026 * the inode in use, and so it will give up waiting. 1027 * 1028 * If we can't evict the inode, signal to the remote node that 1029 * the inode is still in use. We'll later try to delete the 1030 * inode locally in gfs2_evict_inode. 1031 * 1032 * FIXME: We only need to verify that the remote node has 1033 * deleted the inode because nodes before this remote delete 1034 * rework won't cooperate. At a later time, when we no longer 1035 * care about compatibility with such nodes, we can skip this 1036 * step entirely. 
1037 */ 1038 if (gfs2_try_evict(gl)) { 1039 if (gfs2_queue_delete_work(gl, 5 * HZ)) 1040 return; 1041 } 1042 } 1043 1044 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, 1045 GFS2_BLKST_UNLINKED); 1046 if (IS_ERR(inode)) { 1047 if (PTR_ERR(inode) == -EAGAIN && 1048 (gfs2_queue_delete_work(gl, 5 * HZ))) 1049 return; 1050 } else { 1051 d_prune_aliases(inode); 1052 iput(inode); 1053 } 1054 gfs2_glock_put(gl); 1055 } 1056 1057 static void glock_work_func(struct work_struct *work) 1058 { 1059 unsigned long delay = 0; 1060 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 1061 unsigned int drop_refs = 1; 1062 1063 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 1064 finish_xmote(gl, gl->gl_reply); 1065 drop_refs++; 1066 } 1067 spin_lock(&gl->gl_lockref.lock); 1068 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1069 gl->gl_state != LM_ST_UNLOCKED && 1070 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 1071 unsigned long holdtime, now = jiffies; 1072 1073 holdtime = gl->gl_tchange + gl->gl_hold_time; 1074 if (time_before(now, holdtime)) 1075 delay = holdtime - now; 1076 1077 if (!delay) { 1078 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 1079 gfs2_set_demote(gl); 1080 } 1081 } 1082 run_queue(gl, 0); 1083 if (delay) { 1084 /* Keep one glock reference for the work we requeue. */ 1085 drop_refs--; 1086 if (gl->gl_name.ln_type != LM_TYPE_INODE) 1087 delay = 0; 1088 __gfs2_glock_queue_work(gl, delay); 1089 } 1090 1091 /* 1092 * Drop the remaining glock references manually here. (Mind that 1093 * __gfs2_glock_queue_work depends on the lockref spinlock begin held 1094 * here as well.) 1095 */ 1096 gl->gl_lockref.count -= drop_refs; 1097 if (!gl->gl_lockref.count) { 1098 __gfs2_glock_put(gl); 1099 return; 1100 } 1101 spin_unlock(&gl->gl_lockref.lock); 1102 } 1103 1104 static struct gfs2_glock *find_insert_glock(struct lm_lockname *name, 1105 struct gfs2_glock *new) 1106 { 1107 struct wait_glock_queue wait; 1108 wait_queue_head_t *wq = glock_waitqueue(name); 1109 struct gfs2_glock *gl; 1110 1111 wait.name = name; 1112 init_wait(&wait.wait); 1113 wait.wait.func = glock_wake_function; 1114 1115 again: 1116 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 1117 rcu_read_lock(); 1118 if (new) { 1119 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, 1120 &new->gl_node, ht_parms); 1121 if (IS_ERR(gl)) 1122 goto out; 1123 } else { 1124 gl = rhashtable_lookup_fast(&gl_hash_table, 1125 name, ht_parms); 1126 } 1127 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { 1128 rcu_read_unlock(); 1129 schedule(); 1130 goto again; 1131 } 1132 out: 1133 rcu_read_unlock(); 1134 finish_wait(wq, &wait.wait); 1135 return gl; 1136 } 1137 1138 /** 1139 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 1140 * @sdp: The GFS2 superblock 1141 * @number: the lock number 1142 * @glops: The glock_operations to use 1143 * @create: If 0, don't create the glock if it doesn't exist 1144 * @glp: the glock is returned here 1145 * 1146 * This does not lock a glock, just finds/creates structures for one. 
1147 * 1148 * Returns: errno 1149 */ 1150 1151 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 1152 const struct gfs2_glock_operations *glops, int create, 1153 struct gfs2_glock **glp) 1154 { 1155 struct super_block *s = sdp->sd_vfs; 1156 struct lm_lockname name = { .ln_number = number, 1157 .ln_type = glops->go_type, 1158 .ln_sbd = sdp }; 1159 struct gfs2_glock *gl, *tmp; 1160 struct address_space *mapping; 1161 int ret = 0; 1162 1163 gl = find_insert_glock(&name, NULL); 1164 if (gl) { 1165 *glp = gl; 1166 return 0; 1167 } 1168 if (!create) 1169 return -ENOENT; 1170 1171 if (glops->go_flags & GLOF_ASPACE) { 1172 struct gfs2_glock_aspace *gla = 1173 kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS); 1174 if (!gla) 1175 return -ENOMEM; 1176 gl = &gla->glock; 1177 } else { 1178 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS); 1179 if (!gl) 1180 return -ENOMEM; 1181 } 1182 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 1183 gl->gl_ops = glops; 1184 1185 if (glops->go_flags & GLOF_LVB) { 1186 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 1187 if (!gl->gl_lksb.sb_lvbptr) { 1188 gfs2_glock_dealloc(&gl->gl_rcu); 1189 return -ENOMEM; 1190 } 1191 } 1192 1193 atomic_inc(&sdp->sd_glock_disposal); 1194 gl->gl_node.next = NULL; 1195 gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0; 1196 gl->gl_name = name; 1197 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); 1198 gl->gl_lockref.count = 1; 1199 gl->gl_state = LM_ST_UNLOCKED; 1200 gl->gl_target = LM_ST_UNLOCKED; 1201 gl->gl_demote_state = LM_ST_EXCLUSIVE; 1202 gl->gl_dstamp = 0; 1203 preempt_disable(); 1204 /* We use the global stats to estimate the initial per-glock stats */ 1205 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 1206 preempt_enable(); 1207 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 1208 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 1209 gl->gl_tchange = jiffies; 1210 gl->gl_object = NULL; 1211 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 1212 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 1213 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) 1214 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); 1215 1216 mapping = gfs2_glock2aspace(gl); 1217 if (mapping) { 1218 mapping->a_ops = &gfs2_meta_aops; 1219 mapping->host = s->s_bdev->bd_inode; 1220 mapping->flags = 0; 1221 mapping_set_gfp_mask(mapping, GFP_NOFS); 1222 mapping->private_data = NULL; 1223 mapping->writeback_index = 0; 1224 } 1225 1226 tmp = find_insert_glock(&name, gl); 1227 if (!tmp) { 1228 *glp = gl; 1229 goto out; 1230 } 1231 if (IS_ERR(tmp)) { 1232 ret = PTR_ERR(tmp); 1233 goto out_free; 1234 } 1235 *glp = tmp; 1236 1237 out_free: 1238 gfs2_glock_dealloc(&gl->gl_rcu); 1239 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 1240 wake_up(&sdp->sd_glock_wait); 1241 1242 out: 1243 return ret; 1244 } 1245 1246 /** 1247 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way 1248 * @gl: the glock 1249 * @state: the state we're requesting 1250 * @flags: the modifier flags 1251 * @gh: the holder structure 1252 * 1253 */ 1254 1255 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, 1256 struct gfs2_holder *gh, unsigned long ip) 1257 { 1258 INIT_LIST_HEAD(&gh->gh_list); 1259 gh->gh_gl = gl; 1260 gh->gh_ip = ip; 1261 gh->gh_owner_pid = get_pid(task_pid(current)); 1262 gh->gh_state = state; 1263 gh->gh_flags = flags; 1264 gh->gh_iflags = 0; 1265 gfs2_glock_hold(gl); 1266 } 1267 1268 /** 1269 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 1270 * 
@state: the state we're requesting 1271 * @flags: the modifier flags 1272 * @gh: the holder structure 1273 * 1274 * Don't mess with the glock. 1275 * 1276 */ 1277 1278 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) 1279 { 1280 gh->gh_state = state; 1281 gh->gh_flags = flags; 1282 gh->gh_iflags = 0; 1283 gh->gh_ip = _RET_IP_; 1284 put_pid(gh->gh_owner_pid); 1285 gh->gh_owner_pid = get_pid(task_pid(current)); 1286 } 1287 1288 /** 1289 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference) 1290 * @gh: the holder structure 1291 * 1292 */ 1293 1294 void gfs2_holder_uninit(struct gfs2_holder *gh) 1295 { 1296 put_pid(gh->gh_owner_pid); 1297 gfs2_glock_put(gh->gh_gl); 1298 gfs2_holder_mark_uninitialized(gh); 1299 gh->gh_ip = 0; 1300 } 1301 1302 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, 1303 unsigned long start_time) 1304 { 1305 /* Have we waited longer that a second? */ 1306 if (time_after(jiffies, start_time + HZ)) { 1307 /* Lengthen the minimum hold time. */ 1308 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, 1309 GL_GLOCK_MAX_HOLD); 1310 } 1311 } 1312 1313 /** 1314 * gfs2_glock_holder_ready - holder is ready and its error code can be collected 1315 * @gh: the glock holder 1316 * 1317 * Called when a glock holder no longer needs to be waited for because it is 1318 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has 1319 * failed (gh_error != 0). 1320 */ 1321 1322 int gfs2_glock_holder_ready(struct gfs2_holder *gh) 1323 { 1324 if (gh->gh_error || (gh->gh_flags & GL_SKIP)) 1325 return gh->gh_error; 1326 gh->gh_error = gfs2_instantiate(gh); 1327 if (gh->gh_error) 1328 gfs2_glock_dq(gh); 1329 return gh->gh_error; 1330 } 1331 1332 /** 1333 * gfs2_glock_wait - wait on a glock acquisition 1334 * @gh: the glock holder 1335 * 1336 * Returns: 0 on success 1337 */ 1338 1339 int gfs2_glock_wait(struct gfs2_holder *gh) 1340 { 1341 unsigned long start_time = jiffies; 1342 1343 might_sleep(); 1344 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1345 gfs2_glock_update_hold_time(gh->gh_gl, start_time); 1346 return gfs2_glock_holder_ready(gh); 1347 } 1348 1349 static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) 1350 { 1351 int i; 1352 1353 for (i = 0; i < num_gh; i++) 1354 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) 1355 return 1; 1356 return 0; 1357 } 1358 1359 /** 1360 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions 1361 * @num_gh: the number of holders in the array 1362 * @ghs: the glock holder array 1363 * 1364 * Returns: 0 on success, meaning all glocks have been granted and are held. 1365 * -ESTALE if the request timed out, meaning all glocks were released, 1366 * and the caller should retry the operation. 1367 */ 1368 1369 int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) 1370 { 1371 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; 1372 int i, ret = 0, timeout = 0; 1373 unsigned long start_time = jiffies; 1374 1375 might_sleep(); 1376 /* 1377 * Total up the (minimum hold time * 2) of all glocks and use that to 1378 * determine the max amount of time we should wait. 1379 */ 1380 for (i = 0; i < num_gh; i++) 1381 timeout += ghs[i].gh_gl->gl_hold_time << 1; 1382 1383 if (!wait_event_timeout(sdp->sd_async_glock_wait, 1384 !glocks_pending(num_gh, ghs), timeout)) { 1385 ret = -ESTALE; /* request timed out. 
*/ 1386 goto out; 1387 } 1388 1389 for (i = 0; i < num_gh; i++) { 1390 struct gfs2_holder *gh = &ghs[i]; 1391 int ret2; 1392 1393 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) { 1394 gfs2_glock_update_hold_time(gh->gh_gl, 1395 start_time); 1396 } 1397 ret2 = gfs2_glock_holder_ready(gh); 1398 if (!ret) 1399 ret = ret2; 1400 } 1401 1402 out: 1403 if (ret) { 1404 for (i = 0; i < num_gh; i++) { 1405 struct gfs2_holder *gh = &ghs[i]; 1406 1407 gfs2_glock_dq(gh); 1408 } 1409 } 1410 return ret; 1411 } 1412 1413 /** 1414 * handle_callback - process a demote request 1415 * @gl: the glock 1416 * @state: the state the caller wants us to change to 1417 * @delay: zero to demote immediately; otherwise pending demote 1418 * @remote: true if this came from a different cluster node 1419 * 1420 * There are only two requests that we are going to see in actual 1421 * practise: LM_ST_SHARED and LM_ST_UNLOCKED 1422 */ 1423 1424 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 1425 unsigned long delay, bool remote) 1426 { 1427 if (delay) 1428 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 1429 else 1430 gfs2_set_demote(gl); 1431 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 1432 gl->gl_demote_state = state; 1433 gl->gl_demote_time = jiffies; 1434 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 1435 gl->gl_demote_state != state) { 1436 gl->gl_demote_state = LM_ST_UNLOCKED; 1437 } 1438 if (gl->gl_ops->go_callback) 1439 gl->gl_ops->go_callback(gl, remote); 1440 trace_gfs2_demote_rq(gl, remote); 1441 } 1442 1443 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 1444 { 1445 struct va_format vaf; 1446 va_list args; 1447 1448 va_start(args, fmt); 1449 1450 if (seq) { 1451 seq_vprintf(seq, fmt, args); 1452 } else { 1453 vaf.fmt = fmt; 1454 vaf.va = &args; 1455 1456 pr_err("%pV", &vaf); 1457 } 1458 1459 va_end(args); 1460 } 1461 1462 static inline bool pid_is_meaningful(const struct gfs2_holder *gh) 1463 { 1464 if (!(gh->gh_flags & GL_NOPID)) 1465 return true; 1466 if (gh->gh_state == LM_ST_UNLOCKED) 1467 return true; 1468 return false; 1469 } 1470 1471 /** 1472 * add_to_queue - Add a holder to the wait queue (but look for recursion) 1473 * @gh: the holder structure to add 1474 * 1475 * Eventually we should move the recursive locking trap to a 1476 * debugging option or something like that. This is the fast 1477 * path and needs to have the minimum number of distractions. 
1478 * 1479 */ 1480 1481 static inline void add_to_queue(struct gfs2_holder *gh) 1482 __releases(&gl->gl_lockref.lock) 1483 __acquires(&gl->gl_lockref.lock) 1484 { 1485 struct gfs2_glock *gl = gh->gh_gl; 1486 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1487 struct list_head *insert_pt = NULL; 1488 struct gfs2_holder *gh2; 1489 int try_futile = 0; 1490 1491 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); 1492 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1493 GLOCK_BUG_ON(gl, true); 1494 1495 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 1496 if (test_bit(GLF_LOCK, &gl->gl_flags)) { 1497 struct gfs2_holder *current_gh; 1498 1499 current_gh = find_first_strong_holder(gl); 1500 try_futile = !may_grant(gl, current_gh, gh); 1501 } 1502 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 1503 goto fail; 1504 } 1505 1506 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1507 if (likely(gh2->gh_owner_pid != gh->gh_owner_pid)) 1508 continue; 1509 if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK) 1510 continue; 1511 if (test_bit(HIF_MAY_DEMOTE, &gh2->gh_iflags)) 1512 continue; 1513 if (!pid_is_meaningful(gh2)) 1514 continue; 1515 goto trap_recursive; 1516 } 1517 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1518 if (try_futile && 1519 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 1520 fail: 1521 gh->gh_error = GLR_TRYFAILED; 1522 gfs2_holder_wake(gh); 1523 return; 1524 } 1525 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1526 continue; 1527 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1528 insert_pt = &gh2->gh_list; 1529 } 1530 trace_gfs2_glock_queue(gh, 1); 1531 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1532 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1533 if (likely(insert_pt == NULL)) { 1534 list_add_tail(&gh->gh_list, &gl->gl_holders); 1535 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1536 goto do_cancel; 1537 return; 1538 } 1539 list_add_tail(&gh->gh_list, insert_pt); 1540 do_cancel: 1541 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 1542 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 1543 spin_unlock(&gl->gl_lockref.lock); 1544 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1545 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1546 spin_lock(&gl->gl_lockref.lock); 1547 } 1548 return; 1549 1550 trap_recursive: 1551 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); 1552 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); 1553 fs_err(sdp, "lock type: %d req lock state : %d\n", 1554 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 1555 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); 1556 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); 1557 fs_err(sdp, "lock type: %d req lock state : %d\n", 1558 gh->gh_gl->gl_name.ln_type, gh->gh_state); 1559 gfs2_dump_glock(NULL, gl, true); 1560 BUG(); 1561 } 1562 1563 /** 1564 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 1565 * @gh: the holder structure 1566 * 1567 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1568 * 1569 * Returns: 0, GLR_TRYFAILED, or errno on failure 1570 */ 1571 1572 int gfs2_glock_nq(struct gfs2_holder *gh) 1573 { 1574 struct gfs2_glock *gl = gh->gh_gl; 1575 int error = 0; 1576 1577 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) 1578 return -EIO; 1579 1580 if (test_bit(GLF_LRU, &gl->gl_flags)) 1581 gfs2_glock_remove_from_lru(gl); 1582 1583 gh->gh_error = 0; 1584 spin_lock(&gl->gl_lockref.lock); 1585 add_to_queue(gh); 1586 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1587 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { 1588 
set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1589 gl->gl_lockref.count++; 1590 __gfs2_glock_queue_work(gl, 0); 1591 } 1592 run_queue(gl, 1); 1593 spin_unlock(&gl->gl_lockref.lock); 1594 1595 if (!(gh->gh_flags & GL_ASYNC)) 1596 error = gfs2_glock_wait(gh); 1597 1598 return error; 1599 } 1600 1601 /** 1602 * gfs2_glock_poll - poll to see if an async request has been completed 1603 * @gh: the holder 1604 * 1605 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1606 */ 1607 1608 int gfs2_glock_poll(struct gfs2_holder *gh) 1609 { 1610 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1611 } 1612 1613 static inline bool needs_demote(struct gfs2_glock *gl) 1614 { 1615 return (test_bit(GLF_DEMOTE, &gl->gl_flags) || 1616 test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)); 1617 } 1618 1619 static void __gfs2_glock_dq(struct gfs2_holder *gh) 1620 { 1621 struct gfs2_glock *gl = gh->gh_gl; 1622 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1623 unsigned delay = 0; 1624 int fast_path = 0; 1625 1626 /* 1627 * This while loop is similar to function demote_incompat_holders: 1628 * If the glock is due to be demoted (which may be from another node 1629 * or even if this holder is GL_NOCACHE), the weak holders are 1630 * demoted as well, allowing the glock to be demoted. 1631 */ 1632 while (gh) { 1633 /* 1634 * If we're in the process of file system withdraw, we cannot 1635 * just dequeue any glocks until our journal is recovered, lest 1636 * we introduce file system corruption. We need two exceptions 1637 * to this rule: We need to allow unlocking of nondisk glocks 1638 * and the glock for our own journal that needs recovery. 1639 */ 1640 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && 1641 glock_blocked_by_withdraw(gl) && 1642 gh->gh_gl != sdp->sd_jinode_gl) { 1643 sdp->sd_glock_dqs_held++; 1644 spin_unlock(&gl->gl_lockref.lock); 1645 might_sleep(); 1646 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, 1647 TASK_UNINTERRUPTIBLE); 1648 spin_lock(&gl->gl_lockref.lock); 1649 } 1650 1651 /* 1652 * This holder should not be cached, so mark it for demote. 1653 * Note: this should be done before the check for needs_demote 1654 * below. 1655 */ 1656 if (gh->gh_flags & GL_NOCACHE) 1657 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1658 1659 list_del_init(&gh->gh_list); 1660 clear_bit(HIF_HOLDER, &gh->gh_iflags); 1661 trace_gfs2_glock_queue(gh, 0); 1662 1663 /* 1664 * If there hasn't been a demote request we are done. 1665 * (Let the remaining holders, if any, keep holding it.) 1666 */ 1667 if (!needs_demote(gl)) { 1668 if (list_empty(&gl->gl_holders)) 1669 fast_path = 1; 1670 break; 1671 } 1672 /* 1673 * If we have another strong holder (we cannot auto-demote) 1674 * we are done. It keeps holding it until it is done. 1675 */ 1676 if (find_first_strong_holder(gl)) 1677 break; 1678 1679 /* 1680 * If we have a weak holder at the head of the list, it 1681 * (and all others like it) must be auto-demoted. If there 1682 * are no more weak holders, we exit the while loop. 
1683 */ 1684 gh = find_first_holder(gl); 1685 } 1686 1687 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1688 gfs2_glock_add_to_lru(gl); 1689 1690 if (unlikely(!fast_path)) { 1691 gl->gl_lockref.count++; 1692 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1693 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1694 gl->gl_name.ln_type == LM_TYPE_INODE) 1695 delay = gl->gl_hold_time; 1696 __gfs2_glock_queue_work(gl, delay); 1697 } 1698 } 1699 1700 /** 1701 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1702 * @gh: the glock holder 1703 * 1704 */ 1705 void gfs2_glock_dq(struct gfs2_holder *gh) 1706 { 1707 struct gfs2_glock *gl = gh->gh_gl; 1708 1709 spin_lock(&gl->gl_lockref.lock); 1710 if (list_is_first(&gh->gh_list, &gl->gl_holders) && 1711 !test_bit(HIF_HOLDER, &gh->gh_iflags)) { 1712 spin_unlock(&gl->gl_lockref.lock); 1713 gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl); 1714 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1715 spin_lock(&gl->gl_lockref.lock); 1716 } 1717 1718 __gfs2_glock_dq(gh); 1719 spin_unlock(&gl->gl_lockref.lock); 1720 } 1721 1722 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1723 { 1724 struct gfs2_glock *gl = gh->gh_gl; 1725 gfs2_glock_dq(gh); 1726 might_sleep(); 1727 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1728 } 1729 1730 /** 1731 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1732 * @gh: the holder structure 1733 * 1734 */ 1735 1736 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1737 { 1738 gfs2_glock_dq(gh); 1739 gfs2_holder_uninit(gh); 1740 } 1741 1742 /** 1743 * gfs2_glock_nq_num - acquire a glock based on lock number 1744 * @sdp: the filesystem 1745 * @number: the lock number 1746 * @glops: the glock operations for the type of glock 1747 * @state: the state to acquire the glock in 1748 * @flags: modifier flags for the acquisition 1749 * @gh: the struct gfs2_holder 1750 * 1751 * Returns: errno 1752 */ 1753 1754 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1755 const struct gfs2_glock_operations *glops, 1756 unsigned int state, u16 flags, struct gfs2_holder *gh) 1757 { 1758 struct gfs2_glock *gl; 1759 int error; 1760 1761 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1762 if (!error) { 1763 error = gfs2_glock_nq_init(gl, state, flags, gh); 1764 gfs2_glock_put(gl); 1765 } 1766 1767 return error; 1768 } 1769 1770 /** 1771 * glock_compare - Compare two struct gfs2_glock structures for sorting 1772 * @arg_a: the first structure 1773 * @arg_b: the second structure 1774 * 1775 */ 1776 1777 static int glock_compare(const void *arg_a, const void *arg_b) 1778 { 1779 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; 1780 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; 1781 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; 1782 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; 1783 1784 if (a->ln_number > b->ln_number) 1785 return 1; 1786 if (a->ln_number < b->ln_number) 1787 return -1; 1788 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); 1789 return 0; 1790 } 1791 1792 /** 1793 * nq_m_sync - synchronously acquire more than one glock in deadlock free order 1794 * @num_gh: the number of structures 1795 * @ghs: an array of struct gfs2_holder structures 1796 * @p: placeholder for the holder structure to pass back 1797 * 1798 * Returns: 0 on success (all glocks acquired), 1799 * errno on failure (no glocks acquired) 1800 */ 1801 1802 static int nq_m_sync(unsigned int num_gh, 
struct gfs2_holder *ghs, 1803 struct gfs2_holder **p) 1804 { 1805 unsigned int x; 1806 int error = 0; 1807 1808 for (x = 0; x < num_gh; x++) 1809 p[x] = &ghs[x]; 1810 1811 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); 1812 1813 for (x = 0; x < num_gh; x++) { 1814 error = gfs2_glock_nq(p[x]); 1815 if (error) { 1816 while (x--) 1817 gfs2_glock_dq(p[x]); 1818 break; 1819 } 1820 } 1821 1822 return error; 1823 } 1824 1825 /** 1826 * gfs2_glock_nq_m - acquire multiple glocks 1827 * @num_gh: the number of structures 1828 * @ghs: an array of struct gfs2_holder structures 1829 * 1830 * Returns: 0 on success (all glocks acquired), 1831 * errno on failure (no glocks acquired) 1832 */ 1833 1834 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1835 { 1836 struct gfs2_holder *tmp[4]; 1837 struct gfs2_holder **pph = tmp; 1838 int error = 0; 1839 1840 switch(num_gh) { 1841 case 0: 1842 return 0; 1843 case 1: 1844 return gfs2_glock_nq(ghs); 1845 default: 1846 if (num_gh <= 4) 1847 break; 1848 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *), 1849 GFP_NOFS); 1850 if (!pph) 1851 return -ENOMEM; 1852 } 1853 1854 error = nq_m_sync(num_gh, ghs, pph); 1855 1856 if (pph != tmp) 1857 kfree(pph); 1858 1859 return error; 1860 } 1861 1862 /** 1863 * gfs2_glock_dq_m - release multiple glocks 1864 * @num_gh: the number of structures 1865 * @ghs: an array of struct gfs2_holder structures 1866 * 1867 */ 1868 1869 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1870 { 1871 while (num_gh--) 1872 gfs2_glock_dq(&ghs[num_gh]); 1873 } 1874 1875 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1876 { 1877 unsigned long delay = 0; 1878 unsigned long holdtime; 1879 unsigned long now = jiffies; 1880 1881 gfs2_glock_hold(gl); 1882 spin_lock(&gl->gl_lockref.lock); 1883 holdtime = gl->gl_tchange + gl->gl_hold_time; 1884 if (!list_empty(&gl->gl_holders) && 1885 gl->gl_name.ln_type == LM_TYPE_INODE) { 1886 if (time_before(now, holdtime)) 1887 delay = holdtime - now; 1888 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1889 delay = gl->gl_hold_time; 1890 } 1891 /* 1892 * Note 1: We cannot call demote_incompat_holders from handle_callback 1893 * or gfs2_set_demote due to recursion problems like: gfs2_glock_dq -> 1894 * handle_callback -> demote_incompat_holders -> gfs2_glock_dq 1895 * Plus, we only want to demote the holders if the request comes from 1896 * a remote cluster node because local holder conflicts are resolved 1897 * elsewhere. 1898 * 1899 * Note 2: if a remote node wants this glock in EX mode, lock_dlm will 1900 * request that we set our state to UNLOCKED. Here we mock up a holder 1901 * to make it look like someone wants the lock EX locally. Any SH 1902 * and DF requests should be able to share the lock without demoting. 1903 * 1904 * Note 3: We only want to demote the demoteable holders when there 1905 * are no more strong holders. The demoteable holders might as well 1906 * keep the glock until the last strong holder is done with it. 1907 */ 1908 if (!find_first_strong_holder(gl)) { 1909 struct gfs2_holder mock_gh = { 1910 .gh_gl = gl, 1911 .gh_state = (state == LM_ST_UNLOCKED) ? 
1912 LM_ST_EXCLUSIVE : state, 1913 .gh_iflags = BIT(HIF_HOLDER) 1914 }; 1915 1916 demote_incompat_holders(gl, &mock_gh); 1917 } 1918 handle_callback(gl, state, delay, true); 1919 __gfs2_glock_queue_work(gl, delay); 1920 spin_unlock(&gl->gl_lockref.lock); 1921 } 1922 1923 /** 1924 * gfs2_should_freeze - Figure out if glock should be frozen 1925 * @gl: The glock in question 1926 * 1927 * Glocks are not frozen if (a) the result of the dlm operation is 1928 * an error, (b) the locking operation was an unlock operation or 1929 * (c) if there is a "noexp" flagged request anywhere in the queue 1930 * 1931 * Returns: 1 if freezing should occur, 0 otherwise 1932 */ 1933 1934 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1935 { 1936 const struct gfs2_holder *gh; 1937 1938 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1939 return 0; 1940 if (gl->gl_target == LM_ST_UNLOCKED) 1941 return 0; 1942 1943 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1944 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1945 continue; 1946 if (LM_FLAG_NOEXP & gh->gh_flags) 1947 return 0; 1948 } 1949 1950 return 1; 1951 } 1952 1953 /** 1954 * gfs2_glock_complete - Callback used by locking 1955 * @gl: Pointer to the glock 1956 * @ret: The return value from the dlm 1957 * 1958 * The gl_reply field is under the gl_lockref.lock lock so that it is ok 1959 * to use a bitfield shared with other glock state fields. 1960 */ 1961 1962 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1963 { 1964 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1965 1966 spin_lock(&gl->gl_lockref.lock); 1967 gl->gl_reply = ret; 1968 1969 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1970 if (gfs2_should_freeze(gl)) { 1971 set_bit(GLF_FROZEN, &gl->gl_flags); 1972 spin_unlock(&gl->gl_lockref.lock); 1973 return; 1974 } 1975 } 1976 1977 gl->gl_lockref.count++; 1978 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1979 __gfs2_glock_queue_work(gl, 0); 1980 spin_unlock(&gl->gl_lockref.lock); 1981 } 1982 1983 static int glock_cmp(void *priv, const struct list_head *a, 1984 const struct list_head *b) 1985 { 1986 struct gfs2_glock *gla, *glb; 1987 1988 gla = list_entry(a, struct gfs2_glock, gl_lru); 1989 glb = list_entry(b, struct gfs2_glock, gl_lru); 1990 1991 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1992 return 1; 1993 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1994 return -1; 1995 1996 return 0; 1997 } 1998 1999 /** 2000 * gfs2_dispose_glock_lru - Demote a list of glocks 2001 * @list: The list to dispose of 2002 * 2003 * Disposing of glocks may involve disk accesses, so that here we sort 2004 * the glocks by number (i.e. disk location of the inodes) so that if 2005 * there are any such accesses, they'll be sent in order (mostly). 2006 * 2007 * Must be called under the lru_lock, but may drop and retake this 2008 * lock. 
While the lru_lock is dropped, entries may vanish from the 2009 * list, but no new entries will appear on the list (since it is 2010 * private) 2011 */ 2012 2013 static void gfs2_dispose_glock_lru(struct list_head *list) 2014 __releases(&lru_lock) 2015 __acquires(&lru_lock) 2016 { 2017 struct gfs2_glock *gl; 2018 2019 list_sort(NULL, list, glock_cmp); 2020 2021 while(!list_empty(list)) { 2022 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 2023 list_del_init(&gl->gl_lru); 2024 clear_bit(GLF_LRU, &gl->gl_flags); 2025 if (!spin_trylock(&gl->gl_lockref.lock)) { 2026 add_back_to_lru: 2027 list_add(&gl->gl_lru, &lru_list); 2028 set_bit(GLF_LRU, &gl->gl_flags); 2029 atomic_inc(&lru_count); 2030 continue; 2031 } 2032 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 2033 spin_unlock(&gl->gl_lockref.lock); 2034 goto add_back_to_lru; 2035 } 2036 gl->gl_lockref.count++; 2037 if (demote_ok(gl)) 2038 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 2039 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); 2040 __gfs2_glock_queue_work(gl, 0); 2041 spin_unlock(&gl->gl_lockref.lock); 2042 cond_resched_lock(&lru_lock); 2043 } 2044 } 2045 2046 /** 2047 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 2048 * @nr: The number of entries to scan 2049 * 2050 * This function selects the entries on the LRU which are able to 2051 * be demoted, and then kicks off the process by calling 2052 * gfs2_dispose_glock_lru() above. 2053 */ 2054 2055 static long gfs2_scan_glock_lru(int nr) 2056 { 2057 struct gfs2_glock *gl; 2058 LIST_HEAD(skipped); 2059 LIST_HEAD(dispose); 2060 long freed = 0; 2061 2062 spin_lock(&lru_lock); 2063 while ((nr-- >= 0) && !list_empty(&lru_list)) { 2064 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); 2065 2066 /* Test for being demotable */ 2067 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { 2068 list_move(&gl->gl_lru, &dispose); 2069 atomic_dec(&lru_count); 2070 freed++; 2071 continue; 2072 } 2073 2074 list_move(&gl->gl_lru, &skipped); 2075 } 2076 list_splice(&skipped, &lru_list); 2077 if (!list_empty(&dispose)) 2078 gfs2_dispose_glock_lru(&dispose); 2079 spin_unlock(&lru_lock); 2080 2081 return freed; 2082 } 2083 2084 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 2085 struct shrink_control *sc) 2086 { 2087 if (!(sc->gfp_mask & __GFP_FS)) 2088 return SHRINK_STOP; 2089 return gfs2_scan_glock_lru(sc->nr_to_scan); 2090 } 2091 2092 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 2093 struct shrink_control *sc) 2094 { 2095 return vfs_pressure_ratio(atomic_read(&lru_count)); 2096 } 2097 2098 static struct shrinker glock_shrinker = { 2099 .seeks = DEFAULT_SEEKS, 2100 .count_objects = gfs2_glock_shrink_count, 2101 .scan_objects = gfs2_glock_shrink_scan, 2102 }; 2103 2104 /** 2105 * glock_hash_walk - Call a function for glock in a hash bucket 2106 * @examiner: the function 2107 * @sdp: the filesystem 2108 * 2109 * Note that the function can be called multiple times on the same 2110 * object. So the user must ensure that the function can cope with 2111 * that. 
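 *
 * In practice an examiner should therefore be idempotent. thaw_glock()
 * below, for instance, guards its work with test_and_clear_bit() so that
 * being called twice on the same glock is harmless. A minimal sketch of
 * that pattern (example_examiner is purely illustrative):
 *
 *	static void example_examiner(struct gfs2_glock *gl)
 *	{
 *		if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
 *			return;	/* nothing left to do on a repeat call */
 *		/* ... act on the glock exactly once ... */
 *	}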
2112 */ 2113 2114 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 2115 { 2116 struct gfs2_glock *gl; 2117 struct rhashtable_iter iter; 2118 2119 rhashtable_walk_enter(&gl_hash_table, &iter); 2120 2121 do { 2122 rhashtable_walk_start(&iter); 2123 2124 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) { 2125 if (gl->gl_name.ln_sbd == sdp) 2126 examiner(gl); 2127 } 2128 2129 rhashtable_walk_stop(&iter); 2130 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); 2131 2132 rhashtable_walk_exit(&iter); 2133 } 2134 2135 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay) 2136 { 2137 bool queued; 2138 2139 spin_lock(&gl->gl_lockref.lock); 2140 queued = queue_delayed_work(gfs2_delete_workqueue, 2141 &gl->gl_delete, delay); 2142 if (queued) 2143 set_bit(GLF_PENDING_DELETE, &gl->gl_flags); 2144 spin_unlock(&gl->gl_lockref.lock); 2145 return queued; 2146 } 2147 2148 void gfs2_cancel_delete_work(struct gfs2_glock *gl) 2149 { 2150 if (cancel_delayed_work(&gl->gl_delete)) { 2151 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 2152 gfs2_glock_put(gl); 2153 } 2154 } 2155 2156 bool gfs2_delete_work_queued(const struct gfs2_glock *gl) 2157 { 2158 return test_bit(GLF_PENDING_DELETE, &gl->gl_flags); 2159 } 2160 2161 static void flush_delete_work(struct gfs2_glock *gl) 2162 { 2163 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { 2164 if (cancel_delayed_work(&gl->gl_delete)) { 2165 queue_delayed_work(gfs2_delete_workqueue, 2166 &gl->gl_delete, 0); 2167 } 2168 } 2169 } 2170 2171 void gfs2_flush_delete_work(struct gfs2_sbd *sdp) 2172 { 2173 glock_hash_walk(flush_delete_work, sdp); 2174 flush_workqueue(gfs2_delete_workqueue); 2175 } 2176 2177 /** 2178 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 2179 * @gl: The glock to thaw 2180 * 2181 */ 2182 2183 static void thaw_glock(struct gfs2_glock *gl) 2184 { 2185 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) 2186 return; 2187 if (!lockref_get_not_dead(&gl->gl_lockref)) 2188 return; 2189 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 2190 gfs2_glock_queue_work(gl, 0); 2191 } 2192 2193 /** 2194 * clear_glock - look at a glock and see if we can free it from glock cache 2195 * @gl: the glock to look at 2196 * 2197 */ 2198 2199 static void clear_glock(struct gfs2_glock *gl) 2200 { 2201 gfs2_glock_remove_from_lru(gl); 2202 2203 spin_lock(&gl->gl_lockref.lock); 2204 if (!__lockref_is_dead(&gl->gl_lockref)) { 2205 gl->gl_lockref.count++; 2206 if (gl->gl_state != LM_ST_UNLOCKED) 2207 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 2208 __gfs2_glock_queue_work(gl, 0); 2209 } 2210 spin_unlock(&gl->gl_lockref.lock); 2211 } 2212 2213 /** 2214 * gfs2_glock_thaw - Thaw any frozen glocks 2215 * @sdp: The super block 2216 * 2217 */ 2218 2219 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 2220 { 2221 glock_hash_walk(thaw_glock, sdp); 2222 } 2223 2224 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 2225 { 2226 spin_lock(&gl->gl_lockref.lock); 2227 gfs2_dump_glock(seq, gl, fsid); 2228 spin_unlock(&gl->gl_lockref.lock); 2229 } 2230 2231 static void dump_glock_func(struct gfs2_glock *gl) 2232 { 2233 dump_glock(NULL, gl, true); 2234 } 2235 2236 static void withdraw_dq(struct gfs2_glock *gl) 2237 { 2238 spin_lock(&gl->gl_lockref.lock); 2239 if (!__lockref_is_dead(&gl->gl_lockref) && 2240 glock_blocked_by_withdraw(gl)) 2241 do_error(gl, LM_OUT_ERROR); /* remove pending waiters */ 2242 spin_unlock(&gl->gl_lockref.lock); 2243 } 2244 2245 void gfs2_gl_dq_holders(struct gfs2_sbd *sdp) 2246 { 2247 
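	/*
	 * Used when the filesystem withdraws: walk all of its glocks and
	 * fail (via withdraw_dq() above) any waiters queued on glocks that
	 * the withdraw now blocks, so they are not left waiting forever.
	 */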
glock_hash_walk(withdraw_dq, sdp); 2248 } 2249 2250 /** 2251 * gfs2_gl_hash_clear - Empty out the glock hash table 2252 * @sdp: the filesystem 2253 * 2254 * Called when unmounting the filesystem. 2255 */ 2256 2257 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 2258 { 2259 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); 2260 flush_workqueue(glock_workqueue); 2261 glock_hash_walk(clear_glock, sdp); 2262 flush_workqueue(glock_workqueue); 2263 wait_event_timeout(sdp->sd_glock_wait, 2264 atomic_read(&sdp->sd_glock_disposal) == 0, 2265 HZ * 600); 2266 glock_hash_walk(dump_glock_func, sdp); 2267 } 2268 2269 static const char *state2str(unsigned state) 2270 { 2271 switch(state) { 2272 case LM_ST_UNLOCKED: 2273 return "UN"; 2274 case LM_ST_SHARED: 2275 return "SH"; 2276 case LM_ST_DEFERRED: 2277 return "DF"; 2278 case LM_ST_EXCLUSIVE: 2279 return "EX"; 2280 } 2281 return "??"; 2282 } 2283 2284 static const char *hflags2str(char *buf, u16 flags, unsigned long iflags) 2285 { 2286 char *p = buf; 2287 if (flags & LM_FLAG_TRY) 2288 *p++ = 't'; 2289 if (flags & LM_FLAG_TRY_1CB) 2290 *p++ = 'T'; 2291 if (flags & LM_FLAG_NOEXP) 2292 *p++ = 'e'; 2293 if (flags & LM_FLAG_ANY) 2294 *p++ = 'A'; 2295 if (flags & LM_FLAG_PRIORITY) 2296 *p++ = 'p'; 2297 if (flags & LM_FLAG_NODE_SCOPE) 2298 *p++ = 'n'; 2299 if (flags & GL_ASYNC) 2300 *p++ = 'a'; 2301 if (flags & GL_EXACT) 2302 *p++ = 'E'; 2303 if (flags & GL_NOCACHE) 2304 *p++ = 'c'; 2305 if (test_bit(HIF_HOLDER, &iflags)) 2306 *p++ = 'H'; 2307 if (test_bit(HIF_WAIT, &iflags)) 2308 *p++ = 'W'; 2309 if (test_bit(HIF_MAY_DEMOTE, &iflags)) 2310 *p++ = 'D'; 2311 if (flags & GL_SKIP) 2312 *p++ = 's'; 2313 *p = 0; 2314 return buf; 2315 } 2316 2317 /** 2318 * dump_holder - print information about a glock holder 2319 * @seq: the seq_file struct 2320 * @gh: the glock holder 2321 * @fs_id_buf: pointer to file system id (if requested) 2322 * 2323 */ 2324 2325 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh, 2326 const char *fs_id_buf) 2327 { 2328 const char *comm = "(none)"; 2329 pid_t owner_pid = 0; 2330 char flags_buf[32]; 2331 2332 rcu_read_lock(); 2333 if (pid_is_meaningful(gh)) { 2334 struct task_struct *gh_owner; 2335 2336 comm = "(ended)"; 2337 owner_pid = pid_nr(gh->gh_owner_pid); 2338 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 2339 if (gh_owner) 2340 comm = gh_owner->comm; 2341 } 2342 gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n", 2343 fs_id_buf, state2str(gh->gh_state), 2344 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), 2345 gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip); 2346 rcu_read_unlock(); 2347 } 2348 2349 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) 2350 { 2351 const unsigned long *gflags = &gl->gl_flags; 2352 char *p = buf; 2353 2354 if (test_bit(GLF_LOCK, gflags)) 2355 *p++ = 'l'; 2356 if (test_bit(GLF_DEMOTE, gflags)) 2357 *p++ = 'D'; 2358 if (test_bit(GLF_PENDING_DEMOTE, gflags)) 2359 *p++ = 'd'; 2360 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags)) 2361 *p++ = 'p'; 2362 if (test_bit(GLF_DIRTY, gflags)) 2363 *p++ = 'y'; 2364 if (test_bit(GLF_LFLUSH, gflags)) 2365 *p++ = 'f'; 2366 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) 2367 *p++ = 'i'; 2368 if (test_bit(GLF_REPLY_PENDING, gflags)) 2369 *p++ = 'r'; 2370 if (test_bit(GLF_INITIAL, gflags)) 2371 *p++ = 'I'; 2372 if (test_bit(GLF_FROZEN, gflags)) 2373 *p++ = 'F'; 2374 if (!list_empty(&gl->gl_holders)) 2375 *p++ = 'q'; 2376 if (test_bit(GLF_LRU, gflags)) 2377 *p++ = 'L'; 2378 if (gl->gl_object) 2379 *p++ = 'o'; 2380 
if (test_bit(GLF_BLOCKING, gflags)) 2381 *p++ = 'b'; 2382 if (test_bit(GLF_PENDING_DELETE, gflags)) 2383 *p++ = 'P'; 2384 if (test_bit(GLF_FREEING, gflags)) 2385 *p++ = 'x'; 2386 if (test_bit(GLF_INSTANTIATE_NEEDED, gflags)) 2387 *p++ = 'n'; 2388 if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags)) 2389 *p++ = 'N'; 2390 *p = 0; 2391 return buf; 2392 } 2393 2394 /** 2395 * gfs2_dump_glock - print information about a glock 2396 * @seq: The seq_file struct 2397 * @gl: the glock 2398 * @fsid: If true, also dump the file system id 2399 * 2400 * The file format is as follows: 2401 * One line per object; capital letters are used to indicate objects 2402 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented, 2403 * other objects are indented by a single space and follow the glock to 2404 * which they are related. Fields are indicated by lower case letters 2405 * followed by a colon and the field value, except for strings which are in 2406 * [] so that it's possible to see if they are composed of spaces for 2407 * example. The fields are n = number (id of the object), f = flags, 2408 * t = type, s = state, r = refcount, e = error, p = pid. 2409 * 2410 */ 2411 2412 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 2413 { 2414 const struct gfs2_glock_operations *glops = gl->gl_ops; 2415 unsigned long long dtime; 2416 const struct gfs2_holder *gh; 2417 char gflags_buf[32]; 2418 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 2419 char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; 2420 unsigned long nrpages = 0; 2421 2422 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 2423 struct address_space *mapping = gfs2_glock2aspace(gl); 2424 2425 nrpages = mapping->nrpages; 2426 } 2427 memset(fs_id_buf, 0, sizeof(fs_id_buf)); 2428 if (fsid && sdp) /* safety precaution */ 2429 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); 2430 dtime = jiffies - gl->gl_demote_time; 2431 dtime *= 1000000/HZ; /* demote time in uSec */ 2432 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) 2433 dtime = 0; 2434 gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d " 2435 "v:%d r:%d m:%ld p:%lu\n", 2436 fs_id_buf, state2str(gl->gl_state), 2437 gl->gl_name.ln_type, 2438 (unsigned long long)gl->gl_name.ln_number, 2439 gflags2str(gflags_buf, gl), 2440 state2str(gl->gl_target), 2441 state2str(gl->gl_demote_state), dtime, 2442 atomic_read(&gl->gl_ail_count), 2443 atomic_read(&gl->gl_revokes), 2444 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages); 2445 2446 list_for_each_entry(gh, &gl->gl_holders, gh_list) 2447 dump_holder(seq, gh, fs_id_buf); 2448 2449 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) 2450 glops->go_dump(seq, gl, fs_id_buf); 2451 } 2452 2453 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr) 2454 { 2455 struct gfs2_glock *gl = iter_ptr; 2456 2457 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n", 2458 gl->gl_name.ln_type, 2459 (unsigned long long)gl->gl_name.ln_number, 2460 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], 2461 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], 2462 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], 2463 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], 2464 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], 2465 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], 2466 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], 2467 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); 2468 return 0; 2469 } 2470 2471 static const char *gfs2_gltype[] =
{ 2472 "type", 2473 "reserved", 2474 "nondisk", 2475 "inode", 2476 "rgrp", 2477 "meta", 2478 "iopen", 2479 "flock", 2480 "plock", 2481 "quota", 2482 "journal", 2483 }; 2484 2485 static const char *gfs2_stype[] = { 2486 [GFS2_LKS_SRTT] = "srtt", 2487 [GFS2_LKS_SRTTVAR] = "srttvar", 2488 [GFS2_LKS_SRTTB] = "srttb", 2489 [GFS2_LKS_SRTTVARB] = "srttvarb", 2490 [GFS2_LKS_SIRT] = "sirt", 2491 [GFS2_LKS_SIRTVAR] = "sirtvar", 2492 [GFS2_LKS_DCOUNT] = "dlm", 2493 [GFS2_LKS_QCOUNT] = "queue", 2494 }; 2495 2496 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype)) 2497 2498 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) 2499 { 2500 struct gfs2_sbd *sdp = seq->private; 2501 loff_t pos = *(loff_t *)iter_ptr; 2502 unsigned index = pos >> 3; 2503 unsigned subindex = pos & 0x07; 2504 int i; 2505 2506 if (index == 0 && subindex != 0) 2507 return 0; 2508 2509 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], 2510 (index == 0) ? "cpu": gfs2_stype[subindex]); 2511 2512 for_each_possible_cpu(i) { 2513 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 2514 2515 if (index == 0) 2516 seq_printf(seq, " %15u", i); 2517 else 2518 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> 2519 lkstats[index - 1].stats[subindex]); 2520 } 2521 seq_putc(seq, '\n'); 2522 return 0; 2523 } 2524 2525 int __init gfs2_glock_init(void) 2526 { 2527 int i, ret; 2528 2529 ret = rhashtable_init(&gl_hash_table, &ht_parms); 2530 if (ret < 0) 2531 return ret; 2532 2533 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 2534 WQ_HIGHPRI | WQ_FREEZABLE, 0); 2535 if (!glock_workqueue) { 2536 rhashtable_destroy(&gl_hash_table); 2537 return -ENOMEM; 2538 } 2539 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 2540 WQ_MEM_RECLAIM | WQ_FREEZABLE, 2541 0); 2542 if (!gfs2_delete_workqueue) { 2543 destroy_workqueue(glock_workqueue); 2544 rhashtable_destroy(&gl_hash_table); 2545 return -ENOMEM; 2546 } 2547 2548 ret = register_shrinker(&glock_shrinker, "gfs2-glock"); 2549 if (ret) { 2550 destroy_workqueue(gfs2_delete_workqueue); 2551 destroy_workqueue(glock_workqueue); 2552 rhashtable_destroy(&gl_hash_table); 2553 return ret; 2554 } 2555 2556 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++) 2557 init_waitqueue_head(glock_wait_table + i); 2558 2559 return 0; 2560 } 2561 2562 void gfs2_glock_exit(void) 2563 { 2564 unregister_shrinker(&glock_shrinker); 2565 rhashtable_destroy(&gl_hash_table); 2566 destroy_workqueue(glock_workqueue); 2567 destroy_workqueue(gfs2_delete_workqueue); 2568 } 2569 2570 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) 2571 { 2572 struct gfs2_glock *gl = gi->gl; 2573 2574 if (gl) { 2575 if (n == 0) 2576 return; 2577 if (!lockref_put_not_zero(&gl->gl_lockref)) 2578 gfs2_glock_queue_put(gl); 2579 } 2580 for (;;) { 2581 gl = rhashtable_walk_next(&gi->hti); 2582 if (IS_ERR_OR_NULL(gl)) { 2583 if (gl == ERR_PTR(-EAGAIN)) { 2584 n = 1; 2585 continue; 2586 } 2587 gl = NULL; 2588 break; 2589 } 2590 if (gl->gl_name.ln_sbd != gi->sdp) 2591 continue; 2592 if (n <= 1) { 2593 if (!lockref_get_not_dead(&gl->gl_lockref)) 2594 continue; 2595 break; 2596 } else { 2597 if (__lockref_is_dead(&gl->gl_lockref)) 2598 continue; 2599 n--; 2600 } 2601 } 2602 gi->gl = gl; 2603 } 2604 2605 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 2606 __acquires(RCU) 2607 { 2608 struct gfs2_glock_iter *gi = seq->private; 2609 loff_t n; 2610 2611 /* 2612 * We can either stay where we are, skip to the next hash table 2613 * 
entry, or start from the beginning. 2614 */ 2615 if (*pos < gi->last_pos) { 2616 rhashtable_walk_exit(&gi->hti); 2617 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2618 n = *pos + 1; 2619 } else { 2620 n = *pos - gi->last_pos; 2621 } 2622 2623 rhashtable_walk_start(&gi->hti); 2624 2625 gfs2_glock_iter_next(gi, n); 2626 gi->last_pos = *pos; 2627 return gi->gl; 2628 } 2629 2630 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, 2631 loff_t *pos) 2632 { 2633 struct gfs2_glock_iter *gi = seq->private; 2634 2635 (*pos)++; 2636 gi->last_pos = *pos; 2637 gfs2_glock_iter_next(gi, 1); 2638 return gi->gl; 2639 } 2640 2641 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 2642 __releases(RCU) 2643 { 2644 struct gfs2_glock_iter *gi = seq->private; 2645 2646 rhashtable_walk_stop(&gi->hti); 2647 } 2648 2649 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 2650 { 2651 dump_glock(seq, iter_ptr, false); 2652 return 0; 2653 } 2654 2655 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 2656 { 2657 preempt_disable(); 2658 if (*pos >= GFS2_NR_SBSTATS) 2659 return NULL; 2660 return pos; 2661 } 2662 2663 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 2664 loff_t *pos) 2665 { 2666 (*pos)++; 2667 if (*pos >= GFS2_NR_SBSTATS) 2668 return NULL; 2669 return pos; 2670 } 2671 2672 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) 2673 { 2674 preempt_enable(); 2675 } 2676 2677 static const struct seq_operations gfs2_glock_seq_ops = { 2678 .start = gfs2_glock_seq_start, 2679 .next = gfs2_glock_seq_next, 2680 .stop = gfs2_glock_seq_stop, 2681 .show = gfs2_glock_seq_show, 2682 }; 2683 2684 static const struct seq_operations gfs2_glstats_seq_ops = { 2685 .start = gfs2_glock_seq_start, 2686 .next = gfs2_glock_seq_next, 2687 .stop = gfs2_glock_seq_stop, 2688 .show = gfs2_glstats_seq_show, 2689 }; 2690 2691 static const struct seq_operations gfs2_sbstats_sops = { 2692 .start = gfs2_sbstats_seq_start, 2693 .next = gfs2_sbstats_seq_next, 2694 .stop = gfs2_sbstats_seq_stop, 2695 .show = gfs2_sbstats_seq_show, 2696 }; 2697 2698 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL) 2699 2700 static int __gfs2_glocks_open(struct inode *inode, struct file *file, 2701 const struct seq_operations *ops) 2702 { 2703 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter)); 2704 if (ret == 0) { 2705 struct seq_file *seq = file->private_data; 2706 struct gfs2_glock_iter *gi = seq->private; 2707 2708 gi->sdp = inode->i_private; 2709 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 2710 if (seq->buf) 2711 seq->size = GFS2_SEQ_GOODSIZE; 2712 /* 2713 * Initially, we are "before" the first hash table entry; the 2714 * first call to rhashtable_walk_next gets us the first entry. 
2715 */ 2716 gi->last_pos = -1; 2717 gi->gl = NULL; 2718 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2719 } 2720 return ret; 2721 } 2722 2723 static int gfs2_glocks_open(struct inode *inode, struct file *file) 2724 { 2725 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops); 2726 } 2727 2728 static int gfs2_glocks_release(struct inode *inode, struct file *file) 2729 { 2730 struct seq_file *seq = file->private_data; 2731 struct gfs2_glock_iter *gi = seq->private; 2732 2733 if (gi->gl) 2734 gfs2_glock_put(gi->gl); 2735 rhashtable_walk_exit(&gi->hti); 2736 return seq_release_private(inode, file); 2737 } 2738 2739 static int gfs2_glstats_open(struct inode *inode, struct file *file) 2740 { 2741 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops); 2742 } 2743 2744 static const struct file_operations gfs2_glocks_fops = { 2745 .owner = THIS_MODULE, 2746 .open = gfs2_glocks_open, 2747 .read = seq_read, 2748 .llseek = seq_lseek, 2749 .release = gfs2_glocks_release, 2750 }; 2751 2752 static const struct file_operations gfs2_glstats_fops = { 2753 .owner = THIS_MODULE, 2754 .open = gfs2_glstats_open, 2755 .read = seq_read, 2756 .llseek = seq_lseek, 2757 .release = gfs2_glocks_release, 2758 }; 2759 2760 struct gfs2_glockfd_iter { 2761 struct super_block *sb; 2762 unsigned int tgid; 2763 struct task_struct *task; 2764 unsigned int fd; 2765 struct file *file; 2766 }; 2767 2768 static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i) 2769 { 2770 struct pid_namespace *ns = task_active_pid_ns(current); 2771 struct pid *pid; 2772 2773 if (i->task) 2774 put_task_struct(i->task); 2775 2776 rcu_read_lock(); 2777 retry: 2778 i->task = NULL; 2779 pid = find_ge_pid(i->tgid, ns); 2780 if (pid) { 2781 i->tgid = pid_nr_ns(pid, ns); 2782 i->task = pid_task(pid, PIDTYPE_TGID); 2783 if (!i->task) { 2784 i->tgid++; 2785 goto retry; 2786 } 2787 get_task_struct(i->task); 2788 } 2789 rcu_read_unlock(); 2790 return i->task; 2791 } 2792 2793 static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i) 2794 { 2795 if (i->file) { 2796 fput(i->file); 2797 i->file = NULL; 2798 } 2799 2800 rcu_read_lock(); 2801 for(;; i->fd++) { 2802 struct inode *inode; 2803 2804 i->file = task_lookup_next_fd_rcu(i->task, &i->fd); 2805 if (!i->file) { 2806 i->fd = 0; 2807 break; 2808 } 2809 inode = file_inode(i->file); 2810 if (inode->i_sb != i->sb) 2811 continue; 2812 if (get_file_rcu(i->file)) 2813 break; 2814 } 2815 rcu_read_unlock(); 2816 return i->file; 2817 } 2818 2819 static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos) 2820 { 2821 struct gfs2_glockfd_iter *i = seq->private; 2822 2823 if (*pos) 2824 return NULL; 2825 while (gfs2_glockfd_next_task(i)) { 2826 if (gfs2_glockfd_next_file(i)) 2827 return i; 2828 i->tgid++; 2829 } 2830 return NULL; 2831 } 2832 2833 static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr, 2834 loff_t *pos) 2835 { 2836 struct gfs2_glockfd_iter *i = seq->private; 2837 2838 (*pos)++; 2839 i->fd++; 2840 do { 2841 if (gfs2_glockfd_next_file(i)) 2842 return i; 2843 i->tgid++; 2844 } while (gfs2_glockfd_next_task(i)); 2845 return NULL; 2846 } 2847 2848 static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr) 2849 { 2850 struct gfs2_glockfd_iter *i = seq->private; 2851 2852 if (i->file) 2853 fput(i->file); 2854 if (i->task) 2855 put_task_struct(i->task); 2856 } 2857 2858 static void gfs2_glockfd_seq_show_flock(struct seq_file *seq, 2859 struct gfs2_glockfd_iter *i) 2860 { 2861 struct gfs2_file *fp = 
i->file->private_data; 2862 struct gfs2_holder *fl_gh = &fp->f_fl_gh; 2863 struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED }; 2864 2865 if (!READ_ONCE(fl_gh->gh_gl)) 2866 return; 2867 2868 spin_lock(&i->file->f_lock); 2869 if (gfs2_holder_initialized(fl_gh)) 2870 gl_name = fl_gh->gh_gl->gl_name; 2871 spin_unlock(&i->file->f_lock); 2872 2873 if (gl_name.ln_type != LM_TYPE_RESERVED) { 2874 seq_printf(seq, "%d %u %u/%llx\n", 2875 i->tgid, i->fd, gl_name.ln_type, 2876 (unsigned long long)gl_name.ln_number); 2877 } 2878 } 2879 2880 static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr) 2881 { 2882 struct gfs2_glockfd_iter *i = seq->private; 2883 struct inode *inode = file_inode(i->file); 2884 struct gfs2_glock *gl; 2885 2886 inode_lock_shared(inode); 2887 gl = GFS2_I(inode)->i_iopen_gh.gh_gl; 2888 if (gl) { 2889 seq_printf(seq, "%d %u %u/%llx\n", 2890 i->tgid, i->fd, gl->gl_name.ln_type, 2891 (unsigned long long)gl->gl_name.ln_number); 2892 } 2893 gfs2_glockfd_seq_show_flock(seq, i); 2894 inode_unlock_shared(inode); 2895 return 0; 2896 } 2897 2898 static const struct seq_operations gfs2_glockfd_seq_ops = { 2899 .start = gfs2_glockfd_seq_start, 2900 .next = gfs2_glockfd_seq_next, 2901 .stop = gfs2_glockfd_seq_stop, 2902 .show = gfs2_glockfd_seq_show, 2903 }; 2904 2905 static int gfs2_glockfd_open(struct inode *inode, struct file *file) 2906 { 2907 struct gfs2_glockfd_iter *i; 2908 struct gfs2_sbd *sdp = inode->i_private; 2909 2910 i = __seq_open_private(file, &gfs2_glockfd_seq_ops, 2911 sizeof(struct gfs2_glockfd_iter)); 2912 if (!i) 2913 return -ENOMEM; 2914 i->sb = sdp->sd_vfs; 2915 return 0; 2916 } 2917 2918 static const struct file_operations gfs2_glockfd_fops = { 2919 .owner = THIS_MODULE, 2920 .open = gfs2_glockfd_open, 2921 .read = seq_read, 2922 .llseek = seq_lseek, 2923 .release = seq_release_private, 2924 }; 2925 2926 DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats); 2927 2928 void gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 2929 { 2930 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); 2931 2932 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2933 &gfs2_glocks_fops); 2934 2935 debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2936 &gfs2_glockfd_fops); 2937 2938 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2939 &gfs2_glstats_fops); 2940 2941 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2942 &gfs2_sbstats_fops); 2943 } 2944 2945 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) 2946 { 2947 debugfs_remove_recursive(sdp->debugfs_dir); 2948 sdp->debugfs_dir = NULL; 2949 } 2950 2951 void gfs2_register_debugfs(void) 2952 { 2953 gfs2_root = debugfs_create_dir("gfs2", NULL); 2954 } 2955 2956 void gfs2_unregister_debugfs(void) 2957 { 2958 debugfs_remove(gfs2_root); 2959 gfs2_root = NULL; 2960 } 2961
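
/*
 * Usage note (illustrative only, assuming debugfs is mounted at its usual
 * location, /sys/kernel/debug): the per-filesystem files created by
 * gfs2_create_debugfs_file() above can then be read directly, e.g.
 *
 *	cat /sys/kernel/debug/gfs2/<table name>/glocks
 *	cat /sys/kernel/debug/gfs2/<table name>/glstats
 *	cat /sys/kernel/debug/gfs2/<table name>/sbstats
 *	cat /sys/kernel/debug/gfs2/<table name>/glockfd
 *
 * The format of the glocks file is documented above gfs2_dump_glock().
 */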