1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmthread.c 5 * 6 * standalone DLM module 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/highmem.h> 32 #include <linux/init.h> 33 #include <linux/sysctl.h> 34 #include <linux/random.h> 35 #include <linux/blkdev.h> 36 #include <linux/socket.h> 37 #include <linux/inet.h> 38 #include <linux/timer.h> 39 #include <linux/kthread.h> 40 #include <linux/delay.h> 41 42 43 #include "cluster/heartbeat.h" 44 #include "cluster/nodemanager.h" 45 #include "cluster/tcp.h" 46 47 #include "dlmapi.h" 48 #include "dlmcommon.h" 49 #include "dlmdomain.h" 50 51 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD) 52 #include "cluster/masklog.h" 53 54 static int dlm_thread(void *data); 55 static void dlm_flush_asts(struct dlm_ctxt *dlm); 56 57 #define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num) 58 59 /* will exit holding res->spinlock, but may drop in function */ 60 /* waits until flags are cleared on res->state */ 61 void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags) 62 { 63 DECLARE_WAITQUEUE(wait, current); 64 65 assert_spin_locked(&res->spinlock); 66 67 add_wait_queue(&res->wq, &wait); 68 repeat: 69 set_current_state(TASK_UNINTERRUPTIBLE); 70 if (res->state & flags) { 71 spin_unlock(&res->spinlock); 72 schedule(); 73 spin_lock(&res->spinlock); 74 goto repeat; 75 } 76 remove_wait_queue(&res->wq, &wait); 77 __set_current_state(TASK_RUNNING); 78 } 79 80 int __dlm_lockres_has_locks(struct dlm_lock_resource *res) 81 { 82 if (list_empty(&res->granted) && 83 list_empty(&res->converting) && 84 list_empty(&res->blocked)) 85 return 0; 86 return 1; 87 } 88 89 /* "unused": the lockres has no locks, is not on the dirty list, 90 * has no inflight locks (in the gap between mastery and acquiring 91 * the first lock), and has no bits in its refmap. 92 * truly ready to be freed. */ 93 int __dlm_lockres_unused(struct dlm_lock_resource *res) 94 { 95 if (!__dlm_lockres_has_locks(res) && 96 (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) { 97 /* try not to scan the bitmap unless the first two 98 * conditions are already true */ 99 int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 100 if (bit >= O2NM_MAX_NODES) { 101 /* since the bit for dlm->node_num is not 102 * set, inflight_locks better be zero */ 103 BUG_ON(res->inflight_locks != 0); 104 return 1; 105 } 106 } 107 return 0; 108 } 109 110 111 /* Call whenever you may have added or deleted something from one of 112 * the lockres queue's. This will figure out whether it belongs on the 113 * unused list or not and does the appropriate thing. */ 114 void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, 115 struct dlm_lock_resource *res) 116 { 117 mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 118 119 assert_spin_locked(&dlm->spinlock); 120 assert_spin_locked(&res->spinlock); 121 122 if (__dlm_lockres_unused(res)){ 123 if (list_empty(&res->purge)) { 124 mlog(0, "putting lockres %.*s:%p onto purge list\n", 125 res->lockname.len, res->lockname.name, res); 126 127 res->last_used = jiffies; 128 dlm_lockres_get(res); 129 list_add_tail(&res->purge, &dlm->purge_list); 130 dlm->purge_count++; 131 } 132 } else if (!list_empty(&res->purge)) { 133 mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n", 134 res->lockname.len, res->lockname.name, res, res->owner); 135 136 list_del_init(&res->purge); 137 dlm_lockres_put(res); 138 dlm->purge_count--; 139 } 140 } 141 142 void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, 143 struct dlm_lock_resource *res) 144 { 145 mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 146 spin_lock(&dlm->spinlock); 147 spin_lock(&res->spinlock); 148 149 __dlm_lockres_calc_usage(dlm, res); 150 151 spin_unlock(&res->spinlock); 152 spin_unlock(&dlm->spinlock); 153 } 154 155 static int dlm_purge_lockres(struct dlm_ctxt *dlm, 156 struct dlm_lock_resource *res) 157 { 158 int master; 159 int ret = 0; 160 161 spin_lock(&res->spinlock); 162 if (!__dlm_lockres_unused(res)) { 163 mlog(0, "%s:%.*s: tried to purge but not unused\n", 164 dlm->name, res->lockname.len, res->lockname.name); 165 __dlm_print_one_lock_resource(res); 166 spin_unlock(&res->spinlock); 167 BUG(); 168 } 169 170 if (res->state & DLM_LOCK_RES_MIGRATING) { 171 mlog(0, "%s:%.*s: Delay dropref as this lockres is " 172 "being remastered\n", dlm->name, res->lockname.len, 173 res->lockname.name); 174 /* Re-add the lockres to the end of the purge list */ 175 if (!list_empty(&res->purge)) { 176 list_del_init(&res->purge); 177 list_add_tail(&res->purge, &dlm->purge_list); 178 } 179 spin_unlock(&res->spinlock); 180 return 0; 181 } 182 183 master = (res->owner == dlm->node_num); 184 185 if (!master) 186 res->state |= DLM_LOCK_RES_DROPPING_REF; 187 spin_unlock(&res->spinlock); 188 189 mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, 190 res->lockname.name, master); 191 192 if (!master) { 193 /* drop spinlock... retake below */ 194 spin_unlock(&dlm->spinlock); 195 196 spin_lock(&res->spinlock); 197 /* This ensures that clear refmap is sent after the set */ 198 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 199 spin_unlock(&res->spinlock); 200 201 /* clear our bit from the master's refmap, ignore errors */ 202 ret = dlm_drop_lockres_ref(dlm, res); 203 if (ret < 0) { 204 mlog_errno(ret); 205 if (!dlm_is_host_down(ret)) 206 BUG(); 207 } 208 mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", 209 dlm->name, res->lockname.len, res->lockname.name, ret); 210 spin_lock(&dlm->spinlock); 211 } 212 213 spin_lock(&res->spinlock); 214 if (!list_empty(&res->purge)) { 215 mlog(0, "removing lockres %.*s:%p from purgelist, " 216 "master = %d\n", res->lockname.len, res->lockname.name, 217 res, master); 218 list_del_init(&res->purge); 219 spin_unlock(&res->spinlock); 220 dlm_lockres_put(res); 221 dlm->purge_count--; 222 } else 223 spin_unlock(&res->spinlock); 224 225 __dlm_unhash_lockres(res); 226 227 /* lockres is not in the hash now. drop the flag and wake up 228 * any processes waiting in dlm_get_lock_resource. */ 229 if (!master) { 230 spin_lock(&res->spinlock); 231 res->state &= ~DLM_LOCK_RES_DROPPING_REF; 232 spin_unlock(&res->spinlock); 233 wake_up(&res->wq); 234 } 235 return 0; 236 } 237 238 static void dlm_run_purge_list(struct dlm_ctxt *dlm, 239 int purge_now) 240 { 241 unsigned int run_max, unused; 242 unsigned long purge_jiffies; 243 struct dlm_lock_resource *lockres; 244 245 spin_lock(&dlm->spinlock); 246 run_max = dlm->purge_count; 247 248 while(run_max && !list_empty(&dlm->purge_list)) { 249 run_max--; 250 251 lockres = list_entry(dlm->purge_list.next, 252 struct dlm_lock_resource, purge); 253 254 /* Status of the lockres *might* change so double 255 * check. If the lockres is unused, holding the dlm 256 * spinlock will prevent people from getting and more 257 * refs on it -- there's no need to keep the lockres 258 * spinlock. */ 259 spin_lock(&lockres->spinlock); 260 unused = __dlm_lockres_unused(lockres); 261 spin_unlock(&lockres->spinlock); 262 263 if (!unused) 264 continue; 265 266 purge_jiffies = lockres->last_used + 267 msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); 268 269 /* Make sure that we want to be processing this guy at 270 * this time. */ 271 if (!purge_now && time_after(purge_jiffies, jiffies)) { 272 /* Since resources are added to the purge list 273 * in tail order, we can stop at the first 274 * unpurgable resource -- anyone added after 275 * him will have a greater last_used value */ 276 break; 277 } 278 279 dlm_lockres_get(lockres); 280 281 /* This may drop and reacquire the dlm spinlock if it 282 * has to do migration. */ 283 if (dlm_purge_lockres(dlm, lockres)) 284 BUG(); 285 286 dlm_lockres_put(lockres); 287 288 /* Avoid adding any scheduling latencies */ 289 cond_resched_lock(&dlm->spinlock); 290 } 291 292 spin_unlock(&dlm->spinlock); 293 } 294 295 static void dlm_shuffle_lists(struct dlm_ctxt *dlm, 296 struct dlm_lock_resource *res) 297 { 298 struct dlm_lock *lock, *target; 299 struct list_head *iter; 300 struct list_head *head; 301 int can_grant = 1; 302 303 //mlog(0, "res->lockname.len=%d\n", res->lockname.len); 304 //mlog(0, "res->lockname.name=%p\n", res->lockname.name); 305 //mlog(0, "shuffle res %.*s\n", res->lockname.len, 306 // res->lockname.name); 307 308 /* because this function is called with the lockres 309 * spinlock, and because we know that it is not migrating/ 310 * recovering/in-progress, it is fine to reserve asts and 311 * basts right before queueing them all throughout */ 312 assert_spin_locked(&res->spinlock); 313 BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| 314 DLM_LOCK_RES_RECOVERING| 315 DLM_LOCK_RES_IN_PROGRESS))); 316 317 converting: 318 if (list_empty(&res->converting)) 319 goto blocked; 320 mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len, 321 res->lockname.name); 322 323 target = list_entry(res->converting.next, struct dlm_lock, list); 324 if (target->ml.convert_type == LKM_IVMODE) { 325 mlog(ML_ERROR, "%.*s: converting a lock with no " 326 "convert_type!\n", res->lockname.len, res->lockname.name); 327 BUG(); 328 } 329 head = &res->granted; 330 list_for_each(iter, head) { 331 lock = list_entry(iter, struct dlm_lock, list); 332 if (lock==target) 333 continue; 334 if (!dlm_lock_compatible(lock->ml.type, 335 target->ml.convert_type)) { 336 can_grant = 0; 337 /* queue the BAST if not already */ 338 if (lock->ml.highest_blocked == LKM_IVMODE) { 339 __dlm_lockres_reserve_ast(res); 340 dlm_queue_bast(dlm, lock); 341 } 342 /* update the highest_blocked if needed */ 343 if (lock->ml.highest_blocked < target->ml.convert_type) 344 lock->ml.highest_blocked = 345 target->ml.convert_type; 346 } 347 } 348 head = &res->converting; 349 list_for_each(iter, head) { 350 lock = list_entry(iter, struct dlm_lock, list); 351 if (lock==target) 352 continue; 353 if (!dlm_lock_compatible(lock->ml.type, 354 target->ml.convert_type)) { 355 can_grant = 0; 356 if (lock->ml.highest_blocked == LKM_IVMODE) { 357 __dlm_lockres_reserve_ast(res); 358 dlm_queue_bast(dlm, lock); 359 } 360 if (lock->ml.highest_blocked < target->ml.convert_type) 361 lock->ml.highest_blocked = 362 target->ml.convert_type; 363 } 364 } 365 366 /* we can convert the lock */ 367 if (can_grant) { 368 spin_lock(&target->spinlock); 369 BUG_ON(target->ml.highest_blocked != LKM_IVMODE); 370 371 mlog(0, "calling ast for converting lock: %.*s, have: %d, " 372 "granting: %d, node: %u\n", res->lockname.len, 373 res->lockname.name, target->ml.type, 374 target->ml.convert_type, target->ml.node); 375 376 target->ml.type = target->ml.convert_type; 377 target->ml.convert_type = LKM_IVMODE; 378 list_move_tail(&target->list, &res->granted); 379 380 BUG_ON(!target->lksb); 381 target->lksb->status = DLM_NORMAL; 382 383 spin_unlock(&target->spinlock); 384 385 __dlm_lockres_reserve_ast(res); 386 dlm_queue_ast(dlm, target); 387 /* go back and check for more */ 388 goto converting; 389 } 390 391 blocked: 392 if (list_empty(&res->blocked)) 393 goto leave; 394 target = list_entry(res->blocked.next, struct dlm_lock, list); 395 396 head = &res->granted; 397 list_for_each(iter, head) { 398 lock = list_entry(iter, struct dlm_lock, list); 399 if (lock==target) 400 continue; 401 if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { 402 can_grant = 0; 403 if (lock->ml.highest_blocked == LKM_IVMODE) { 404 __dlm_lockres_reserve_ast(res); 405 dlm_queue_bast(dlm, lock); 406 } 407 if (lock->ml.highest_blocked < target->ml.type) 408 lock->ml.highest_blocked = target->ml.type; 409 } 410 } 411 412 head = &res->converting; 413 list_for_each(iter, head) { 414 lock = list_entry(iter, struct dlm_lock, list); 415 if (lock==target) 416 continue; 417 if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { 418 can_grant = 0; 419 if (lock->ml.highest_blocked == LKM_IVMODE) { 420 __dlm_lockres_reserve_ast(res); 421 dlm_queue_bast(dlm, lock); 422 } 423 if (lock->ml.highest_blocked < target->ml.type) 424 lock->ml.highest_blocked = target->ml.type; 425 } 426 } 427 428 /* we can grant the blocked lock (only 429 * possible if converting list empty) */ 430 if (can_grant) { 431 spin_lock(&target->spinlock); 432 BUG_ON(target->ml.highest_blocked != LKM_IVMODE); 433 434 mlog(0, "calling ast for blocked lock: %.*s, granting: %d, " 435 "node: %u\n", res->lockname.len, res->lockname.name, 436 target->ml.type, target->ml.node); 437 438 // target->ml.type is already correct 439 list_move_tail(&target->list, &res->granted); 440 441 BUG_ON(!target->lksb); 442 target->lksb->status = DLM_NORMAL; 443 444 spin_unlock(&target->spinlock); 445 446 __dlm_lockres_reserve_ast(res); 447 dlm_queue_ast(dlm, target); 448 /* go back and check for more */ 449 goto converting; 450 } 451 452 leave: 453 return; 454 } 455 456 /* must have NO locks when calling this with res !=NULL * */ 457 void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 458 { 459 mlog_entry("dlm=%p, res=%p\n", dlm, res); 460 if (res) { 461 spin_lock(&dlm->spinlock); 462 spin_lock(&res->spinlock); 463 __dlm_dirty_lockres(dlm, res); 464 spin_unlock(&res->spinlock); 465 spin_unlock(&dlm->spinlock); 466 } 467 wake_up(&dlm->dlm_thread_wq); 468 } 469 470 void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 471 { 472 mlog_entry("dlm=%p, res=%p\n", dlm, res); 473 474 assert_spin_locked(&dlm->spinlock); 475 assert_spin_locked(&res->spinlock); 476 477 /* don't shuffle secondary queues */ 478 if ((res->owner == dlm->node_num)) { 479 if (res->state & (DLM_LOCK_RES_MIGRATING | 480 DLM_LOCK_RES_BLOCK_DIRTY)) 481 return; 482 483 if (list_empty(&res->dirty)) { 484 /* ref for dirty_list */ 485 dlm_lockres_get(res); 486 list_add_tail(&res->dirty, &dlm->dirty_list); 487 res->state |= DLM_LOCK_RES_DIRTY; 488 } 489 } 490 } 491 492 493 /* Launch the NM thread for the mounted volume */ 494 int dlm_launch_thread(struct dlm_ctxt *dlm) 495 { 496 mlog(0, "starting dlm thread...\n"); 497 498 dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); 499 if (IS_ERR(dlm->dlm_thread_task)) { 500 mlog_errno(PTR_ERR(dlm->dlm_thread_task)); 501 dlm->dlm_thread_task = NULL; 502 return -EINVAL; 503 } 504 505 return 0; 506 } 507 508 void dlm_complete_thread(struct dlm_ctxt *dlm) 509 { 510 if (dlm->dlm_thread_task) { 511 mlog(ML_KTHREAD, "waiting for dlm thread to exit\n"); 512 kthread_stop(dlm->dlm_thread_task); 513 dlm->dlm_thread_task = NULL; 514 } 515 } 516 517 static int dlm_dirty_list_empty(struct dlm_ctxt *dlm) 518 { 519 int empty; 520 521 spin_lock(&dlm->spinlock); 522 empty = list_empty(&dlm->dirty_list); 523 spin_unlock(&dlm->spinlock); 524 525 return empty; 526 } 527 528 static void dlm_flush_asts(struct dlm_ctxt *dlm) 529 { 530 int ret; 531 struct dlm_lock *lock; 532 struct dlm_lock_resource *res; 533 u8 hi; 534 535 spin_lock(&dlm->ast_lock); 536 while (!list_empty(&dlm->pending_asts)) { 537 lock = list_entry(dlm->pending_asts.next, 538 struct dlm_lock, ast_list); 539 /* get an extra ref on lock */ 540 dlm_lock_get(lock); 541 res = lock->lockres; 542 mlog(0, "delivering an ast for this lockres\n"); 543 544 BUG_ON(!lock->ast_pending); 545 546 /* remove from list (including ref) */ 547 list_del_init(&lock->ast_list); 548 dlm_lock_put(lock); 549 spin_unlock(&dlm->ast_lock); 550 551 if (lock->ml.node != dlm->node_num) { 552 ret = dlm_do_remote_ast(dlm, res, lock); 553 if (ret < 0) 554 mlog_errno(ret); 555 } else 556 dlm_do_local_ast(dlm, res, lock); 557 558 spin_lock(&dlm->ast_lock); 559 560 /* possible that another ast was queued while 561 * we were delivering the last one */ 562 if (!list_empty(&lock->ast_list)) { 563 mlog(0, "aha another ast got queued while " 564 "we were finishing the last one. will " 565 "keep the ast_pending flag set.\n"); 566 } else 567 lock->ast_pending = 0; 568 569 /* drop the extra ref. 570 * this may drop it completely. */ 571 dlm_lock_put(lock); 572 dlm_lockres_release_ast(dlm, res); 573 } 574 575 while (!list_empty(&dlm->pending_basts)) { 576 lock = list_entry(dlm->pending_basts.next, 577 struct dlm_lock, bast_list); 578 /* get an extra ref on lock */ 579 dlm_lock_get(lock); 580 res = lock->lockres; 581 582 BUG_ON(!lock->bast_pending); 583 584 /* get the highest blocked lock, and reset */ 585 spin_lock(&lock->spinlock); 586 BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE); 587 hi = lock->ml.highest_blocked; 588 lock->ml.highest_blocked = LKM_IVMODE; 589 spin_unlock(&lock->spinlock); 590 591 /* remove from list (including ref) */ 592 list_del_init(&lock->bast_list); 593 dlm_lock_put(lock); 594 spin_unlock(&dlm->ast_lock); 595 596 mlog(0, "delivering a bast for this lockres " 597 "(blocked = %d\n", hi); 598 599 if (lock->ml.node != dlm->node_num) { 600 ret = dlm_send_proxy_bast(dlm, res, lock, hi); 601 if (ret < 0) 602 mlog_errno(ret); 603 } else 604 dlm_do_local_bast(dlm, res, lock, hi); 605 606 spin_lock(&dlm->ast_lock); 607 608 /* possible that another bast was queued while 609 * we were delivering the last one */ 610 if (!list_empty(&lock->bast_list)) { 611 mlog(0, "aha another bast got queued while " 612 "we were finishing the last one. will " 613 "keep the bast_pending flag set.\n"); 614 } else 615 lock->bast_pending = 0; 616 617 /* drop the extra ref. 618 * this may drop it completely. */ 619 dlm_lock_put(lock); 620 dlm_lockres_release_ast(dlm, res); 621 } 622 wake_up(&dlm->ast_wq); 623 spin_unlock(&dlm->ast_lock); 624 } 625 626 627 #define DLM_THREAD_TIMEOUT_MS (4 * 1000) 628 #define DLM_THREAD_MAX_DIRTY 100 629 #define DLM_THREAD_MAX_ASTS 10 630 631 static int dlm_thread(void *data) 632 { 633 struct dlm_lock_resource *res; 634 struct dlm_ctxt *dlm = data; 635 unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS); 636 637 mlog(0, "dlm thread running for %s...\n", dlm->name); 638 639 while (!kthread_should_stop()) { 640 int n = DLM_THREAD_MAX_DIRTY; 641 642 /* dlm_shutting_down is very point-in-time, but that 643 * doesn't matter as we'll just loop back around if we 644 * get false on the leading edge of a state 645 * transition. */ 646 dlm_run_purge_list(dlm, dlm_shutting_down(dlm)); 647 648 /* We really don't want to hold dlm->spinlock while 649 * calling dlm_shuffle_lists on each lockres that 650 * needs to have its queues adjusted and AST/BASTs 651 * run. So let's pull each entry off the dirty_list 652 * and drop dlm->spinlock ASAP. Once off the list, 653 * res->spinlock needs to be taken again to protect 654 * the queues while calling dlm_shuffle_lists. */ 655 spin_lock(&dlm->spinlock); 656 while (!list_empty(&dlm->dirty_list)) { 657 int delay = 0; 658 res = list_entry(dlm->dirty_list.next, 659 struct dlm_lock_resource, dirty); 660 661 /* peel a lockres off, remove it from the list, 662 * unset the dirty flag and drop the dlm lock */ 663 BUG_ON(!res); 664 dlm_lockres_get(res); 665 666 spin_lock(&res->spinlock); 667 /* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */ 668 list_del_init(&res->dirty); 669 spin_unlock(&res->spinlock); 670 spin_unlock(&dlm->spinlock); 671 /* Drop dirty_list ref */ 672 dlm_lockres_put(res); 673 674 /* lockres can be re-dirtied/re-added to the 675 * dirty_list in this gap, but that is ok */ 676 677 spin_lock(&res->spinlock); 678 if (res->owner != dlm->node_num) { 679 __dlm_print_one_lock_resource(res); 680 mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n", 681 res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no", 682 res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no", 683 res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no", 684 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); 685 } 686 BUG_ON(res->owner != dlm->node_num); 687 688 /* it is now ok to move lockreses in these states 689 * to the dirty list, assuming that they will only be 690 * dirty for a short while. */ 691 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 692 if (res->state & (DLM_LOCK_RES_IN_PROGRESS | 693 DLM_LOCK_RES_RECOVERING)) { 694 /* move it to the tail and keep going */ 695 res->state &= ~DLM_LOCK_RES_DIRTY; 696 spin_unlock(&res->spinlock); 697 mlog(0, "delaying list shuffling for in-" 698 "progress lockres %.*s, state=%d\n", 699 res->lockname.len, res->lockname.name, 700 res->state); 701 delay = 1; 702 goto in_progress; 703 } 704 705 /* at this point the lockres is not migrating/ 706 * recovering/in-progress. we have the lockres 707 * spinlock and do NOT have the dlm lock. 708 * safe to reserve/queue asts and run the lists. */ 709 710 mlog(0, "calling dlm_shuffle_lists with dlm=%s, " 711 "res=%.*s\n", dlm->name, 712 res->lockname.len, res->lockname.name); 713 714 /* called while holding lockres lock */ 715 dlm_shuffle_lists(dlm, res); 716 res->state &= ~DLM_LOCK_RES_DIRTY; 717 spin_unlock(&res->spinlock); 718 719 dlm_lockres_calc_usage(dlm, res); 720 721 in_progress: 722 723 spin_lock(&dlm->spinlock); 724 /* if the lock was in-progress, stick 725 * it on the back of the list */ 726 if (delay) { 727 spin_lock(&res->spinlock); 728 __dlm_dirty_lockres(dlm, res); 729 spin_unlock(&res->spinlock); 730 } 731 dlm_lockres_put(res); 732 733 /* unlikely, but we may need to give time to 734 * other tasks */ 735 if (!--n) { 736 mlog(0, "throttling dlm_thread\n"); 737 break; 738 } 739 } 740 741 spin_unlock(&dlm->spinlock); 742 dlm_flush_asts(dlm); 743 744 /* yield and continue right away if there is more work to do */ 745 if (!n) { 746 cond_resched(); 747 continue; 748 } 749 750 wait_event_interruptible_timeout(dlm->dlm_thread_wq, 751 !dlm_dirty_list_empty(dlm) || 752 kthread_should_stop(), 753 timeout); 754 } 755 756 mlog(0, "quitting DLM thread\n"); 757 return 0; 758 } 759