1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * dlmmod.c 4 * 5 * standalone DLM module 6 * 7 * Copyright (C) 2004 Oracle. All rights reserved. 8 */ 9 10 11 #include <linux/module.h> 12 #include <linux/fs.h> 13 #include <linux/types.h> 14 #include <linux/slab.h> 15 #include <linux/highmem.h> 16 #include <linux/init.h> 17 #include <linux/sysctl.h> 18 #include <linux/random.h> 19 #include <linux/blkdev.h> 20 #include <linux/socket.h> 21 #include <linux/inet.h> 22 #include <linux/spinlock.h> 23 #include <linux/delay.h> 24 #include <linux/string_choices.h> 25 26 #include "../cluster/heartbeat.h" 27 #include "../cluster/nodemanager.h" 28 #include "../cluster/tcp.h" 29 30 #include "dlmapi.h" 31 #include "dlmcommon.h" 32 #include "dlmdomain.h" 33 #include "dlmdebug.h" 34 35 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) 36 #include "../cluster/masklog.h" 37 38 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 39 struct dlm_master_list_entry *mle, 40 struct o2nm_node *node, 41 int idx); 42 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 43 struct dlm_master_list_entry *mle, 44 struct o2nm_node *node, 45 int idx); 46 47 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); 48 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 49 struct dlm_lock_resource *res, 50 void *nodemap, u32 flags); 51 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); 52 53 static inline int dlm_mle_equal(struct dlm_ctxt *dlm, 54 struct dlm_master_list_entry *mle, 55 const char *name, 56 unsigned int namelen) 57 { 58 if (dlm != mle->dlm) 59 return 0; 60 61 if (namelen != mle->mnamelen || 62 memcmp(name, mle->mname, namelen) != 0) 63 return 0; 64 65 return 1; 66 } 67 68 static struct kmem_cache *dlm_lockres_cache; 69 static struct kmem_cache *dlm_lockname_cache; 70 static struct kmem_cache *dlm_mle_cache; 71 72 static void dlm_mle_release(struct kref *kref); 73 static void dlm_init_mle(struct dlm_master_list_entry *mle, 74 enum dlm_mle_type type, 75 struct dlm_ctxt *dlm, 76 struct dlm_lock_resource *res, 77 const char *name, 78 unsigned int namelen); 79 static void dlm_put_mle(struct dlm_master_list_entry *mle); 80 static void __dlm_put_mle(struct dlm_master_list_entry *mle); 81 static int dlm_find_mle(struct dlm_ctxt *dlm, 82 struct dlm_master_list_entry **mle, 83 char *name, unsigned int namelen); 84 85 static int dlm_do_master_request(struct dlm_lock_resource *res, 86 struct dlm_master_list_entry *mle, int to); 87 88 89 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 90 struct dlm_lock_resource *res, 91 struct dlm_master_list_entry *mle, 92 int *blocked); 93 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 94 struct dlm_lock_resource *res, 95 struct dlm_master_list_entry *mle, 96 int blocked); 97 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 98 struct dlm_lock_resource *res, 99 struct dlm_master_list_entry *mle, 100 struct dlm_master_list_entry **oldmle, 101 const char *name, unsigned int namelen, 102 u8 new_master, u8 master); 103 104 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 105 struct dlm_lock_resource *res); 106 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 107 struct dlm_lock_resource *res); 108 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 109 struct dlm_lock_resource *res, 110 u8 target); 111 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 112 struct dlm_lock_resource *res); 113 114 115 int dlm_is_host_down(int errno) 116 { 117 switch (errno) { 118 case -EBADF: 119 case -ECONNREFUSED: 120 case -ENOTCONN: 121 case -ECONNRESET: 122 case -EPIPE: 123 case -EHOSTDOWN: 124 case -EHOSTUNREACH: 125 case -ETIMEDOUT: 126 case -ECONNABORTED: 127 case -ENETDOWN: 128 case -ENETUNREACH: 129 case -ENETRESET: 130 case -ESHUTDOWN: 131 case -ENOPROTOOPT: 132 case -EINVAL: /* if returned from our tcp code, 133 this means there is no socket */ 134 return 1; 135 } 136 return 0; 137 } 138 139 140 /* 141 * MASTER LIST FUNCTIONS 142 */ 143 144 145 /* 146 * regarding master list entries and heartbeat callbacks: 147 * 148 * in order to avoid sleeping and allocation that occurs in 149 * heartbeat, master list entries are simply attached to the 150 * dlm's established heartbeat callbacks. the mle is attached 151 * when it is created, and since the dlm->spinlock is held at 152 * that time, any heartbeat event will be properly discovered 153 * by the mle. the mle needs to be detached from the 154 * dlm->mle_hb_events list as soon as heartbeat events are no 155 * longer useful to the mle, and before the mle is freed. 156 * 157 * as a general rule, heartbeat events are no longer needed by 158 * the mle once an "answer" regarding the lock master has been 159 * received. 160 */ 161 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, 162 struct dlm_master_list_entry *mle) 163 { 164 assert_spin_locked(&dlm->spinlock); 165 166 list_add_tail(&mle->hb_events, &dlm->mle_hb_events); 167 } 168 169 170 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 171 struct dlm_master_list_entry *mle) 172 { 173 if (!list_empty(&mle->hb_events)) 174 list_del_init(&mle->hb_events); 175 } 176 177 178 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, 179 struct dlm_master_list_entry *mle) 180 { 181 spin_lock(&dlm->spinlock); 182 __dlm_mle_detach_hb_events(dlm, mle); 183 spin_unlock(&dlm->spinlock); 184 } 185 186 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) 187 { 188 struct dlm_ctxt *dlm; 189 dlm = mle->dlm; 190 191 assert_spin_locked(&dlm->spinlock); 192 assert_spin_locked(&dlm->master_lock); 193 mle->inuse++; 194 kref_get(&mle->mle_refs); 195 } 196 197 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) 198 { 199 struct dlm_ctxt *dlm; 200 dlm = mle->dlm; 201 202 spin_lock(&dlm->spinlock); 203 spin_lock(&dlm->master_lock); 204 mle->inuse--; 205 __dlm_put_mle(mle); 206 spin_unlock(&dlm->master_lock); 207 spin_unlock(&dlm->spinlock); 208 209 } 210 211 /* remove from list and free */ 212 static void __dlm_put_mle(struct dlm_master_list_entry *mle) 213 { 214 struct dlm_ctxt *dlm; 215 dlm = mle->dlm; 216 217 assert_spin_locked(&dlm->spinlock); 218 assert_spin_locked(&dlm->master_lock); 219 if (!kref_read(&mle->mle_refs)) { 220 /* this may or may not crash, but who cares. 221 * it's a BUG. */ 222 mlog(ML_ERROR, "bad mle: %p\n", mle); 223 dlm_print_one_mle(mle); 224 BUG(); 225 } else 226 kref_put(&mle->mle_refs, dlm_mle_release); 227 } 228 229 230 /* must not have any spinlocks coming in */ 231 static void dlm_put_mle(struct dlm_master_list_entry *mle) 232 { 233 struct dlm_ctxt *dlm; 234 dlm = mle->dlm; 235 236 spin_lock(&dlm->spinlock); 237 spin_lock(&dlm->master_lock); 238 __dlm_put_mle(mle); 239 spin_unlock(&dlm->master_lock); 240 spin_unlock(&dlm->spinlock); 241 } 242 243 static inline void dlm_get_mle(struct dlm_master_list_entry *mle) 244 { 245 kref_get(&mle->mle_refs); 246 } 247 248 static void dlm_init_mle(struct dlm_master_list_entry *mle, 249 enum dlm_mle_type type, 250 struct dlm_ctxt *dlm, 251 struct dlm_lock_resource *res, 252 const char *name, 253 unsigned int namelen) 254 { 255 assert_spin_locked(&dlm->spinlock); 256 257 mle->dlm = dlm; 258 mle->type = type; 259 INIT_HLIST_NODE(&mle->master_hash_node); 260 INIT_LIST_HEAD(&mle->hb_events); 261 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); 262 spin_lock_init(&mle->spinlock); 263 init_waitqueue_head(&mle->wq); 264 atomic_set(&mle->woken, 0); 265 kref_init(&mle->mle_refs); 266 bitmap_zero(mle->response_map, O2NM_MAX_NODES); 267 mle->master = O2NM_MAX_NODES; 268 mle->new_master = O2NM_MAX_NODES; 269 mle->inuse = 0; 270 271 BUG_ON(mle->type != DLM_MLE_BLOCK && 272 mle->type != DLM_MLE_MASTER && 273 mle->type != DLM_MLE_MIGRATION); 274 275 if (mle->type == DLM_MLE_MASTER) { 276 BUG_ON(!res); 277 mle->mleres = res; 278 memcpy(mle->mname, res->lockname.name, res->lockname.len); 279 mle->mnamelen = res->lockname.len; 280 mle->mnamehash = res->lockname.hash; 281 } else { 282 BUG_ON(!name); 283 mle->mleres = NULL; 284 memcpy(mle->mname, name, namelen); 285 mle->mnamelen = namelen; 286 mle->mnamehash = dlm_lockid_hash(name, namelen); 287 } 288 289 atomic_inc(&dlm->mle_tot_count[mle->type]); 290 atomic_inc(&dlm->mle_cur_count[mle->type]); 291 292 /* copy off the node_map and register hb callbacks on our copy */ 293 bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES); 294 bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES); 295 clear_bit(dlm->node_num, mle->vote_map); 296 clear_bit(dlm->node_num, mle->node_map); 297 298 /* attach the mle to the domain node up/down events */ 299 __dlm_mle_attach_hb_events(dlm, mle); 300 } 301 302 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) 303 { 304 assert_spin_locked(&dlm->spinlock); 305 assert_spin_locked(&dlm->master_lock); 306 307 if (!hlist_unhashed(&mle->master_hash_node)) 308 hlist_del_init(&mle->master_hash_node); 309 } 310 311 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) 312 { 313 struct hlist_head *bucket; 314 315 assert_spin_locked(&dlm->master_lock); 316 317 bucket = dlm_master_hash(dlm, mle->mnamehash); 318 hlist_add_head(&mle->master_hash_node, bucket); 319 } 320 321 /* returns 1 if found, 0 if not */ 322 static int dlm_find_mle(struct dlm_ctxt *dlm, 323 struct dlm_master_list_entry **mle, 324 char *name, unsigned int namelen) 325 { 326 struct dlm_master_list_entry *tmpmle; 327 struct hlist_head *bucket; 328 unsigned int hash; 329 330 assert_spin_locked(&dlm->master_lock); 331 332 hash = dlm_lockid_hash(name, namelen); 333 bucket = dlm_master_hash(dlm, hash); 334 hlist_for_each_entry(tmpmle, bucket, master_hash_node) { 335 if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) 336 continue; 337 dlm_get_mle(tmpmle); 338 *mle = tmpmle; 339 return 1; 340 } 341 return 0; 342 } 343 344 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) 345 { 346 struct dlm_master_list_entry *mle; 347 348 assert_spin_locked(&dlm->spinlock); 349 350 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 351 if (node_up) 352 dlm_mle_node_up(dlm, mle, NULL, idx); 353 else 354 dlm_mle_node_down(dlm, mle, NULL, idx); 355 } 356 } 357 358 static void dlm_mle_node_down(struct dlm_ctxt *dlm, 359 struct dlm_master_list_entry *mle, 360 struct o2nm_node *node, int idx) 361 { 362 spin_lock(&mle->spinlock); 363 364 if (!test_bit(idx, mle->node_map)) 365 mlog(0, "node %u already removed from nodemap!\n", idx); 366 else 367 clear_bit(idx, mle->node_map); 368 369 spin_unlock(&mle->spinlock); 370 } 371 372 static void dlm_mle_node_up(struct dlm_ctxt *dlm, 373 struct dlm_master_list_entry *mle, 374 struct o2nm_node *node, int idx) 375 { 376 spin_lock(&mle->spinlock); 377 378 if (test_bit(idx, mle->node_map)) 379 mlog(0, "node %u already in node map!\n", idx); 380 else 381 set_bit(idx, mle->node_map); 382 383 spin_unlock(&mle->spinlock); 384 } 385 386 387 int dlm_init_mle_cache(void) 388 { 389 dlm_mle_cache = kmem_cache_create("o2dlm_mle", 390 sizeof(struct dlm_master_list_entry), 391 0, SLAB_HWCACHE_ALIGN, 392 NULL); 393 if (dlm_mle_cache == NULL) 394 return -ENOMEM; 395 return 0; 396 } 397 398 void dlm_destroy_mle_cache(void) 399 { 400 kmem_cache_destroy(dlm_mle_cache); 401 } 402 403 static void dlm_mle_release(struct kref *kref) 404 { 405 struct dlm_master_list_entry *mle; 406 struct dlm_ctxt *dlm; 407 408 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); 409 dlm = mle->dlm; 410 411 assert_spin_locked(&dlm->spinlock); 412 assert_spin_locked(&dlm->master_lock); 413 414 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, 415 mle->type); 416 417 /* remove from list if not already */ 418 __dlm_unlink_mle(dlm, mle); 419 420 /* detach the mle from the domain node up/down events */ 421 __dlm_mle_detach_hb_events(dlm, mle); 422 423 atomic_dec(&dlm->mle_cur_count[mle->type]); 424 425 /* NOTE: kfree under spinlock here. 426 * if this is bad, we can move this to a freelist. */ 427 kmem_cache_free(dlm_mle_cache, mle); 428 } 429 430 431 /* 432 * LOCK RESOURCE FUNCTIONS 433 */ 434 435 int dlm_init_master_caches(void) 436 { 437 dlm_lockres_cache = kmem_cache_create("o2dlm_lockres", 438 sizeof(struct dlm_lock_resource), 439 0, SLAB_HWCACHE_ALIGN, NULL); 440 if (!dlm_lockres_cache) 441 goto bail; 442 443 dlm_lockname_cache = kmem_cache_create("o2dlm_lockname", 444 DLM_LOCKID_NAME_MAX, 0, 445 SLAB_HWCACHE_ALIGN, NULL); 446 if (!dlm_lockname_cache) 447 goto bail; 448 449 return 0; 450 bail: 451 dlm_destroy_master_caches(); 452 return -ENOMEM; 453 } 454 455 void dlm_destroy_master_caches(void) 456 { 457 kmem_cache_destroy(dlm_lockname_cache); 458 dlm_lockname_cache = NULL; 459 460 kmem_cache_destroy(dlm_lockres_cache); 461 dlm_lockres_cache = NULL; 462 } 463 464 static void dlm_lockres_release(struct kref *kref) 465 { 466 struct dlm_lock_resource *res; 467 struct dlm_ctxt *dlm; 468 469 res = container_of(kref, struct dlm_lock_resource, refs); 470 dlm = res->dlm; 471 472 /* This should not happen -- all lockres' have a name 473 * associated with them at init time. */ 474 BUG_ON(!res->lockname.name); 475 476 mlog(0, "destroying lockres %.*s\n", res->lockname.len, 477 res->lockname.name); 478 479 atomic_dec(&dlm->res_cur_count); 480 481 if (!hlist_unhashed(&res->hash_node) || 482 !list_empty(&res->granted) || 483 !list_empty(&res->converting) || 484 !list_empty(&res->blocked) || 485 !list_empty(&res->dirty) || 486 !list_empty(&res->recovering) || 487 !list_empty(&res->purge)) { 488 mlog(ML_ERROR, 489 "Going to BUG for resource %.*s." 490 " We're on a list! [%c%c%c%c%c%c%c]\n", 491 res->lockname.len, res->lockname.name, 492 !hlist_unhashed(&res->hash_node) ? 'H' : ' ', 493 !list_empty(&res->granted) ? 'G' : ' ', 494 !list_empty(&res->converting) ? 'C' : ' ', 495 !list_empty(&res->blocked) ? 'B' : ' ', 496 !list_empty(&res->dirty) ? 'D' : ' ', 497 !list_empty(&res->recovering) ? 'R' : ' ', 498 !list_empty(&res->purge) ? 'P' : ' '); 499 500 dlm_print_one_lock_resource(res); 501 } 502 503 /* By the time we're ready to blow this guy away, we shouldn't 504 * be on any lists. */ 505 BUG_ON(!hlist_unhashed(&res->hash_node)); 506 BUG_ON(!list_empty(&res->granted)); 507 BUG_ON(!list_empty(&res->converting)); 508 BUG_ON(!list_empty(&res->blocked)); 509 BUG_ON(!list_empty(&res->dirty)); 510 BUG_ON(!list_empty(&res->recovering)); 511 BUG_ON(!list_empty(&res->purge)); 512 513 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); 514 515 kmem_cache_free(dlm_lockres_cache, res); 516 } 517 518 void dlm_lockres_put(struct dlm_lock_resource *res) 519 { 520 kref_put(&res->refs, dlm_lockres_release); 521 } 522 523 static void dlm_init_lockres(struct dlm_ctxt *dlm, 524 struct dlm_lock_resource *res, 525 const char *name, unsigned int namelen) 526 { 527 char *qname; 528 529 /* If we memset here, we lose our reference to the kmalloc'd 530 * res->lockname.name, so be sure to init every field 531 * correctly! */ 532 533 qname = (char *) res->lockname.name; 534 memcpy(qname, name, namelen); 535 536 res->lockname.len = namelen; 537 res->lockname.hash = dlm_lockid_hash(name, namelen); 538 539 init_waitqueue_head(&res->wq); 540 spin_lock_init(&res->spinlock); 541 INIT_HLIST_NODE(&res->hash_node); 542 INIT_LIST_HEAD(&res->granted); 543 INIT_LIST_HEAD(&res->converting); 544 INIT_LIST_HEAD(&res->blocked); 545 INIT_LIST_HEAD(&res->dirty); 546 INIT_LIST_HEAD(&res->recovering); 547 INIT_LIST_HEAD(&res->purge); 548 INIT_LIST_HEAD(&res->tracking); 549 atomic_set(&res->asts_reserved, 0); 550 res->migration_pending = 0; 551 res->inflight_locks = 0; 552 res->inflight_assert_workers = 0; 553 554 res->dlm = dlm; 555 556 kref_init(&res->refs); 557 558 atomic_inc(&dlm->res_tot_count); 559 atomic_inc(&dlm->res_cur_count); 560 561 /* just for consistency */ 562 spin_lock(&res->spinlock); 563 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 564 spin_unlock(&res->spinlock); 565 566 res->state = DLM_LOCK_RES_IN_PROGRESS; 567 568 res->last_used = 0; 569 570 spin_lock(&dlm->track_lock); 571 list_add_tail(&res->tracking, &dlm->tracking_list); 572 spin_unlock(&dlm->track_lock); 573 574 memset(res->lvb, 0, DLM_LVB_LEN); 575 bitmap_zero(res->refmap, O2NM_MAX_NODES); 576 } 577 578 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, 579 const char *name, 580 unsigned int namelen) 581 { 582 struct dlm_lock_resource *res = NULL; 583 584 res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); 585 if (!res) 586 goto error; 587 588 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); 589 if (!res->lockname.name) 590 goto error; 591 592 dlm_init_lockres(dlm, res, name, namelen); 593 return res; 594 595 error: 596 if (res) 597 kmem_cache_free(dlm_lockres_cache, res); 598 return NULL; 599 } 600 601 void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, 602 struct dlm_lock_resource *res, int bit) 603 { 604 assert_spin_locked(&res->spinlock); 605 606 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, 607 res->lockname.name, bit, __builtin_return_address(0)); 608 609 set_bit(bit, res->refmap); 610 } 611 612 void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, 613 struct dlm_lock_resource *res, int bit) 614 { 615 assert_spin_locked(&res->spinlock); 616 617 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, 618 res->lockname.name, bit, __builtin_return_address(0)); 619 620 clear_bit(bit, res->refmap); 621 } 622 623 static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 624 struct dlm_lock_resource *res) 625 { 626 res->inflight_locks++; 627 628 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, 629 res->lockname.len, res->lockname.name, res->inflight_locks, 630 __builtin_return_address(0)); 631 } 632 633 void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 634 struct dlm_lock_resource *res) 635 { 636 assert_spin_locked(&res->spinlock); 637 __dlm_lockres_grab_inflight_ref(dlm, res); 638 } 639 640 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, 641 struct dlm_lock_resource *res) 642 { 643 assert_spin_locked(&res->spinlock); 644 645 BUG_ON(res->inflight_locks == 0); 646 647 res->inflight_locks--; 648 649 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, 650 res->lockname.len, res->lockname.name, res->inflight_locks, 651 __builtin_return_address(0)); 652 653 wake_up(&res->wq); 654 } 655 656 void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm, 657 struct dlm_lock_resource *res) 658 { 659 assert_spin_locked(&res->spinlock); 660 res->inflight_assert_workers++; 661 mlog(0, "%s:%.*s: inflight assert worker++: now %u\n", 662 dlm->name, res->lockname.len, res->lockname.name, 663 res->inflight_assert_workers); 664 } 665 666 static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm, 667 struct dlm_lock_resource *res) 668 { 669 assert_spin_locked(&res->spinlock); 670 BUG_ON(res->inflight_assert_workers == 0); 671 res->inflight_assert_workers--; 672 mlog(0, "%s:%.*s: inflight assert worker--: now %u\n", 673 dlm->name, res->lockname.len, res->lockname.name, 674 res->inflight_assert_workers); 675 } 676 677 static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm, 678 struct dlm_lock_resource *res) 679 { 680 spin_lock(&res->spinlock); 681 __dlm_lockres_drop_inflight_worker(dlm, res); 682 spin_unlock(&res->spinlock); 683 } 684 685 /* 686 * lookup a lock resource by name. 687 * may already exist in the hashtable. 688 * lockid is null terminated 689 * 690 * if not, allocate enough for the lockres and for 691 * the temporary structure used in doing the mastering. 692 * 693 * also, do a lookup in the dlm->master_list to see 694 * if another node has begun mastering the same lock. 695 * if so, there should be a block entry in there 696 * for this name, and we should *not* attempt to master 697 * the lock here. need to wait around for that node 698 * to assert_master (or die). 699 * 700 */ 701 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, 702 const char *lockid, 703 int namelen, 704 int flags) 705 { 706 struct dlm_lock_resource *tmpres=NULL, *res=NULL; 707 struct dlm_master_list_entry *mle = NULL; 708 struct dlm_master_list_entry *alloc_mle = NULL; 709 int blocked = 0; 710 int ret, nodenum; 711 struct dlm_node_iter iter; 712 unsigned int hash; 713 int tries = 0; 714 int bit, wait_on_recovery = 0; 715 716 BUG_ON(!lockid); 717 718 hash = dlm_lockid_hash(lockid, namelen); 719 720 mlog(0, "get lockres %s (len %d)\n", lockid, namelen); 721 722 lookup: 723 spin_lock(&dlm->spinlock); 724 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 725 if (tmpres) { 726 spin_unlock(&dlm->spinlock); 727 spin_lock(&tmpres->spinlock); 728 729 /* 730 * Right after dlm spinlock was released, dlm_thread could have 731 * purged the lockres. Check if lockres got unhashed. If so 732 * start over. 733 */ 734 if (hlist_unhashed(&tmpres->hash_node)) { 735 spin_unlock(&tmpres->spinlock); 736 dlm_lockres_put(tmpres); 737 tmpres = NULL; 738 goto lookup; 739 } 740 741 /* Wait on the thread that is mastering the resource */ 742 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 743 __dlm_wait_on_lockres(tmpres); 744 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); 745 spin_unlock(&tmpres->spinlock); 746 dlm_lockres_put(tmpres); 747 tmpres = NULL; 748 goto lookup; 749 } 750 751 /* Wait on the resource purge to complete before continuing */ 752 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { 753 BUG_ON(tmpres->owner == dlm->node_num); 754 __dlm_wait_on_lockres_flags(tmpres, 755 DLM_LOCK_RES_DROPPING_REF); 756 spin_unlock(&tmpres->spinlock); 757 dlm_lockres_put(tmpres); 758 tmpres = NULL; 759 goto lookup; 760 } 761 762 /* Grab inflight ref to pin the resource */ 763 dlm_lockres_grab_inflight_ref(dlm, tmpres); 764 765 spin_unlock(&tmpres->spinlock); 766 if (res) { 767 spin_lock(&dlm->track_lock); 768 if (!list_empty(&res->tracking)) 769 list_del_init(&res->tracking); 770 else 771 mlog(ML_ERROR, "Resource %.*s not " 772 "on the Tracking list\n", 773 res->lockname.len, 774 res->lockname.name); 775 spin_unlock(&dlm->track_lock); 776 dlm_lockres_put(res); 777 } 778 res = tmpres; 779 goto leave; 780 } 781 782 if (!res) { 783 spin_unlock(&dlm->spinlock); 784 mlog(0, "allocating a new resource\n"); 785 /* nothing found and we need to allocate one. */ 786 alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 787 if (!alloc_mle) 788 goto leave; 789 res = dlm_new_lockres(dlm, lockid, namelen); 790 if (!res) 791 goto leave; 792 goto lookup; 793 } 794 795 mlog(0, "no lockres found, allocated our own: %p\n", res); 796 797 if (flags & LKM_LOCAL) { 798 /* caller knows it's safe to assume it's not mastered elsewhere 799 * DONE! return right away */ 800 spin_lock(&res->spinlock); 801 dlm_change_lockres_owner(dlm, res, dlm->node_num); 802 __dlm_insert_lockres(dlm, res); 803 dlm_lockres_grab_inflight_ref(dlm, res); 804 spin_unlock(&res->spinlock); 805 spin_unlock(&dlm->spinlock); 806 /* lockres still marked IN_PROGRESS */ 807 goto wake_waiters; 808 } 809 810 /* check master list to see if another node has started mastering it */ 811 spin_lock(&dlm->master_lock); 812 813 /* if we found a block, wait for lock to be mastered by another node */ 814 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); 815 if (blocked) { 816 int mig; 817 if (mle->type == DLM_MLE_MASTER) { 818 mlog(ML_ERROR, "master entry for nonexistent lock!\n"); 819 BUG(); 820 } 821 mig = (mle->type == DLM_MLE_MIGRATION); 822 /* if there is a migration in progress, let the migration 823 * finish before continuing. we can wait for the absence 824 * of the MIGRATION mle: either the migrate finished or 825 * one of the nodes died and the mle was cleaned up. 826 * if there is a BLOCK here, but it already has a master 827 * set, we are too late. the master does not have a ref 828 * for us in the refmap. detach the mle and drop it. 829 * either way, go back to the top and start over. */ 830 if (mig || mle->master != O2NM_MAX_NODES) { 831 BUG_ON(mig && mle->master == dlm->node_num); 832 /* we arrived too late. the master does not 833 * have a ref for us. retry. */ 834 mlog(0, "%s:%.*s: late on %s\n", 835 dlm->name, namelen, lockid, 836 mig ? "MIGRATION" : "BLOCK"); 837 spin_unlock(&dlm->master_lock); 838 spin_unlock(&dlm->spinlock); 839 840 /* master is known, detach */ 841 if (!mig) 842 dlm_mle_detach_hb_events(dlm, mle); 843 dlm_put_mle(mle); 844 mle = NULL; 845 /* this is lame, but we can't wait on either 846 * the mle or lockres waitqueue here */ 847 if (mig) 848 msleep(100); 849 goto lookup; 850 } 851 } else { 852 /* go ahead and try to master lock on this node */ 853 mle = alloc_mle; 854 /* make sure this does not get freed below */ 855 alloc_mle = NULL; 856 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); 857 set_bit(dlm->node_num, mle->maybe_map); 858 __dlm_insert_mle(dlm, mle); 859 860 /* still holding the dlm spinlock, check the recovery map 861 * to see if there are any nodes that still need to be 862 * considered. these will not appear in the mle nodemap 863 * but they might own this lockres. wait on them. */ 864 bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); 865 if (bit < O2NM_MAX_NODES) { 866 mlog(0, "%s: res %.*s, At least one node (%d) " 867 "to recover before lock mastery can begin\n", 868 dlm->name, namelen, (char *)lockid, bit); 869 wait_on_recovery = 1; 870 } 871 } 872 873 /* at this point there is either a DLM_MLE_BLOCK or a 874 * DLM_MLE_MASTER on the master list, so it's safe to add the 875 * lockres to the hashtable. anyone who finds the lock will 876 * still have to wait on the IN_PROGRESS. */ 877 878 /* finally add the lockres to its hash bucket */ 879 __dlm_insert_lockres(dlm, res); 880 881 /* since this lockres is new it doesn't not require the spinlock */ 882 __dlm_lockres_grab_inflight_ref(dlm, res); 883 884 /* get an extra ref on the mle in case this is a BLOCK 885 * if so, the creator of the BLOCK may try to put the last 886 * ref at this time in the assert master handler, so we 887 * need an extra one to keep from a bad ptr deref. */ 888 dlm_get_mle_inuse(mle); 889 spin_unlock(&dlm->master_lock); 890 spin_unlock(&dlm->spinlock); 891 892 redo_request: 893 while (wait_on_recovery) { 894 /* any cluster changes that occurred after dropping the 895 * dlm spinlock would be detectable be a change on the mle, 896 * so we only need to clear out the recovery map once. */ 897 if (dlm_is_recovery_lock(lockid, namelen)) { 898 mlog(0, "%s: Recovery map is not empty, but must " 899 "master $RECOVERY lock now\n", dlm->name); 900 if (!dlm_pre_master_reco_lockres(dlm, res)) 901 wait_on_recovery = 0; 902 else { 903 mlog(0, "%s: waiting 500ms for heartbeat state " 904 "change\n", dlm->name); 905 msleep(500); 906 } 907 continue; 908 } 909 910 dlm_kick_recovery_thread(dlm); 911 msleep(1000); 912 dlm_wait_for_recovery(dlm); 913 914 spin_lock(&dlm->spinlock); 915 bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); 916 if (bit < O2NM_MAX_NODES) { 917 mlog(0, "%s: res %.*s, At least one node (%d) " 918 "to recover before lock mastery can begin\n", 919 dlm->name, namelen, (char *)lockid, bit); 920 wait_on_recovery = 1; 921 } else 922 wait_on_recovery = 0; 923 spin_unlock(&dlm->spinlock); 924 925 if (wait_on_recovery) 926 dlm_wait_for_node_recovery(dlm, bit, 10000); 927 } 928 929 /* must wait for lock to be mastered elsewhere */ 930 if (blocked) 931 goto wait; 932 933 ret = -EINVAL; 934 dlm_node_iter_init(mle->vote_map, &iter); 935 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 936 ret = dlm_do_master_request(res, mle, nodenum); 937 if (ret < 0) 938 mlog_errno(ret); 939 if (mle->master != O2NM_MAX_NODES) { 940 /* found a master ! */ 941 if (mle->master <= nodenum) 942 break; 943 /* if our master request has not reached the master 944 * yet, keep going until it does. this is how the 945 * master will know that asserts are needed back to 946 * the lower nodes. */ 947 mlog(0, "%s: res %.*s, Requests only up to %u but " 948 "master is %u, keep going\n", dlm->name, namelen, 949 lockid, nodenum, mle->master); 950 } 951 } 952 953 wait: 954 /* keep going until the response map includes all nodes */ 955 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); 956 if (ret < 0) { 957 wait_on_recovery = 1; 958 mlog(0, "%s: res %.*s, Node map changed, redo the master " 959 "request now, blocked=%d\n", dlm->name, res->lockname.len, 960 res->lockname.name, blocked); 961 if (++tries > 20) { 962 mlog(ML_ERROR, "%s: res %.*s, Spinning on " 963 "dlm_wait_for_lock_mastery, blocked = %d\n", 964 dlm->name, res->lockname.len, 965 res->lockname.name, blocked); 966 dlm_print_one_lock_resource(res); 967 dlm_print_one_mle(mle); 968 tries = 0; 969 } 970 goto redo_request; 971 } 972 973 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, 974 res->lockname.name, res->owner); 975 /* make sure we never continue without this */ 976 BUG_ON(res->owner == O2NM_MAX_NODES); 977 978 /* master is known, detach if not already detached */ 979 dlm_mle_detach_hb_events(dlm, mle); 980 dlm_put_mle(mle); 981 /* put the extra ref */ 982 dlm_put_mle_inuse(mle); 983 984 wake_waiters: 985 spin_lock(&res->spinlock); 986 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 987 spin_unlock(&res->spinlock); 988 wake_up(&res->wq); 989 990 leave: 991 /* need to free the unused mle */ 992 if (alloc_mle) 993 kmem_cache_free(dlm_mle_cache, alloc_mle); 994 995 return res; 996 } 997 998 999 #define DLM_MASTERY_TIMEOUT_MS 5000 1000 1001 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 1002 struct dlm_lock_resource *res, 1003 struct dlm_master_list_entry *mle, 1004 int *blocked) 1005 { 1006 u8 m; 1007 int ret, bit; 1008 int map_changed, voting_done; 1009 int assert, sleep; 1010 1011 recheck: 1012 ret = 0; 1013 assert = 0; 1014 1015 /* check if another node has already become the owner */ 1016 spin_lock(&res->spinlock); 1017 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1018 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, 1019 res->lockname.len, res->lockname.name, res->owner); 1020 spin_unlock(&res->spinlock); 1021 /* this will cause the master to re-assert across 1022 * the whole cluster, freeing up mles */ 1023 if (res->owner != dlm->node_num) { 1024 ret = dlm_do_master_request(res, mle, res->owner); 1025 if (ret < 0) { 1026 /* give recovery a chance to run */ 1027 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); 1028 msleep(500); 1029 goto recheck; 1030 } 1031 } 1032 ret = 0; 1033 goto leave; 1034 } 1035 spin_unlock(&res->spinlock); 1036 1037 spin_lock(&mle->spinlock); 1038 m = mle->master; 1039 map_changed = !bitmap_equal(mle->vote_map, mle->node_map, 1040 O2NM_MAX_NODES); 1041 voting_done = bitmap_equal(mle->vote_map, mle->response_map, 1042 O2NM_MAX_NODES); 1043 1044 /* restart if we hit any errors */ 1045 if (map_changed) { 1046 int b; 1047 mlog(0, "%s: %.*s: node map changed, restarting\n", 1048 dlm->name, res->lockname.len, res->lockname.name); 1049 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1050 b = (mle->type == DLM_MLE_BLOCK); 1051 if ((*blocked && !b) || (!*blocked && b)) { 1052 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1053 dlm->name, res->lockname.len, res->lockname.name, 1054 *blocked, b); 1055 *blocked = b; 1056 } 1057 spin_unlock(&mle->spinlock); 1058 if (ret < 0) { 1059 mlog_errno(ret); 1060 goto leave; 1061 } 1062 mlog(0, "%s:%.*s: restart lock mastery succeeded, " 1063 "rechecking now\n", dlm->name, res->lockname.len, 1064 res->lockname.name); 1065 goto recheck; 1066 } else { 1067 if (!voting_done) { 1068 mlog(0, "map not changed and voting not done " 1069 "for %s:%.*s\n", dlm->name, res->lockname.len, 1070 res->lockname.name); 1071 } 1072 } 1073 1074 if (m != O2NM_MAX_NODES) { 1075 /* another node has done an assert! 1076 * all done! */ 1077 sleep = 0; 1078 } else { 1079 sleep = 1; 1080 /* have all nodes responded? */ 1081 if (voting_done && !*blocked) { 1082 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); 1083 if (dlm->node_num <= bit) { 1084 /* my node number is lowest. 1085 * now tell other nodes that I am 1086 * mastering this. */ 1087 mle->master = dlm->node_num; 1088 /* ref was grabbed in get_lock_resource 1089 * will be dropped in dlmlock_master */ 1090 assert = 1; 1091 sleep = 0; 1092 } 1093 /* if voting is done, but we have not received 1094 * an assert master yet, we must sleep */ 1095 } 1096 } 1097 1098 spin_unlock(&mle->spinlock); 1099 1100 /* sleep if we haven't finished voting yet */ 1101 if (sleep) { 1102 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); 1103 atomic_set(&mle->woken, 0); 1104 (void)wait_event_timeout(mle->wq, 1105 (atomic_read(&mle->woken) == 1), 1106 timeo); 1107 if (res->owner == O2NM_MAX_NODES) { 1108 mlog(0, "%s:%.*s: waiting again\n", dlm->name, 1109 res->lockname.len, res->lockname.name); 1110 goto recheck; 1111 } 1112 mlog(0, "done waiting, master is %u\n", res->owner); 1113 ret = 0; 1114 goto leave; 1115 } 1116 1117 ret = 0; /* done */ 1118 if (assert) { 1119 m = dlm->node_num; 1120 mlog(0, "about to master %.*s here, this=%u\n", 1121 res->lockname.len, res->lockname.name, m); 1122 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); 1123 if (ret) { 1124 /* This is a failure in the network path, 1125 * not in the response to the assert_master 1126 * (any nonzero response is a BUG on this node). 1127 * Most likely a socket just got disconnected 1128 * due to node death. */ 1129 mlog_errno(ret); 1130 } 1131 /* no longer need to restart lock mastery. 1132 * all living nodes have been contacted. */ 1133 ret = 0; 1134 } 1135 1136 /* set the lockres owner */ 1137 spin_lock(&res->spinlock); 1138 /* mastery reference obtained either during 1139 * assert_master_handler or in get_lock_resource */ 1140 dlm_change_lockres_owner(dlm, res, m); 1141 spin_unlock(&res->spinlock); 1142 1143 leave: 1144 return ret; 1145 } 1146 1147 struct dlm_bitmap_diff_iter 1148 { 1149 int curnode; 1150 unsigned long *orig_bm; 1151 unsigned long *cur_bm; 1152 unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; 1153 }; 1154 1155 enum dlm_node_state_change 1156 { 1157 NODE_DOWN = -1, 1158 NODE_NO_CHANGE = 0, 1159 NODE_UP 1160 }; 1161 1162 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, 1163 unsigned long *orig_bm, 1164 unsigned long *cur_bm) 1165 { 1166 unsigned long p1, p2; 1167 int i; 1168 1169 iter->curnode = -1; 1170 iter->orig_bm = orig_bm; 1171 iter->cur_bm = cur_bm; 1172 1173 for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { 1174 p1 = *(iter->orig_bm + i); 1175 p2 = *(iter->cur_bm + i); 1176 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); 1177 } 1178 } 1179 1180 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, 1181 enum dlm_node_state_change *state) 1182 { 1183 int bit; 1184 1185 if (iter->curnode >= O2NM_MAX_NODES) 1186 return -ENOENT; 1187 1188 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, 1189 iter->curnode+1); 1190 if (bit >= O2NM_MAX_NODES) { 1191 iter->curnode = O2NM_MAX_NODES; 1192 return -ENOENT; 1193 } 1194 1195 /* if it was there in the original then this node died */ 1196 if (test_bit(bit, iter->orig_bm)) 1197 *state = NODE_DOWN; 1198 else 1199 *state = NODE_UP; 1200 1201 iter->curnode = bit; 1202 return bit; 1203 } 1204 1205 1206 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, 1207 struct dlm_lock_resource *res, 1208 struct dlm_master_list_entry *mle, 1209 int blocked) 1210 { 1211 struct dlm_bitmap_diff_iter bdi; 1212 enum dlm_node_state_change sc; 1213 int node; 1214 int ret = 0; 1215 1216 mlog(0, "something happened such that the " 1217 "master process may need to be restarted!\n"); 1218 1219 assert_spin_locked(&mle->spinlock); 1220 1221 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); 1222 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1223 while (node >= 0) { 1224 if (sc == NODE_UP) { 1225 /* a node came up. clear any old vote from 1226 * the response map and set it in the vote map 1227 * then restart the mastery. */ 1228 mlog(ML_NOTICE, "node %d up while restarting\n", node); 1229 1230 /* redo the master request, but only for the new node */ 1231 mlog(0, "sending request to new node\n"); 1232 clear_bit(node, mle->response_map); 1233 set_bit(node, mle->vote_map); 1234 } else { 1235 mlog(ML_ERROR, "node down! %d\n", node); 1236 if (blocked) { 1237 int lowest = find_first_bit(mle->maybe_map, 1238 O2NM_MAX_NODES); 1239 1240 /* act like it was never there */ 1241 clear_bit(node, mle->maybe_map); 1242 1243 if (node == lowest) { 1244 mlog(0, "expected master %u died" 1245 " while this node was blocked " 1246 "waiting on it!\n", node); 1247 lowest = find_next_bit(mle->maybe_map, 1248 O2NM_MAX_NODES, 1249 lowest+1); 1250 if (lowest < O2NM_MAX_NODES) { 1251 mlog(0, "%s:%.*s:still " 1252 "blocked. waiting on %u " 1253 "now\n", dlm->name, 1254 res->lockname.len, 1255 res->lockname.name, 1256 lowest); 1257 } else { 1258 /* mle is an MLE_BLOCK, but 1259 * there is now nothing left to 1260 * block on. we need to return 1261 * all the way back out and try 1262 * again with an MLE_MASTER. 1263 * dlm_do_local_recovery_cleanup 1264 * has already run, so the mle 1265 * refcount is ok */ 1266 mlog(0, "%s:%.*s: no " 1267 "longer blocking. try to " 1268 "master this here\n", 1269 dlm->name, 1270 res->lockname.len, 1271 res->lockname.name); 1272 mle->type = DLM_MLE_MASTER; 1273 mle->mleres = res; 1274 } 1275 } 1276 } 1277 1278 /* now blank out everything, as if we had never 1279 * contacted anyone */ 1280 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); 1281 bitmap_zero(mle->response_map, O2NM_MAX_NODES); 1282 /* reset the vote_map to the current node_map */ 1283 bitmap_copy(mle->vote_map, mle->node_map, 1284 O2NM_MAX_NODES); 1285 /* put myself into the maybe map */ 1286 if (mle->type != DLM_MLE_BLOCK) 1287 set_bit(dlm->node_num, mle->maybe_map); 1288 } 1289 ret = -EAGAIN; 1290 node = dlm_bitmap_diff_iter_next(&bdi, &sc); 1291 } 1292 return ret; 1293 } 1294 1295 1296 /* 1297 * DLM_MASTER_REQUEST_MSG 1298 * 1299 * returns: 0 on success, 1300 * -errno on a network error 1301 * 1302 * on error, the caller should assume the target node is "dead" 1303 * 1304 */ 1305 1306 static int dlm_do_master_request(struct dlm_lock_resource *res, 1307 struct dlm_master_list_entry *mle, int to) 1308 { 1309 struct dlm_ctxt *dlm = mle->dlm; 1310 struct dlm_master_request request; 1311 int ret, response=0, resend; 1312 1313 memset(&request, 0, sizeof(request)); 1314 request.node_idx = dlm->node_num; 1315 1316 BUG_ON(mle->type == DLM_MLE_MIGRATION); 1317 1318 request.namelen = (u8)mle->mnamelen; 1319 memcpy(request.name, mle->mname, request.namelen); 1320 1321 again: 1322 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, 1323 sizeof(request), to, &response); 1324 if (ret < 0) { 1325 if (ret == -ESRCH) { 1326 /* should never happen */ 1327 mlog(ML_ERROR, "TCP stack not ready!\n"); 1328 BUG(); 1329 } else if (ret == -EINVAL) { 1330 mlog(ML_ERROR, "bad args passed to o2net!\n"); 1331 BUG(); 1332 } else if (ret == -ENOMEM) { 1333 mlog(ML_ERROR, "out of memory while trying to send " 1334 "network message! retrying\n"); 1335 /* this is totally crude */ 1336 msleep(50); 1337 goto again; 1338 } else if (!dlm_is_host_down(ret)) { 1339 /* not a network error. bad. */ 1340 mlog_errno(ret); 1341 mlog(ML_ERROR, "unhandled error!"); 1342 BUG(); 1343 } 1344 /* all other errors should be network errors, 1345 * and likely indicate node death */ 1346 mlog(ML_ERROR, "link to %d went down!\n", to); 1347 goto out; 1348 } 1349 1350 ret = 0; 1351 resend = 0; 1352 spin_lock(&mle->spinlock); 1353 switch (response) { 1354 case DLM_MASTER_RESP_YES: 1355 set_bit(to, mle->response_map); 1356 mlog(0, "node %u is the master, response=YES\n", to); 1357 mlog(0, "%s:%.*s: master node %u now knows I have a " 1358 "reference\n", dlm->name, res->lockname.len, 1359 res->lockname.name, to); 1360 mle->master = to; 1361 break; 1362 case DLM_MASTER_RESP_NO: 1363 mlog(0, "node %u not master, response=NO\n", to); 1364 set_bit(to, mle->response_map); 1365 break; 1366 case DLM_MASTER_RESP_MAYBE: 1367 mlog(0, "node %u not master, response=MAYBE\n", to); 1368 set_bit(to, mle->response_map); 1369 set_bit(to, mle->maybe_map); 1370 break; 1371 case DLM_MASTER_RESP_ERROR: 1372 mlog(0, "node %u hit an error, resending\n", to); 1373 resend = 1; 1374 response = 0; 1375 break; 1376 default: 1377 mlog(ML_ERROR, "bad response! %u\n", response); 1378 BUG(); 1379 } 1380 spin_unlock(&mle->spinlock); 1381 if (resend) { 1382 /* this is also totally crude */ 1383 msleep(50); 1384 goto again; 1385 } 1386 1387 out: 1388 return ret; 1389 } 1390 1391 /* 1392 * locks that can be taken here: 1393 * dlm->spinlock 1394 * res->spinlock 1395 * mle->spinlock 1396 * dlm->master_list 1397 * 1398 * if possible, TRIM THIS DOWN!!! 1399 */ 1400 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, 1401 void **ret_data) 1402 { 1403 u8 response = DLM_MASTER_RESP_MAYBE; 1404 struct dlm_ctxt *dlm = data; 1405 struct dlm_lock_resource *res = NULL; 1406 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; 1407 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; 1408 char *name; 1409 unsigned int namelen, hash; 1410 int found, ret; 1411 int set_maybe; 1412 int dispatch_assert = 0; 1413 int dispatched = 0; 1414 1415 if (!dlm_grab(dlm)) 1416 return DLM_MASTER_RESP_NO; 1417 1418 if (!dlm_domain_fully_joined(dlm)) { 1419 response = DLM_MASTER_RESP_NO; 1420 goto send_response; 1421 } 1422 1423 name = request->name; 1424 namelen = request->namelen; 1425 hash = dlm_lockid_hash(name, namelen); 1426 1427 if (namelen > DLM_LOCKID_NAME_MAX) { 1428 response = DLM_IVBUFLEN; 1429 goto send_response; 1430 } 1431 1432 way_up_top: 1433 spin_lock(&dlm->spinlock); 1434 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1435 if (res) { 1436 spin_unlock(&dlm->spinlock); 1437 1438 /* take care of the easy cases up front */ 1439 spin_lock(&res->spinlock); 1440 1441 /* 1442 * Right after dlm spinlock was released, dlm_thread could have 1443 * purged the lockres. Check if lockres got unhashed. If so 1444 * start over. 1445 */ 1446 if (hlist_unhashed(&res->hash_node)) { 1447 spin_unlock(&res->spinlock); 1448 dlm_lockres_put(res); 1449 goto way_up_top; 1450 } 1451 1452 if (res->state & (DLM_LOCK_RES_RECOVERING| 1453 DLM_LOCK_RES_MIGRATING)) { 1454 spin_unlock(&res->spinlock); 1455 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " 1456 "being recovered/migrated\n"); 1457 response = DLM_MASTER_RESP_ERROR; 1458 if (mle) 1459 kmem_cache_free(dlm_mle_cache, mle); 1460 goto send_response; 1461 } 1462 1463 if (res->owner == dlm->node_num) { 1464 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); 1465 spin_unlock(&res->spinlock); 1466 response = DLM_MASTER_RESP_YES; 1467 if (mle) 1468 kmem_cache_free(dlm_mle_cache, mle); 1469 1470 /* this node is the owner. 1471 * there is some extra work that needs to 1472 * happen now. the requesting node has 1473 * caused all nodes up to this one to 1474 * create mles. this node now needs to 1475 * go back and clean those up. */ 1476 dispatch_assert = 1; 1477 goto send_response; 1478 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1479 spin_unlock(&res->spinlock); 1480 response = DLM_MASTER_RESP_NO; 1481 if (mle) 1482 kmem_cache_free(dlm_mle_cache, mle); 1483 goto send_response; 1484 } 1485 1486 /* ok, there is no owner. either this node is 1487 * being blocked, or it is actively trying to 1488 * master this lock. */ 1489 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1490 mlog(ML_ERROR, "lock with no owner should be " 1491 "in-progress!\n"); 1492 BUG(); 1493 } 1494 1495 spin_lock(&dlm->master_lock); 1496 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1497 if (!found) { 1498 mlog(ML_ERROR, "no mle found for this lock!\n"); 1499 BUG(); 1500 } 1501 set_maybe = 1; 1502 spin_lock(&tmpmle->spinlock); 1503 if (tmpmle->type == DLM_MLE_BLOCK) { 1504 response = DLM_MASTER_RESP_NO; 1505 } else if (tmpmle->type == DLM_MLE_MIGRATION) { 1506 mlog(0, "node %u is master, but trying to migrate to " 1507 "node %u.\n", tmpmle->master, tmpmle->new_master); 1508 if (tmpmle->master == dlm->node_num) { 1509 mlog(ML_ERROR, "no owner on lockres, but this " 1510 "node is trying to migrate it to %u?!\n", 1511 tmpmle->new_master); 1512 BUG(); 1513 } else { 1514 /* the real master can respond on its own */ 1515 response = DLM_MASTER_RESP_NO; 1516 } 1517 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1518 set_maybe = 0; 1519 if (tmpmle->master == dlm->node_num) { 1520 response = DLM_MASTER_RESP_YES; 1521 /* this node will be the owner. 1522 * go back and clean the mles on any 1523 * other nodes */ 1524 dispatch_assert = 1; 1525 dlm_lockres_set_refmap_bit(dlm, res, 1526 request->node_idx); 1527 } else 1528 response = DLM_MASTER_RESP_NO; 1529 } else { 1530 response = DLM_MASTER_RESP_MAYBE; 1531 } 1532 if (set_maybe) 1533 set_bit(request->node_idx, tmpmle->maybe_map); 1534 spin_unlock(&tmpmle->spinlock); 1535 1536 spin_unlock(&dlm->master_lock); 1537 spin_unlock(&res->spinlock); 1538 1539 /* keep the mle attached to heartbeat events */ 1540 dlm_put_mle(tmpmle); 1541 if (mle) 1542 kmem_cache_free(dlm_mle_cache, mle); 1543 goto send_response; 1544 } 1545 1546 /* 1547 * lockres doesn't exist on this node 1548 * if there is an MLE_BLOCK, return NO 1549 * if there is an MLE_MASTER, return MAYBE 1550 * otherwise, add an MLE_BLOCK, return NO 1551 */ 1552 spin_lock(&dlm->master_lock); 1553 found = dlm_find_mle(dlm, &tmpmle, name, namelen); 1554 if (!found) { 1555 /* this lockid has never been seen on this node yet */ 1556 if (!mle) { 1557 spin_unlock(&dlm->master_lock); 1558 spin_unlock(&dlm->spinlock); 1559 1560 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 1561 if (!mle) { 1562 response = DLM_MASTER_RESP_ERROR; 1563 mlog_errno(-ENOMEM); 1564 goto send_response; 1565 } 1566 goto way_up_top; 1567 } 1568 1569 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); 1570 set_bit(request->node_idx, mle->maybe_map); 1571 __dlm_insert_mle(dlm, mle); 1572 response = DLM_MASTER_RESP_NO; 1573 } else { 1574 spin_lock(&tmpmle->spinlock); 1575 if (tmpmle->master == dlm->node_num) { 1576 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); 1577 BUG(); 1578 } 1579 if (tmpmle->type == DLM_MLE_BLOCK) 1580 response = DLM_MASTER_RESP_NO; 1581 else if (tmpmle->type == DLM_MLE_MIGRATION) { 1582 mlog(0, "migration mle was found (%u->%u)\n", 1583 tmpmle->master, tmpmle->new_master); 1584 /* real master can respond on its own */ 1585 response = DLM_MASTER_RESP_NO; 1586 } else 1587 response = DLM_MASTER_RESP_MAYBE; 1588 set_bit(request->node_idx, tmpmle->maybe_map); 1589 spin_unlock(&tmpmle->spinlock); 1590 } 1591 spin_unlock(&dlm->master_lock); 1592 spin_unlock(&dlm->spinlock); 1593 1594 if (found) { 1595 /* keep the mle attached to heartbeat events */ 1596 dlm_put_mle(tmpmle); 1597 } 1598 send_response: 1599 /* 1600 * __dlm_lookup_lockres() grabbed a reference to this lockres. 1601 * The reference is released by dlm_assert_master_worker() under 1602 * the call to dlm_dispatch_assert_master(). If 1603 * dlm_assert_master_worker() isn't called, we drop it here. 1604 */ 1605 if (dispatch_assert) { 1606 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1607 dlm->node_num, res->lockname.len, res->lockname.name); 1608 spin_lock(&res->spinlock); 1609 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1610 DLM_ASSERT_MASTER_MLE_CLEANUP); 1611 if (ret < 0) { 1612 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1613 response = DLM_MASTER_RESP_ERROR; 1614 spin_unlock(&res->spinlock); 1615 dlm_lockres_put(res); 1616 } else { 1617 dispatched = 1; 1618 __dlm_lockres_grab_inflight_worker(dlm, res); 1619 spin_unlock(&res->spinlock); 1620 } 1621 } else { 1622 if (res) 1623 dlm_lockres_put(res); 1624 } 1625 1626 if (!dispatched) 1627 dlm_put(dlm); 1628 return response; 1629 } 1630 1631 /* 1632 * DLM_ASSERT_MASTER_MSG 1633 */ 1634 1635 1636 /* 1637 * NOTE: this can be used for debugging 1638 * can periodically run all locks owned by this node 1639 * and re-assert across the cluster... 1640 */ 1641 static int dlm_do_assert_master(struct dlm_ctxt *dlm, 1642 struct dlm_lock_resource *res, 1643 void *nodemap, u32 flags) 1644 { 1645 struct dlm_assert_master assert; 1646 int to, tmpret; 1647 struct dlm_node_iter iter; 1648 int ret = 0; 1649 int reassert; 1650 const char *lockname = res->lockname.name; 1651 unsigned int namelen = res->lockname.len; 1652 1653 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 1654 1655 spin_lock(&res->spinlock); 1656 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1657 spin_unlock(&res->spinlock); 1658 1659 again: 1660 reassert = 0; 1661 1662 /* note that if this nodemap is empty, it returns 0 */ 1663 dlm_node_iter_init(nodemap, &iter); 1664 while ((to = dlm_node_iter_next(&iter)) >= 0) { 1665 int r = 0; 1666 struct dlm_master_list_entry *mle = NULL; 1667 1668 mlog(0, "sending assert master to %d (%.*s)\n", to, 1669 namelen, lockname); 1670 memset(&assert, 0, sizeof(assert)); 1671 assert.node_idx = dlm->node_num; 1672 assert.namelen = namelen; 1673 memcpy(assert.name, lockname, namelen); 1674 assert.flags = cpu_to_be32(flags); 1675 1676 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, 1677 &assert, sizeof(assert), to, &r); 1678 if (tmpret < 0) { 1679 mlog(ML_ERROR, "Error %d when sending message %u (key " 1680 "0x%x) to node %u\n", tmpret, 1681 DLM_ASSERT_MASTER_MSG, dlm->key, to); 1682 if (!dlm_is_host_down(tmpret)) { 1683 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); 1684 BUG(); 1685 } 1686 /* a node died. finish out the rest of the nodes. */ 1687 mlog(0, "link to %d went down!\n", to); 1688 /* any nonzero status return will do */ 1689 ret = tmpret; 1690 r = 0; 1691 } else if (r < 0) { 1692 /* ok, something horribly messed. kill thyself. */ 1693 mlog(ML_ERROR,"during assert master of %.*s to %u, " 1694 "got %d.\n", namelen, lockname, to, r); 1695 spin_lock(&dlm->spinlock); 1696 spin_lock(&dlm->master_lock); 1697 if (dlm_find_mle(dlm, &mle, (char *)lockname, 1698 namelen)) { 1699 dlm_print_one_mle(mle); 1700 __dlm_put_mle(mle); 1701 } 1702 spin_unlock(&dlm->master_lock); 1703 spin_unlock(&dlm->spinlock); 1704 BUG(); 1705 } 1706 1707 if (r & DLM_ASSERT_RESPONSE_REASSERT && 1708 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { 1709 mlog(ML_ERROR, "%.*s: very strange, " 1710 "master MLE but no lockres on %u\n", 1711 namelen, lockname, to); 1712 } 1713 1714 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1715 mlog(0, "%.*s: node %u create mles on other " 1716 "nodes and requests a re-assert\n", 1717 namelen, lockname, to); 1718 reassert = 1; 1719 } 1720 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { 1721 mlog(0, "%.*s: node %u has a reference to this " 1722 "lockres, set the bit in the refmap\n", 1723 namelen, lockname, to); 1724 spin_lock(&res->spinlock); 1725 dlm_lockres_set_refmap_bit(dlm, res, to); 1726 spin_unlock(&res->spinlock); 1727 } 1728 } 1729 1730 if (reassert) 1731 goto again; 1732 1733 spin_lock(&res->spinlock); 1734 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 1735 spin_unlock(&res->spinlock); 1736 wake_up(&res->wq); 1737 1738 return ret; 1739 } 1740 1741 /* 1742 * locks that can be taken here: 1743 * dlm->spinlock 1744 * res->spinlock 1745 * mle->spinlock 1746 * dlm->master_list 1747 * 1748 * if possible, TRIM THIS DOWN!!! 1749 */ 1750 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, 1751 void **ret_data) 1752 { 1753 struct dlm_ctxt *dlm = data; 1754 struct dlm_master_list_entry *mle = NULL; 1755 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; 1756 struct dlm_lock_resource *res = NULL; 1757 char *name; 1758 unsigned int namelen, hash; 1759 u32 flags; 1760 int master_request = 0, have_lockres_ref = 0; 1761 int ret = 0; 1762 1763 if (!dlm_grab(dlm)) 1764 return 0; 1765 1766 name = assert->name; 1767 namelen = assert->namelen; 1768 hash = dlm_lockid_hash(name, namelen); 1769 flags = be32_to_cpu(assert->flags); 1770 1771 if (namelen > DLM_LOCKID_NAME_MAX) { 1772 mlog(ML_ERROR, "Invalid name length!"); 1773 goto done; 1774 } 1775 1776 spin_lock(&dlm->spinlock); 1777 1778 if (flags) 1779 mlog(0, "assert_master with flags: %u\n", flags); 1780 1781 /* find the MLE */ 1782 spin_lock(&dlm->master_lock); 1783 if (!dlm_find_mle(dlm, &mle, name, namelen)) { 1784 /* not an error, could be master just re-asserting */ 1785 mlog(0, "just got an assert_master from %u, but no " 1786 "MLE for it! (%.*s)\n", assert->node_idx, 1787 namelen, name); 1788 } else { 1789 int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); 1790 if (bit >= O2NM_MAX_NODES) { 1791 /* not necessarily an error, though less likely. 1792 * could be master just re-asserting. */ 1793 mlog(0, "no bits set in the maybe_map, but %u " 1794 "is asserting! (%.*s)\n", assert->node_idx, 1795 namelen, name); 1796 } else if (bit != assert->node_idx) { 1797 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1798 mlog(0, "master %u was found, %u should " 1799 "back off\n", assert->node_idx, bit); 1800 } else { 1801 /* with the fix for bug 569, a higher node 1802 * number winning the mastery will respond 1803 * YES to mastery requests, but this node 1804 * had no way of knowing. let it pass. */ 1805 mlog(0, "%u is the lowest node, " 1806 "%u is asserting. (%.*s) %u must " 1807 "have begun after %u won.\n", bit, 1808 assert->node_idx, namelen, name, bit, 1809 assert->node_idx); 1810 } 1811 } 1812 if (mle->type == DLM_MLE_MIGRATION) { 1813 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { 1814 mlog(0, "%s:%.*s: got cleanup assert" 1815 " from %u for migration\n", 1816 dlm->name, namelen, name, 1817 assert->node_idx); 1818 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { 1819 mlog(0, "%s:%.*s: got unrelated assert" 1820 " from %u for migration, ignoring\n", 1821 dlm->name, namelen, name, 1822 assert->node_idx); 1823 __dlm_put_mle(mle); 1824 spin_unlock(&dlm->master_lock); 1825 spin_unlock(&dlm->spinlock); 1826 goto done; 1827 } 1828 } 1829 } 1830 spin_unlock(&dlm->master_lock); 1831 1832 /* ok everything checks out with the MLE 1833 * now check to see if there is a lockres */ 1834 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1835 if (res) { 1836 spin_lock(&res->spinlock); 1837 if (res->state & DLM_LOCK_RES_RECOVERING) { 1838 mlog(ML_ERROR, "%u asserting but %.*s is " 1839 "RECOVERING!\n", assert->node_idx, namelen, name); 1840 goto kill; 1841 } 1842 if (!mle) { 1843 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && 1844 res->owner != assert->node_idx) { 1845 mlog(ML_ERROR, "DIE! Mastery assert from %u, " 1846 "but current owner is %u! (%.*s)\n", 1847 assert->node_idx, res->owner, namelen, 1848 name); 1849 __dlm_print_one_lock_resource(res); 1850 BUG(); 1851 } 1852 } else if (mle->type != DLM_MLE_MIGRATION) { 1853 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1854 /* owner is just re-asserting */ 1855 if (res->owner == assert->node_idx) { 1856 mlog(0, "owner %u re-asserting on " 1857 "lock %.*s\n", assert->node_idx, 1858 namelen, name); 1859 goto ok; 1860 } 1861 mlog(ML_ERROR, "got assert_master from " 1862 "node %u, but %u is the owner! " 1863 "(%.*s)\n", assert->node_idx, 1864 res->owner, namelen, name); 1865 goto kill; 1866 } 1867 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { 1868 mlog(ML_ERROR, "got assert from %u, but lock " 1869 "with no owner should be " 1870 "in-progress! (%.*s)\n", 1871 assert->node_idx, 1872 namelen, name); 1873 goto kill; 1874 } 1875 } else /* mle->type == DLM_MLE_MIGRATION */ { 1876 /* should only be getting an assert from new master */ 1877 if (assert->node_idx != mle->new_master) { 1878 mlog(ML_ERROR, "got assert from %u, but " 1879 "new master is %u, and old master " 1880 "was %u (%.*s)\n", 1881 assert->node_idx, mle->new_master, 1882 mle->master, namelen, name); 1883 goto kill; 1884 } 1885 1886 } 1887 ok: 1888 spin_unlock(&res->spinlock); 1889 } 1890 1891 if (mle) { 1892 int extra_ref = 0; 1893 int nn = -1; 1894 int rr, err = 0; 1895 1896 spin_lock(&mle->spinlock); 1897 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1898 extra_ref = 1; 1899 else { 1900 /* MASTER mle: if any bits set in the response map 1901 * then the calling node needs to re-assert to clear 1902 * up nodes that this node contacted */ 1903 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1904 nn+1)) < O2NM_MAX_NODES) { 1905 if (nn != dlm->node_num && nn != assert->node_idx) { 1906 master_request = 1; 1907 break; 1908 } 1909 } 1910 } 1911 mle->master = assert->node_idx; 1912 atomic_set(&mle->woken, 1); 1913 wake_up(&mle->wq); 1914 spin_unlock(&mle->spinlock); 1915 1916 if (res) { 1917 int wake = 0; 1918 spin_lock(&res->spinlock); 1919 if (mle->type == DLM_MLE_MIGRATION) { 1920 mlog(0, "finishing off migration of lockres %.*s, " 1921 "from %u to %u\n", 1922 res->lockname.len, res->lockname.name, 1923 dlm->node_num, mle->new_master); 1924 res->state &= ~DLM_LOCK_RES_MIGRATING; 1925 wake = 1; 1926 dlm_change_lockres_owner(dlm, res, mle->new_master); 1927 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); 1928 } else { 1929 dlm_change_lockres_owner(dlm, res, mle->master); 1930 } 1931 spin_unlock(&res->spinlock); 1932 have_lockres_ref = 1; 1933 if (wake) 1934 wake_up(&res->wq); 1935 } 1936 1937 /* master is known, detach if not already detached. 1938 * ensures that only one assert_master call will happen 1939 * on this mle. */ 1940 spin_lock(&dlm->master_lock); 1941 1942 rr = kref_read(&mle->mle_refs); 1943 if (mle->inuse > 0) { 1944 if (extra_ref && rr < 3) 1945 err = 1; 1946 else if (!extra_ref && rr < 2) 1947 err = 1; 1948 } else { 1949 if (extra_ref && rr < 2) 1950 err = 1; 1951 else if (!extra_ref && rr < 1) 1952 err = 1; 1953 } 1954 if (err) { 1955 mlog(ML_ERROR, "%s:%.*s: got assert master from %u " 1956 "that will mess up this node, refs=%d, extra=%d, " 1957 "inuse=%d\n", dlm->name, namelen, name, 1958 assert->node_idx, rr, extra_ref, mle->inuse); 1959 dlm_print_one_mle(mle); 1960 } 1961 __dlm_unlink_mle(dlm, mle); 1962 __dlm_mle_detach_hb_events(dlm, mle); 1963 __dlm_put_mle(mle); 1964 if (extra_ref) { 1965 /* the assert master message now balances the extra 1966 * ref given by the master / migration request message. 1967 * if this is the last put, it will be removed 1968 * from the list. */ 1969 __dlm_put_mle(mle); 1970 } 1971 spin_unlock(&dlm->master_lock); 1972 } else if (res) { 1973 if (res->owner != assert->node_idx) { 1974 mlog(0, "assert_master from %u, but current " 1975 "owner is %u (%.*s), no mle\n", assert->node_idx, 1976 res->owner, namelen, name); 1977 } 1978 } 1979 spin_unlock(&dlm->spinlock); 1980 1981 done: 1982 ret = 0; 1983 if (res) { 1984 spin_lock(&res->spinlock); 1985 res->state |= DLM_LOCK_RES_SETREF_INPROG; 1986 spin_unlock(&res->spinlock); 1987 *ret_data = (void *)res; 1988 } 1989 dlm_put(dlm); 1990 if (master_request) { 1991 mlog(0, "need to tell master to reassert\n"); 1992 /* positive. negative would shoot down the node. */ 1993 ret |= DLM_ASSERT_RESPONSE_REASSERT; 1994 if (!have_lockres_ref) { 1995 mlog(ML_ERROR, "strange, got assert from %u, MASTER " 1996 "mle present here for %s:%.*s, but no lockres!\n", 1997 assert->node_idx, dlm->name, namelen, name); 1998 } 1999 } 2000 if (have_lockres_ref) { 2001 /* let the master know we have a reference to the lockres */ 2002 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; 2003 mlog(0, "%s:%.*s: got assert from %u, need a ref\n", 2004 dlm->name, namelen, name, assert->node_idx); 2005 } 2006 return ret; 2007 2008 kill: 2009 /* kill the caller! */ 2010 mlog(ML_ERROR, "Bad message received from another node. Dumping state " 2011 "and killing the other node now! This node is OK and can continue.\n"); 2012 __dlm_print_one_lock_resource(res); 2013 spin_unlock(&res->spinlock); 2014 spin_lock(&dlm->master_lock); 2015 if (mle) 2016 __dlm_put_mle(mle); 2017 spin_unlock(&dlm->master_lock); 2018 spin_unlock(&dlm->spinlock); 2019 *ret_data = (void *)res; 2020 dlm_put(dlm); 2021 return -EINVAL; 2022 } 2023 2024 void dlm_assert_master_post_handler(int status, void *data, void *ret_data) 2025 { 2026 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; 2027 2028 if (ret_data) { 2029 spin_lock(&res->spinlock); 2030 res->state &= ~DLM_LOCK_RES_SETREF_INPROG; 2031 spin_unlock(&res->spinlock); 2032 wake_up(&res->wq); 2033 dlm_lockres_put(res); 2034 } 2035 return; 2036 } 2037 2038 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, 2039 struct dlm_lock_resource *res, 2040 int ignore_higher, u8 request_from, u32 flags) 2041 { 2042 struct dlm_work_item *item; 2043 item = kzalloc(sizeof(*item), GFP_ATOMIC); 2044 if (!item) 2045 return -ENOMEM; 2046 2047 2048 /* queue up work for dlm_assert_master_worker */ 2049 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); 2050 item->u.am.lockres = res; /* already have a ref */ 2051 /* can optionally ignore node numbers higher than this node */ 2052 item->u.am.ignore_higher = ignore_higher; 2053 item->u.am.request_from = request_from; 2054 item->u.am.flags = flags; 2055 2056 if (ignore_higher) 2057 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2058 res->lockname.name); 2059 2060 spin_lock(&dlm->work_lock); 2061 list_add_tail(&item->list, &dlm->work_list); 2062 spin_unlock(&dlm->work_lock); 2063 2064 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2065 return 0; 2066 } 2067 2068 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) 2069 { 2070 struct dlm_ctxt *dlm = data; 2071 int ret = 0; 2072 struct dlm_lock_resource *res; 2073 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 2074 int ignore_higher; 2075 int bit; 2076 u8 request_from; 2077 u32 flags; 2078 2079 dlm = item->dlm; 2080 res = item->u.am.lockres; 2081 ignore_higher = item->u.am.ignore_higher; 2082 request_from = item->u.am.request_from; 2083 flags = item->u.am.flags; 2084 2085 spin_lock(&dlm->spinlock); 2086 bitmap_copy(nodemap, dlm->domain_map, O2NM_MAX_NODES); 2087 spin_unlock(&dlm->spinlock); 2088 2089 clear_bit(dlm->node_num, nodemap); 2090 if (ignore_higher) { 2091 /* if is this just to clear up mles for nodes below 2092 * this node, do not send the message to the original 2093 * caller or any node number higher than this */ 2094 clear_bit(request_from, nodemap); 2095 bit = dlm->node_num; 2096 while (1) { 2097 bit = find_next_bit(nodemap, O2NM_MAX_NODES, 2098 bit+1); 2099 if (bit >= O2NM_MAX_NODES) 2100 break; 2101 clear_bit(bit, nodemap); 2102 } 2103 } 2104 2105 /* 2106 * If we're migrating this lock to someone else, we are no 2107 * longer allowed to assert out own mastery. OTOH, we need to 2108 * prevent migration from starting while we're still asserting 2109 * our dominance. The reserved ast delays migration. 2110 */ 2111 spin_lock(&res->spinlock); 2112 if (res->state & DLM_LOCK_RES_MIGRATING) { 2113 mlog(0, "Someone asked us to assert mastery, but we're " 2114 "in the middle of migration. Skipping assert, " 2115 "the new master will handle that.\n"); 2116 spin_unlock(&res->spinlock); 2117 goto put; 2118 } else 2119 __dlm_lockres_reserve_ast(res); 2120 spin_unlock(&res->spinlock); 2121 2122 /* this call now finishes out the nodemap 2123 * even if one or more nodes die */ 2124 mlog(0, "worker about to master %.*s here, this=%u\n", 2125 res->lockname.len, res->lockname.name, dlm->node_num); 2126 ret = dlm_do_assert_master(dlm, res, nodemap, flags); 2127 if (ret < 0) { 2128 /* no need to restart, we are done */ 2129 if (!dlm_is_host_down(ret)) 2130 mlog_errno(ret); 2131 } 2132 2133 /* Ok, we've asserted ourselves. Let's let migration start. */ 2134 dlm_lockres_release_ast(dlm, res); 2135 2136 put: 2137 dlm_lockres_drop_inflight_worker(dlm, res); 2138 2139 dlm_lockres_put(res); 2140 2141 mlog(0, "finished with dlm_assert_master_worker\n"); 2142 } 2143 2144 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. 2145 * We cannot wait for node recovery to complete to begin mastering this 2146 * lockres because this lockres is used to kick off recovery! ;-) 2147 * So, do a pre-check on all living nodes to see if any of those nodes 2148 * think that $RECOVERY is currently mastered by a dead node. If so, 2149 * we wait a short time to allow that node to get notified by its own 2150 * heartbeat stack, then check again. All $RECOVERY lock resources 2151 * mastered by dead nodes are purged when the heartbeat callback is 2152 * fired, so we can know for sure that it is safe to continue once 2153 * the node returns a live node or no node. */ 2154 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2155 struct dlm_lock_resource *res) 2156 { 2157 struct dlm_node_iter iter; 2158 int nodenum; 2159 int ret = 0; 2160 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; 2161 2162 spin_lock(&dlm->spinlock); 2163 dlm_node_iter_init(dlm->domain_map, &iter); 2164 spin_unlock(&dlm->spinlock); 2165 2166 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2167 /* do not send to self */ 2168 if (nodenum == dlm->node_num) 2169 continue; 2170 ret = dlm_do_master_requery(dlm, res, nodenum, &master); 2171 if (ret < 0) { 2172 mlog_errno(ret); 2173 if (!dlm_is_host_down(ret)) 2174 BUG(); 2175 /* host is down, so answer for that node would be 2176 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 2177 ret = 0; 2178 } 2179 2180 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { 2181 /* check to see if this master is in the recovery map */ 2182 spin_lock(&dlm->spinlock); 2183 if (test_bit(master, dlm->recovery_map)) { 2184 mlog(ML_NOTICE, "%s: node %u has not seen " 2185 "node %u go down yet, and thinks the " 2186 "dead node is mastering the recovery " 2187 "lock. must wait.\n", dlm->name, 2188 nodenum, master); 2189 ret = -EAGAIN; 2190 } 2191 spin_unlock(&dlm->spinlock); 2192 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2193 master); 2194 break; 2195 } 2196 } 2197 return ret; 2198 } 2199 2200 /* 2201 * DLM_DEREF_LOCKRES_MSG 2202 */ 2203 2204 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2205 { 2206 struct dlm_deref_lockres deref; 2207 int ret = 0, r; 2208 const char *lockname; 2209 unsigned int namelen; 2210 2211 lockname = res->lockname.name; 2212 namelen = res->lockname.len; 2213 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2214 2215 memset(&deref, 0, sizeof(deref)); 2216 deref.node_idx = dlm->node_num; 2217 deref.namelen = namelen; 2218 memcpy(deref.name, lockname, namelen); 2219 2220 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, 2221 &deref, sizeof(deref), res->owner, &r); 2222 if (ret < 0) 2223 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", 2224 dlm->name, namelen, lockname, ret, res->owner); 2225 else if (r < 0) { 2226 /* BAD. other node says I did not have a ref. */ 2227 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", 2228 dlm->name, namelen, lockname, res->owner, r); 2229 dlm_print_one_lock_resource(res); 2230 if (r == -ENOMEM) 2231 BUG(); 2232 } else 2233 ret = r; 2234 2235 return ret; 2236 } 2237 2238 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, 2239 void **ret_data) 2240 { 2241 struct dlm_ctxt *dlm = data; 2242 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; 2243 struct dlm_lock_resource *res = NULL; 2244 char *name; 2245 unsigned int namelen; 2246 int ret = -EINVAL; 2247 u8 node; 2248 unsigned int hash; 2249 struct dlm_work_item *item; 2250 int cleared = 0; 2251 int dispatch = 0; 2252 2253 if (!dlm_grab(dlm)) 2254 return 0; 2255 2256 name = deref->name; 2257 namelen = deref->namelen; 2258 node = deref->node_idx; 2259 2260 if (namelen > DLM_LOCKID_NAME_MAX) { 2261 mlog(ML_ERROR, "Invalid name length!"); 2262 goto done; 2263 } 2264 if (deref->node_idx >= O2NM_MAX_NODES) { 2265 mlog(ML_ERROR, "Invalid node number: %u\n", node); 2266 goto done; 2267 } 2268 2269 hash = dlm_lockid_hash(name, namelen); 2270 2271 spin_lock(&dlm->spinlock); 2272 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); 2273 if (!res) { 2274 spin_unlock(&dlm->spinlock); 2275 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", 2276 dlm->name, namelen, name); 2277 goto done; 2278 } 2279 spin_unlock(&dlm->spinlock); 2280 2281 spin_lock(&res->spinlock); 2282 if (res->state & DLM_LOCK_RES_SETREF_INPROG) 2283 dispatch = 1; 2284 else { 2285 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2286 if (test_bit(node, res->refmap)) { 2287 dlm_lockres_clear_refmap_bit(dlm, res, node); 2288 cleared = 1; 2289 } 2290 } 2291 spin_unlock(&res->spinlock); 2292 2293 if (!dispatch) { 2294 if (cleared) 2295 dlm_lockres_calc_usage(dlm, res); 2296 else { 2297 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2298 "but it is already dropped!\n", dlm->name, 2299 res->lockname.len, res->lockname.name, node); 2300 dlm_print_one_lock_resource(res); 2301 } 2302 ret = DLM_DEREF_RESPONSE_DONE; 2303 goto done; 2304 } 2305 2306 item = kzalloc(sizeof(*item), GFP_NOFS); 2307 if (!item) { 2308 ret = -ENOMEM; 2309 mlog_errno(ret); 2310 goto done; 2311 } 2312 2313 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); 2314 item->u.dl.deref_res = res; 2315 item->u.dl.deref_node = node; 2316 2317 spin_lock(&dlm->work_lock); 2318 list_add_tail(&item->list, &dlm->work_list); 2319 spin_unlock(&dlm->work_lock); 2320 2321 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 2322 return DLM_DEREF_RESPONSE_INPROG; 2323 2324 done: 2325 if (res) 2326 dlm_lockres_put(res); 2327 dlm_put(dlm); 2328 2329 return ret; 2330 } 2331 2332 int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data, 2333 void **ret_data) 2334 { 2335 struct dlm_ctxt *dlm = data; 2336 struct dlm_deref_lockres_done *deref 2337 = (struct dlm_deref_lockres_done *)msg->buf; 2338 struct dlm_lock_resource *res = NULL; 2339 char *name; 2340 unsigned int namelen; 2341 int ret = -EINVAL; 2342 u8 node; 2343 unsigned int hash; 2344 2345 if (!dlm_grab(dlm)) 2346 return 0; 2347 2348 name = deref->name; 2349 namelen = deref->namelen; 2350 node = deref->node_idx; 2351 2352 if (namelen > DLM_LOCKID_NAME_MAX) { 2353 mlog(ML_ERROR, "Invalid name length!"); 2354 goto done; 2355 } 2356 if (deref->node_idx >= O2NM_MAX_NODES) { 2357 mlog(ML_ERROR, "Invalid node number: %u\n", node); 2358 goto done; 2359 } 2360 2361 hash = dlm_lockid_hash(name, namelen); 2362 2363 spin_lock(&dlm->spinlock); 2364 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); 2365 if (!res) { 2366 spin_unlock(&dlm->spinlock); 2367 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", 2368 dlm->name, namelen, name); 2369 goto done; 2370 } 2371 2372 spin_lock(&res->spinlock); 2373 if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) { 2374 spin_unlock(&res->spinlock); 2375 spin_unlock(&dlm->spinlock); 2376 mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done " 2377 "but it is already derefed!\n", dlm->name, 2378 res->lockname.len, res->lockname.name, node); 2379 ret = 0; 2380 goto done; 2381 } 2382 2383 __dlm_do_purge_lockres(dlm, res); 2384 spin_unlock(&res->spinlock); 2385 wake_up(&res->wq); 2386 2387 spin_unlock(&dlm->spinlock); 2388 2389 ret = 0; 2390 done: 2391 if (res) 2392 dlm_lockres_put(res); 2393 dlm_put(dlm); 2394 return ret; 2395 } 2396 2397 static void dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm, 2398 struct dlm_lock_resource *res, u8 node) 2399 { 2400 struct dlm_deref_lockres_done deref; 2401 int ret = 0, r; 2402 const char *lockname; 2403 unsigned int namelen; 2404 2405 lockname = res->lockname.name; 2406 namelen = res->lockname.len; 2407 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 2408 2409 memset(&deref, 0, sizeof(deref)); 2410 deref.node_idx = dlm->node_num; 2411 deref.namelen = namelen; 2412 memcpy(deref.name, lockname, namelen); 2413 2414 ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key, 2415 &deref, sizeof(deref), node, &r); 2416 if (ret < 0) { 2417 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE " 2418 " to node %u\n", dlm->name, namelen, 2419 lockname, ret, node); 2420 } else if (r < 0) { 2421 /* ignore the error */ 2422 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", 2423 dlm->name, namelen, lockname, node, r); 2424 dlm_print_one_lock_resource(res); 2425 } 2426 } 2427 2428 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) 2429 { 2430 struct dlm_ctxt *dlm; 2431 struct dlm_lock_resource *res; 2432 u8 node; 2433 u8 cleared = 0; 2434 2435 dlm = item->dlm; 2436 res = item->u.dl.deref_res; 2437 node = item->u.dl.deref_node; 2438 2439 spin_lock(&res->spinlock); 2440 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); 2441 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); 2442 if (test_bit(node, res->refmap)) { 2443 dlm_lockres_clear_refmap_bit(dlm, res, node); 2444 cleared = 1; 2445 } 2446 spin_unlock(&res->spinlock); 2447 2448 dlm_drop_lockres_ref_done(dlm, res, node); 2449 2450 if (cleared) { 2451 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", 2452 dlm->name, res->lockname.len, res->lockname.name, node); 2453 dlm_lockres_calc_usage(dlm, res); 2454 } else { 2455 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " 2456 "but it is already dropped!\n", dlm->name, 2457 res->lockname.len, res->lockname.name, node); 2458 dlm_print_one_lock_resource(res); 2459 } 2460 2461 dlm_lockres_put(res); 2462 } 2463 2464 /* 2465 * A migratable resource is one that is : 2466 * 1. locally mastered, and, 2467 * 2. zero local locks, and, 2468 * 3. one or more non-local locks, or, one or more references 2469 * Returns 1 if yes, 0 if not. 2470 */ 2471 static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm, 2472 struct dlm_lock_resource *res) 2473 { 2474 enum dlm_lockres_list idx; 2475 int nonlocal = 0, node_ref; 2476 struct list_head *queue; 2477 struct dlm_lock *lock; 2478 u64 cookie; 2479 2480 assert_spin_locked(&res->spinlock); 2481 2482 /* delay migration when the lockres is in MIGRATING state */ 2483 if (res->state & DLM_LOCK_RES_MIGRATING) 2484 return 0; 2485 2486 /* delay migration when the lockres is in RECOCERING state */ 2487 if (res->state & (DLM_LOCK_RES_RECOVERING| 2488 DLM_LOCK_RES_RECOVERY_WAITING)) 2489 return 0; 2490 2491 if (res->owner != dlm->node_num) 2492 return 0; 2493 2494 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { 2495 queue = dlm_list_idx_to_ptr(res, idx); 2496 list_for_each_entry(lock, queue, list) { 2497 if (lock->ml.node != dlm->node_num) { 2498 nonlocal++; 2499 continue; 2500 } 2501 cookie = be64_to_cpu(lock->ml.cookie); 2502 mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on " 2503 "%s list\n", dlm->name, res->lockname.len, 2504 res->lockname.name, 2505 dlm_get_lock_cookie_node(cookie), 2506 dlm_get_lock_cookie_seq(cookie), 2507 dlm_list_in_text(idx)); 2508 return 0; 2509 } 2510 } 2511 2512 if (!nonlocal) { 2513 node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES); 2514 if (node_ref >= O2NM_MAX_NODES) 2515 return 0; 2516 } 2517 2518 mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len, 2519 res->lockname.name); 2520 2521 return 1; 2522 } 2523 2524 /* 2525 * DLM_MIGRATE_LOCKRES 2526 */ 2527 2528 2529 static int dlm_migrate_lockres(struct dlm_ctxt *dlm, 2530 struct dlm_lock_resource *res, u8 target) 2531 { 2532 struct dlm_master_list_entry *mle = NULL; 2533 struct dlm_master_list_entry *oldmle = NULL; 2534 struct dlm_migratable_lockres *mres = NULL; 2535 int ret = 0; 2536 const char *name; 2537 unsigned int namelen; 2538 int mle_added = 0; 2539 int wake = 0; 2540 2541 if (!dlm_grab(dlm)) 2542 return -EINVAL; 2543 2544 name = res->lockname.name; 2545 namelen = res->lockname.len; 2546 2547 mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, 2548 target); 2549 2550 /* preallocate up front. if this fails, abort */ 2551 ret = -ENOMEM; 2552 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); 2553 if (!mres) { 2554 mlog_errno(ret); 2555 goto leave; 2556 } 2557 2558 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 2559 if (!mle) { 2560 mlog_errno(ret); 2561 goto leave; 2562 } 2563 ret = 0; 2564 2565 /* 2566 * clear any existing master requests and 2567 * add the migration mle to the list 2568 */ 2569 spin_lock(&dlm->spinlock); 2570 spin_lock(&dlm->master_lock); 2571 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, 2572 namelen, target, dlm->node_num); 2573 /* get an extra reference on the mle. 2574 * otherwise the assert_master from the new 2575 * master will destroy this. 2576 */ 2577 if (ret != -EEXIST) 2578 dlm_get_mle_inuse(mle); 2579 2580 spin_unlock(&dlm->master_lock); 2581 spin_unlock(&dlm->spinlock); 2582 2583 if (ret == -EEXIST) { 2584 mlog(0, "another process is already migrating it\n"); 2585 goto fail; 2586 } 2587 mle_added = 1; 2588 2589 /* 2590 * set the MIGRATING flag and flush asts 2591 * if we fail after this we need to re-dirty the lockres 2592 */ 2593 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { 2594 mlog(ML_ERROR, "tried to migrate %.*s to %u, but " 2595 "the target went down.\n", res->lockname.len, 2596 res->lockname.name, target); 2597 spin_lock(&res->spinlock); 2598 res->state &= ~DLM_LOCK_RES_MIGRATING; 2599 wake = 1; 2600 spin_unlock(&res->spinlock); 2601 ret = -EINVAL; 2602 } 2603 2604 fail: 2605 if (ret != -EEXIST && oldmle) { 2606 /* master is known, detach if not already detached */ 2607 dlm_mle_detach_hb_events(dlm, oldmle); 2608 dlm_put_mle(oldmle); 2609 } 2610 2611 if (ret < 0) { 2612 if (mle_added) { 2613 dlm_mle_detach_hb_events(dlm, mle); 2614 dlm_put_mle(mle); 2615 dlm_put_mle_inuse(mle); 2616 } else if (mle) { 2617 kmem_cache_free(dlm_mle_cache, mle); 2618 mle = NULL; 2619 } 2620 goto leave; 2621 } 2622 2623 /* 2624 * at this point, we have a migration target, an mle 2625 * in the master list, and the MIGRATING flag set on 2626 * the lockres 2627 */ 2628 2629 /* now that remote nodes are spinning on the MIGRATING flag, 2630 * ensure that all assert_master work is flushed. */ 2631 flush_workqueue(dlm->dlm_worker); 2632 2633 /* notify new node and send all lock state */ 2634 /* call send_one_lockres with migration flag. 2635 * this serves as notice to the target node that a 2636 * migration is starting. */ 2637 ret = dlm_send_one_lockres(dlm, res, mres, target, 2638 DLM_MRES_MIGRATION); 2639 2640 if (ret < 0) { 2641 mlog(0, "migration to node %u failed with %d\n", 2642 target, ret); 2643 /* migration failed, detach and clean up mle */ 2644 dlm_mle_detach_hb_events(dlm, mle); 2645 dlm_put_mle(mle); 2646 dlm_put_mle_inuse(mle); 2647 spin_lock(&res->spinlock); 2648 res->state &= ~DLM_LOCK_RES_MIGRATING; 2649 wake = 1; 2650 spin_unlock(&res->spinlock); 2651 if (dlm_is_host_down(ret)) 2652 dlm_wait_for_node_death(dlm, target, 2653 DLM_NODE_DEATH_WAIT_MAX); 2654 goto leave; 2655 } 2656 2657 /* at this point, the target sends a message to all nodes, 2658 * (using dlm_do_migrate_request). this node is skipped since 2659 * we had to put an mle in the list to begin the process. this 2660 * node now waits for target to do an assert master. this node 2661 * will be the last one notified, ensuring that the migration 2662 * is complete everywhere. if the target dies while this is 2663 * going on, some nodes could potentially see the target as the 2664 * master, so it is important that my recovery finds the migration 2665 * mle and sets the master to UNKNOWN. */ 2666 2667 2668 /* wait for new node to assert master */ 2669 while (1) { 2670 ret = wait_event_interruptible_timeout(mle->wq, 2671 (atomic_read(&mle->woken) == 1), 2672 msecs_to_jiffies(5000)); 2673 2674 if (ret >= 0) { 2675 if (atomic_read(&mle->woken) == 1 || 2676 res->owner == target) 2677 break; 2678 2679 mlog(0, "%s:%.*s: timed out during migration\n", 2680 dlm->name, res->lockname.len, res->lockname.name); 2681 /* avoid hang during shutdown when migrating lockres 2682 * to a node which also goes down */ 2683 if (dlm_is_node_dead(dlm, target)) { 2684 mlog(0, "%s:%.*s: expected migration " 2685 "target %u is no longer up, restarting\n", 2686 dlm->name, res->lockname.len, 2687 res->lockname.name, target); 2688 ret = -EINVAL; 2689 /* migration failed, detach and clean up mle */ 2690 dlm_mle_detach_hb_events(dlm, mle); 2691 dlm_put_mle(mle); 2692 dlm_put_mle_inuse(mle); 2693 spin_lock(&res->spinlock); 2694 res->state &= ~DLM_LOCK_RES_MIGRATING; 2695 wake = 1; 2696 spin_unlock(&res->spinlock); 2697 goto leave; 2698 } 2699 } else 2700 mlog(0, "%s:%.*s: caught signal during migration\n", 2701 dlm->name, res->lockname.len, res->lockname.name); 2702 } 2703 2704 /* all done, set the owner, clear the flag */ 2705 spin_lock(&res->spinlock); 2706 dlm_set_lockres_owner(dlm, res, target); 2707 res->state &= ~DLM_LOCK_RES_MIGRATING; 2708 dlm_remove_nonlocal_locks(dlm, res); 2709 spin_unlock(&res->spinlock); 2710 wake_up(&res->wq); 2711 2712 /* master is known, detach if not already detached */ 2713 dlm_mle_detach_hb_events(dlm, mle); 2714 dlm_put_mle_inuse(mle); 2715 ret = 0; 2716 2717 dlm_lockres_calc_usage(dlm, res); 2718 2719 leave: 2720 /* re-dirty the lockres if we failed */ 2721 if (ret < 0) 2722 dlm_kick_thread(dlm, res); 2723 2724 /* wake up waiters if the MIGRATING flag got set 2725 * but migration failed */ 2726 if (wake) 2727 wake_up(&res->wq); 2728 2729 if (mres) 2730 free_page((unsigned long)mres); 2731 2732 dlm_put(dlm); 2733 2734 mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, 2735 name, target, ret); 2736 return ret; 2737 } 2738 2739 /* 2740 * Should be called only after beginning the domain leave process. 2741 * There should not be any remaining locks on nonlocal lock resources, 2742 * and there should be no local locks left on locally mastered resources. 2743 * 2744 * Called with the dlm spinlock held, may drop it to do migration, but 2745 * will re-acquire before exit. 2746 * 2747 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped 2748 */ 2749 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2750 __must_hold(&dlm->spinlock) 2751 { 2752 int ret; 2753 int lock_dropped = 0; 2754 u8 target = O2NM_MAX_NODES; 2755 2756 assert_spin_locked(&dlm->spinlock); 2757 2758 spin_lock(&res->spinlock); 2759 if (dlm_is_lockres_migratable(dlm, res)) 2760 target = dlm_pick_migration_target(dlm, res); 2761 spin_unlock(&res->spinlock); 2762 2763 if (target == O2NM_MAX_NODES) 2764 goto leave; 2765 2766 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ 2767 spin_unlock(&dlm->spinlock); 2768 lock_dropped = 1; 2769 ret = dlm_migrate_lockres(dlm, res, target); 2770 if (ret) 2771 mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n", 2772 dlm->name, res->lockname.len, res->lockname.name, 2773 target, ret); 2774 spin_lock(&dlm->spinlock); 2775 leave: 2776 return lock_dropped; 2777 } 2778 2779 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2780 { 2781 int ret; 2782 spin_lock(&dlm->ast_lock); 2783 spin_lock(&lock->spinlock); 2784 ret = (list_empty(&lock->bast_list) && !lock->bast_pending); 2785 spin_unlock(&lock->spinlock); 2786 spin_unlock(&dlm->ast_lock); 2787 return ret; 2788 } 2789 2790 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, 2791 struct dlm_lock_resource *res, 2792 u8 mig_target) 2793 { 2794 int can_proceed; 2795 spin_lock(&res->spinlock); 2796 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2797 spin_unlock(&res->spinlock); 2798 2799 /* target has died, so make the caller break out of the 2800 * wait_event, but caller must recheck the domain_map */ 2801 spin_lock(&dlm->spinlock); 2802 if (!test_bit(mig_target, dlm->domain_map)) 2803 can_proceed = 1; 2804 spin_unlock(&dlm->spinlock); 2805 return can_proceed; 2806 } 2807 2808 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, 2809 struct dlm_lock_resource *res) 2810 { 2811 int ret; 2812 spin_lock(&res->spinlock); 2813 ret = !!(res->state & DLM_LOCK_RES_DIRTY); 2814 spin_unlock(&res->spinlock); 2815 return ret; 2816 } 2817 2818 2819 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, 2820 struct dlm_lock_resource *res, 2821 u8 target) 2822 { 2823 int ret = 0; 2824 2825 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", 2826 res->lockname.len, res->lockname.name, dlm->node_num, 2827 target); 2828 /* need to set MIGRATING flag on lockres. this is done by 2829 * ensuring that all asts have been flushed for this lockres. */ 2830 spin_lock(&res->spinlock); 2831 BUG_ON(res->migration_pending); 2832 res->migration_pending = 1; 2833 /* strategy is to reserve an extra ast then release 2834 * it below, letting the release do all of the work */ 2835 __dlm_lockres_reserve_ast(res); 2836 spin_unlock(&res->spinlock); 2837 2838 /* now flush all the pending asts */ 2839 dlm_kick_thread(dlm, res); 2840 /* before waiting on DIRTY, block processes which may 2841 * try to dirty the lockres before MIGRATING is set */ 2842 spin_lock(&res->spinlock); 2843 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); 2844 res->state |= DLM_LOCK_RES_BLOCK_DIRTY; 2845 spin_unlock(&res->spinlock); 2846 /* now wait on any pending asts and the DIRTY state */ 2847 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); 2848 dlm_lockres_release_ast(dlm, res); 2849 2850 mlog(0, "about to wait on migration_wq, dirty=%s\n", 2851 str_yes_no(res->state & DLM_LOCK_RES_DIRTY)); 2852 /* if the extra ref we just put was the final one, this 2853 * will pass thru immediately. otherwise, we need to wait 2854 * for the last ast to finish. */ 2855 again: 2856 ret = wait_event_interruptible_timeout(dlm->migration_wq, 2857 dlm_migration_can_proceed(dlm, res, target), 2858 msecs_to_jiffies(1000)); 2859 if (ret < 0) { 2860 mlog(0, "woken again: migrating? %s, dead? %s\n", 2861 str_yes_no(res->state & DLM_LOCK_RES_MIGRATING), 2862 str_no_yes(test_bit(target, dlm->domain_map))); 2863 } else { 2864 mlog(0, "all is well: migrating? %s, dead? %s\n", 2865 str_yes_no(res->state & DLM_LOCK_RES_MIGRATING), 2866 str_no_yes(test_bit(target, dlm->domain_map))); 2867 } 2868 if (!dlm_migration_can_proceed(dlm, res, target)) { 2869 mlog(0, "trying again...\n"); 2870 goto again; 2871 } 2872 2873 ret = 0; 2874 /* did the target go down or die? */ 2875 spin_lock(&dlm->spinlock); 2876 if (!test_bit(target, dlm->domain_map)) { 2877 mlog(ML_ERROR, "aha. migration target %u just went down\n", 2878 target); 2879 ret = -EHOSTDOWN; 2880 } 2881 spin_unlock(&dlm->spinlock); 2882 2883 /* 2884 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for 2885 * another try; otherwise, we are sure the MIGRATING state is there, 2886 * drop the unneeded state which blocked threads trying to DIRTY 2887 */ 2888 spin_lock(&res->spinlock); 2889 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); 2890 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; 2891 if (!ret) 2892 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); 2893 else 2894 res->migration_pending = 0; 2895 spin_unlock(&res->spinlock); 2896 2897 /* 2898 * at this point: 2899 * 2900 * o the DLM_LOCK_RES_MIGRATING flag is set if target not down 2901 * o there are no pending asts on this lockres 2902 * o all processes trying to reserve an ast on this 2903 * lockres must wait for the MIGRATING flag to clear 2904 */ 2905 return ret; 2906 } 2907 2908 /* last step in the migration process. 2909 * original master calls this to free all of the dlm_lock 2910 * structures that used to be for other nodes. */ 2911 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 2912 struct dlm_lock_resource *res) 2913 { 2914 struct list_head *queue = &res->granted; 2915 int i, bit; 2916 struct dlm_lock *lock, *next; 2917 2918 assert_spin_locked(&res->spinlock); 2919 2920 BUG_ON(res->owner == dlm->node_num); 2921 2922 for (i=0; i<3; i++) { 2923 list_for_each_entry_safe(lock, next, queue, list) { 2924 if (lock->ml.node != dlm->node_num) { 2925 mlog(0, "putting lock for node %u\n", 2926 lock->ml.node); 2927 /* be extra careful */ 2928 BUG_ON(!list_empty(&lock->ast_list)); 2929 BUG_ON(!list_empty(&lock->bast_list)); 2930 BUG_ON(lock->ast_pending); 2931 BUG_ON(lock->bast_pending); 2932 dlm_lockres_clear_refmap_bit(dlm, res, 2933 lock->ml.node); 2934 list_del_init(&lock->list); 2935 dlm_lock_put(lock); 2936 /* In a normal unlock, we would have added a 2937 * DLM_UNLOCK_FREE_LOCK action. Force it. */ 2938 dlm_lock_put(lock); 2939 } 2940 } 2941 queue++; 2942 } 2943 bit = 0; 2944 while (1) { 2945 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); 2946 if (bit >= O2NM_MAX_NODES) 2947 break; 2948 /* do not clear the local node reference, if there is a 2949 * process holding this, let it drop the ref itself */ 2950 if (bit != dlm->node_num) { 2951 mlog(0, "%s:%.*s: node %u had a ref to this " 2952 "migrating lockres, clearing\n", dlm->name, 2953 res->lockname.len, res->lockname.name, bit); 2954 dlm_lockres_clear_refmap_bit(dlm, res, bit); 2955 } 2956 bit++; 2957 } 2958 } 2959 2960 /* 2961 * Pick a node to migrate the lock resource to. This function selects a 2962 * potential target based first on the locks and then on refmap. It skips 2963 * nodes that are in the process of exiting the domain. 2964 */ 2965 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, 2966 struct dlm_lock_resource *res) 2967 { 2968 enum dlm_lockres_list idx; 2969 struct list_head *queue; 2970 struct dlm_lock *lock; 2971 int noderef; 2972 u8 nodenum = O2NM_MAX_NODES; 2973 2974 assert_spin_locked(&dlm->spinlock); 2975 assert_spin_locked(&res->spinlock); 2976 2977 /* Go through all the locks */ 2978 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { 2979 queue = dlm_list_idx_to_ptr(res, idx); 2980 list_for_each_entry(lock, queue, list) { 2981 if (lock->ml.node == dlm->node_num) 2982 continue; 2983 if (test_bit(lock->ml.node, dlm->exit_domain_map)) 2984 continue; 2985 nodenum = lock->ml.node; 2986 goto bail; 2987 } 2988 } 2989 2990 /* Go thru the refmap */ 2991 noderef = -1; 2992 while (1) { 2993 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, 2994 noderef + 1); 2995 if (noderef >= O2NM_MAX_NODES) 2996 break; 2997 if (noderef == dlm->node_num) 2998 continue; 2999 if (test_bit(noderef, dlm->exit_domain_map)) 3000 continue; 3001 nodenum = noderef; 3002 goto bail; 3003 } 3004 3005 bail: 3006 return nodenum; 3007 } 3008 3009 /* this is called by the new master once all lockres 3010 * data has been received */ 3011 static int dlm_do_migrate_request(struct dlm_ctxt *dlm, 3012 struct dlm_lock_resource *res, 3013 u8 master, u8 new_master, 3014 struct dlm_node_iter *iter) 3015 { 3016 struct dlm_migrate_request migrate; 3017 int ret, skip, status = 0; 3018 int nodenum; 3019 3020 memset(&migrate, 0, sizeof(migrate)); 3021 migrate.namelen = res->lockname.len; 3022 memcpy(migrate.name, res->lockname.name, migrate.namelen); 3023 migrate.new_master = new_master; 3024 migrate.master = master; 3025 3026 ret = 0; 3027 3028 /* send message to all nodes, except the master and myself */ 3029 while ((nodenum = dlm_node_iter_next(iter)) >= 0) { 3030 if (nodenum == master || 3031 nodenum == new_master) 3032 continue; 3033 3034 /* We could race exit domain. If exited, skip. */ 3035 spin_lock(&dlm->spinlock); 3036 skip = (!test_bit(nodenum, dlm->domain_map)); 3037 spin_unlock(&dlm->spinlock); 3038 if (skip) { 3039 clear_bit(nodenum, iter->node_map); 3040 continue; 3041 } 3042 3043 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, 3044 &migrate, sizeof(migrate), nodenum, 3045 &status); 3046 if (ret < 0) { 3047 mlog(ML_ERROR, "%s: res %.*s, Error %d send " 3048 "MIGRATE_REQUEST to node %u\n", dlm->name, 3049 migrate.namelen, migrate.name, ret, nodenum); 3050 if (!dlm_is_host_down(ret)) { 3051 mlog(ML_ERROR, "unhandled error=%d!\n", ret); 3052 BUG(); 3053 } 3054 clear_bit(nodenum, iter->node_map); 3055 ret = 0; 3056 } else if (status < 0) { 3057 mlog(0, "migrate request (node %u) returned %d!\n", 3058 nodenum, status); 3059 ret = status; 3060 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { 3061 /* during the migration request we short-circuited 3062 * the mastery of the lockres. make sure we have 3063 * a mastery ref for nodenum */ 3064 mlog(0, "%s:%.*s: need ref for node %u\n", 3065 dlm->name, res->lockname.len, res->lockname.name, 3066 nodenum); 3067 spin_lock(&res->spinlock); 3068 dlm_lockres_set_refmap_bit(dlm, res, nodenum); 3069 spin_unlock(&res->spinlock); 3070 } 3071 } 3072 3073 if (ret < 0) 3074 mlog_errno(ret); 3075 3076 mlog(0, "returning ret=%d\n", ret); 3077 return ret; 3078 } 3079 3080 3081 /* if there is an existing mle for this lockres, we now know who the master is. 3082 * (the one who sent us *this* message) we can clear it up right away. 3083 * since the process that put the mle on the list still has a reference to it, 3084 * we can unhash it now, set the master and wake the process. as a result, 3085 * we will have no mle in the list to start with. now we can add an mle for 3086 * the migration and this should be the only one found for those scanning the 3087 * list. */ 3088 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, 3089 void **ret_data) 3090 { 3091 struct dlm_ctxt *dlm = data; 3092 struct dlm_lock_resource *res = NULL; 3093 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; 3094 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; 3095 const char *name; 3096 unsigned int namelen, hash; 3097 int ret = 0; 3098 3099 if (!dlm_grab(dlm)) 3100 return 0; 3101 3102 name = migrate->name; 3103 namelen = migrate->namelen; 3104 hash = dlm_lockid_hash(name, namelen); 3105 3106 /* preallocate.. if this fails, abort */ 3107 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); 3108 3109 if (!mle) { 3110 ret = -ENOMEM; 3111 goto leave; 3112 } 3113 3114 /* check for pre-existing lock */ 3115 spin_lock(&dlm->spinlock); 3116 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 3117 if (res) { 3118 spin_lock(&res->spinlock); 3119 if (res->state & DLM_LOCK_RES_RECOVERING) { 3120 /* if all is working ok, this can only mean that we got 3121 * a migrate request from a node that we now see as 3122 * dead. what can we do here? drop it to the floor? */ 3123 spin_unlock(&res->spinlock); 3124 mlog(ML_ERROR, "Got a migrate request, but the " 3125 "lockres is marked as recovering!"); 3126 kmem_cache_free(dlm_mle_cache, mle); 3127 ret = -EINVAL; /* need a better solution */ 3128 goto unlock; 3129 } 3130 res->state |= DLM_LOCK_RES_MIGRATING; 3131 spin_unlock(&res->spinlock); 3132 } 3133 3134 spin_lock(&dlm->master_lock); 3135 /* ignore status. only nonzero status would BUG. */ 3136 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, 3137 name, namelen, 3138 migrate->new_master, 3139 migrate->master); 3140 3141 if (ret < 0) 3142 kmem_cache_free(dlm_mle_cache, mle); 3143 3144 spin_unlock(&dlm->master_lock); 3145 unlock: 3146 spin_unlock(&dlm->spinlock); 3147 3148 if (oldmle) { 3149 /* master is known, detach if not already detached */ 3150 dlm_mle_detach_hb_events(dlm, oldmle); 3151 dlm_put_mle(oldmle); 3152 } 3153 3154 if (res) 3155 dlm_lockres_put(res); 3156 leave: 3157 dlm_put(dlm); 3158 return ret; 3159 } 3160 3161 /* must be holding dlm->spinlock and dlm->master_lock 3162 * when adding a migration mle, we can clear any other mles 3163 * in the master list because we know with certainty that 3164 * the master is "master". so we remove any old mle from 3165 * the list after setting it's master field, and then add 3166 * the new migration mle. this way we can hold with the rule 3167 * of having only one mle for a given lock name at all times. */ 3168 static int dlm_add_migration_mle(struct dlm_ctxt *dlm, 3169 struct dlm_lock_resource *res, 3170 struct dlm_master_list_entry *mle, 3171 struct dlm_master_list_entry **oldmle, 3172 const char *name, unsigned int namelen, 3173 u8 new_master, u8 master) 3174 { 3175 int found; 3176 int ret = 0; 3177 3178 *oldmle = NULL; 3179 3180 assert_spin_locked(&dlm->spinlock); 3181 assert_spin_locked(&dlm->master_lock); 3182 3183 /* caller is responsible for any ref taken here on oldmle */ 3184 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); 3185 if (found) { 3186 struct dlm_master_list_entry *tmp = *oldmle; 3187 spin_lock(&tmp->spinlock); 3188 if (tmp->type == DLM_MLE_MIGRATION) { 3189 if (master == dlm->node_num) { 3190 /* ah another process raced me to it */ 3191 mlog(0, "tried to migrate %.*s, but some " 3192 "process beat me to it\n", 3193 namelen, name); 3194 spin_unlock(&tmp->spinlock); 3195 return -EEXIST; 3196 } else { 3197 /* bad. 2 NODES are trying to migrate! */ 3198 mlog(ML_ERROR, "migration error mle: " 3199 "master=%u new_master=%u // request: " 3200 "master=%u new_master=%u // " 3201 "lockres=%.*s\n", 3202 tmp->master, tmp->new_master, 3203 master, new_master, 3204 namelen, name); 3205 BUG(); 3206 } 3207 } else { 3208 /* this is essentially what assert_master does */ 3209 tmp->master = master; 3210 atomic_set(&tmp->woken, 1); 3211 wake_up(&tmp->wq); 3212 /* remove it so that only one mle will be found */ 3213 __dlm_unlink_mle(dlm, tmp); 3214 __dlm_mle_detach_hb_events(dlm, tmp); 3215 if (tmp->type == DLM_MLE_MASTER) { 3216 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; 3217 mlog(0, "%s:%.*s: master=%u, newmaster=%u, " 3218 "telling master to get ref " 3219 "for cleared out mle during " 3220 "migration\n", dlm->name, 3221 namelen, name, master, 3222 new_master); 3223 } 3224 } 3225 spin_unlock(&tmp->spinlock); 3226 } 3227 3228 /* now add a migration mle to the tail of the list */ 3229 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); 3230 mle->new_master = new_master; 3231 /* the new master will be sending an assert master for this. 3232 * at that point we will get the refmap reference */ 3233 mle->master = master; 3234 /* do this for consistency with other mle types */ 3235 set_bit(new_master, mle->maybe_map); 3236 __dlm_insert_mle(dlm, mle); 3237 3238 return ret; 3239 } 3240 3241 /* 3242 * Sets the owner of the lockres, associated to the mle, to UNKNOWN 3243 */ 3244 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm, 3245 struct dlm_master_list_entry *mle) 3246 { 3247 struct dlm_lock_resource *res; 3248 3249 /* Find the lockres associated to the mle and set its owner to UNK */ 3250 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, 3251 mle->mnamehash); 3252 if (res) { 3253 spin_unlock(&dlm->master_lock); 3254 3255 /* move lockres onto recovery list */ 3256 spin_lock(&res->spinlock); 3257 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); 3258 dlm_move_lockres_to_recovery_list(dlm, res); 3259 spin_unlock(&res->spinlock); 3260 dlm_lockres_put(res); 3261 3262 /* about to get rid of mle, detach from heartbeat */ 3263 __dlm_mle_detach_hb_events(dlm, mle); 3264 3265 /* dump the mle */ 3266 spin_lock(&dlm->master_lock); 3267 __dlm_put_mle(mle); 3268 spin_unlock(&dlm->master_lock); 3269 } 3270 3271 return res; 3272 } 3273 3274 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm, 3275 struct dlm_master_list_entry *mle) 3276 { 3277 __dlm_mle_detach_hb_events(dlm, mle); 3278 3279 spin_lock(&mle->spinlock); 3280 __dlm_unlink_mle(dlm, mle); 3281 atomic_set(&mle->woken, 1); 3282 spin_unlock(&mle->spinlock); 3283 3284 wake_up(&mle->wq); 3285 } 3286 3287 static void dlm_clean_block_mle(struct dlm_ctxt *dlm, 3288 struct dlm_master_list_entry *mle, u8 dead_node) 3289 { 3290 int bit; 3291 3292 BUG_ON(mle->type != DLM_MLE_BLOCK); 3293 3294 spin_lock(&mle->spinlock); 3295 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); 3296 if (bit != dead_node) { 3297 mlog(0, "mle found, but dead node %u would not have been " 3298 "master\n", dead_node); 3299 spin_unlock(&mle->spinlock); 3300 } else { 3301 /* Must drop the refcount by one since the assert_master will 3302 * never arrive. This may result in the mle being unlinked and 3303 * freed, but there may still be a process waiting in the 3304 * dlmlock path which is fine. */ 3305 mlog(0, "node %u was expected master\n", dead_node); 3306 atomic_set(&mle->woken, 1); 3307 spin_unlock(&mle->spinlock); 3308 wake_up(&mle->wq); 3309 3310 /* Do not need events any longer, so detach from heartbeat */ 3311 __dlm_mle_detach_hb_events(dlm, mle); 3312 __dlm_put_mle(mle); 3313 } 3314 } 3315 3316 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) 3317 { 3318 struct dlm_master_list_entry *mle; 3319 struct dlm_lock_resource *res; 3320 struct hlist_head *bucket; 3321 struct hlist_node *tmp; 3322 unsigned int i; 3323 3324 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); 3325 top: 3326 assert_spin_locked(&dlm->spinlock); 3327 3328 /* clean the master list */ 3329 spin_lock(&dlm->master_lock); 3330 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 3331 bucket = dlm_master_hash(dlm, i); 3332 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { 3333 BUG_ON(mle->type != DLM_MLE_BLOCK && 3334 mle->type != DLM_MLE_MASTER && 3335 mle->type != DLM_MLE_MIGRATION); 3336 3337 /* MASTER mles are initiated locally. The waiting 3338 * process will notice the node map change shortly. 3339 * Let that happen as normal. */ 3340 if (mle->type == DLM_MLE_MASTER) 3341 continue; 3342 3343 /* BLOCK mles are initiated by other nodes. Need to 3344 * clean up if the dead node would have been the 3345 * master. */ 3346 if (mle->type == DLM_MLE_BLOCK) { 3347 dlm_clean_block_mle(dlm, mle, dead_node); 3348 continue; 3349 } 3350 3351 /* Everything else is a MIGRATION mle */ 3352 3353 /* The rule for MIGRATION mles is that the master 3354 * becomes UNKNOWN if *either* the original or the new 3355 * master dies. All UNKNOWN lockres' are sent to 3356 * whichever node becomes the recovery master. The new 3357 * master is responsible for determining if there is 3358 * still a master for this lockres, or if he needs to 3359 * take over mastery. Either way, this node should 3360 * expect another message to resolve this. */ 3361 3362 if (mle->master != dead_node && 3363 mle->new_master != dead_node) 3364 continue; 3365 3366 if (mle->new_master == dead_node && mle->inuse) { 3367 mlog(ML_NOTICE, "%s: target %u died during " 3368 "migration from %u, the MLE is " 3369 "still keep used, ignore it!\n", 3370 dlm->name, dead_node, 3371 mle->master); 3372 continue; 3373 } 3374 3375 /* If we have reached this point, this mle needs to be 3376 * removed from the list and freed. */ 3377 dlm_clean_migration_mle(dlm, mle); 3378 3379 mlog(0, "%s: node %u died during migration from " 3380 "%u to %u!\n", dlm->name, dead_node, mle->master, 3381 mle->new_master); 3382 3383 /* If we find a lockres associated with the mle, we've 3384 * hit this rare case that messes up our lock ordering. 3385 * If so, we need to drop the master lock so that we can 3386 * take the lockres lock, meaning that we will have to 3387 * restart from the head of list. */ 3388 res = dlm_reset_mleres_owner(dlm, mle); 3389 if (res) 3390 /* restart */ 3391 goto top; 3392 3393 /* This may be the last reference */ 3394 __dlm_put_mle(mle); 3395 } 3396 } 3397 spin_unlock(&dlm->master_lock); 3398 } 3399 3400 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 3401 u8 old_master) 3402 { 3403 struct dlm_node_iter iter; 3404 int ret = 0; 3405 3406 spin_lock(&dlm->spinlock); 3407 dlm_node_iter_init(dlm->domain_map, &iter); 3408 clear_bit(old_master, iter.node_map); 3409 clear_bit(dlm->node_num, iter.node_map); 3410 spin_unlock(&dlm->spinlock); 3411 3412 /* ownership of the lockres is changing. account for the 3413 * mastery reference here since old_master will briefly have 3414 * a reference after the migration completes */ 3415 spin_lock(&res->spinlock); 3416 dlm_lockres_set_refmap_bit(dlm, res, old_master); 3417 spin_unlock(&res->spinlock); 3418 3419 mlog(0, "now time to do a migrate request to other nodes\n"); 3420 ret = dlm_do_migrate_request(dlm, res, old_master, 3421 dlm->node_num, &iter); 3422 if (ret < 0) { 3423 mlog_errno(ret); 3424 goto leave; 3425 } 3426 3427 mlog(0, "doing assert master of %.*s to all except the original node\n", 3428 res->lockname.len, res->lockname.name); 3429 /* this call now finishes out the nodemap 3430 * even if one or more nodes die */ 3431 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3432 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3433 if (ret < 0) { 3434 /* no longer need to retry. all living nodes contacted. */ 3435 mlog_errno(ret); 3436 ret = 0; 3437 } 3438 3439 bitmap_zero(iter.node_map, O2NM_MAX_NODES); 3440 set_bit(old_master, iter.node_map); 3441 mlog(0, "doing assert master of %.*s back to %u\n", 3442 res->lockname.len, res->lockname.name, old_master); 3443 ret = dlm_do_assert_master(dlm, res, iter.node_map, 3444 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3445 if (ret < 0) { 3446 mlog(0, "assert master to original master failed " 3447 "with %d.\n", ret); 3448 /* the only nonzero status here would be because of 3449 * a dead original node. we're done. */ 3450 ret = 0; 3451 } 3452 3453 /* all done, set the owner, clear the flag */ 3454 spin_lock(&res->spinlock); 3455 dlm_set_lockres_owner(dlm, res, dlm->node_num); 3456 res->state &= ~DLM_LOCK_RES_MIGRATING; 3457 spin_unlock(&res->spinlock); 3458 /* re-dirty it on the new master */ 3459 dlm_kick_thread(dlm, res); 3460 wake_up(&res->wq); 3461 leave: 3462 return ret; 3463 } 3464 3465 /* 3466 * LOCKRES AST REFCOUNT 3467 * this is integral to migration 3468 */ 3469 3470 /* for future intent to call an ast, reserve one ahead of time. 3471 * this should be called only after waiting on the lockres 3472 * with dlm_wait_on_lockres, and while still holding the 3473 * spinlock after the call. */ 3474 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) 3475 { 3476 assert_spin_locked(&res->spinlock); 3477 if (res->state & DLM_LOCK_RES_MIGRATING) { 3478 __dlm_print_one_lock_resource(res); 3479 } 3480 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3481 3482 atomic_inc(&res->asts_reserved); 3483 } 3484 3485 /* 3486 * used to drop the reserved ast, either because it went unused, 3487 * or because the ast/bast was actually called. 3488 * 3489 * also, if there is a pending migration on this lockres, 3490 * and this was the last pending ast on the lockres, 3491 * atomically set the MIGRATING flag before we drop the lock. 3492 * this is how we ensure that migration can proceed with no 3493 * asts in progress. note that it is ok if the state of the 3494 * queues is such that a lock should be granted in the future 3495 * or that a bast should be fired, because the new master will 3496 * shuffle the lists on this lockres as soon as it is migrated. 3497 */ 3498 void dlm_lockres_release_ast(struct dlm_ctxt *dlm, 3499 struct dlm_lock_resource *res) 3500 { 3501 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) 3502 return; 3503 3504 if (!res->migration_pending) { 3505 spin_unlock(&res->spinlock); 3506 return; 3507 } 3508 3509 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); 3510 res->migration_pending = 0; 3511 res->state |= DLM_LOCK_RES_MIGRATING; 3512 spin_unlock(&res->spinlock); 3513 wake_up(&res->wq); 3514 wake_up(&dlm->migration_wq); 3515 } 3516 3517 void dlm_force_free_mles(struct dlm_ctxt *dlm) 3518 { 3519 int i; 3520 struct hlist_head *bucket; 3521 struct dlm_master_list_entry *mle; 3522 struct hlist_node *tmp; 3523 3524 /* 3525 * We notified all other nodes that we are exiting the domain and 3526 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still 3527 * around we force free them and wake any processes that are waiting 3528 * on the mles 3529 */ 3530 spin_lock(&dlm->spinlock); 3531 spin_lock(&dlm->master_lock); 3532 3533 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); 3534 BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES)); 3535 3536 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 3537 bucket = dlm_master_hash(dlm, i); 3538 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { 3539 if (mle->type != DLM_MLE_BLOCK) { 3540 mlog(ML_ERROR, "bad mle: %p\n", mle); 3541 dlm_print_one_mle(mle); 3542 } 3543 atomic_set(&mle->woken, 1); 3544 wake_up(&mle->wq); 3545 3546 __dlm_unlink_mle(dlm, mle); 3547 __dlm_mle_detach_hb_events(dlm, mle); 3548 __dlm_put_mle(mle); 3549 } 3550 } 3551 spin_unlock(&dlm->master_lock); 3552 spin_unlock(&dlm->spinlock); 3553 } 3554