/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	if (dlm != mle->dlm)
		return 0;

	if (namelen != mle->mnamelen ||
	    memcmp(name, mle->mname, namelen) != 0)
		return 0;

	return 1;
}

static struct kmem_cache *dlm_lockres_cache;
static struct kmem_cache *dlm_lockname_cache;
static struct kmem_cache *dlm_mle_cache;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);


static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);


int dlm_is_host_down(int errno)
{
	switch (errno) {
	case -EBADF:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ETIMEDOUT:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -ENETRESET:
	case -ESHUTDOWN:
	case -ENOPROTOOPT:
	case -EINVAL:	/* if returned from our tcp code,
			   this means there is no socket */
		return 1;
	}
	return 0;
}


/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}

static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

}
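/*
 * Illustrative sketch (not compiled): the typical way the inuse pin is
 * used.  A caller takes the pin while holding both locks, drops the
 * locks to do blocking work, and releases the pin afterwards;
 * dlm_put_mle_inuse() reacquires both locks itself.  The function name
 * below is hypothetical; compare dlm_get_lock_resource() later in this
 * file for the real usage.
 */
#if 0
static void example_pin_mle_across_sleep(struct dlm_ctxt *dlm,
					 struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);		/* both locks must be held here */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* safe to block here: the mle cannot be freed while pinned */

	dlm_put_mle_inuse(mle);		/* takes and drops both locks */
}
#endif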
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}

static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->mleres = res;
		memcpy(mle->mname, res->lockname.name, res->lockname.len);
		mle->mnamelen = res->lockname.len;
		mle->mnamehash = res->lockname.hash;
	} else {
		BUG_ON(!name);
		mle->mleres = NULL;
		memcpy(mle->mname, name, namelen);
		mle->mnamelen = namelen;
		mle->mnamehash = dlm_lockid_hash(name, namelen);
	}

	atomic_inc(&dlm->mle_tot_count[mle->type]);
	atomic_inc(&dlm->mle_cur_count[mle->type]);

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	if (!hlist_unhashed(&mle->master_hash_node))
		hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	struct hlist_head *bucket;

	assert_spin_locked(&dlm->master_lock);

	bucket = dlm_master_hash(dlm, mle->mnamehash);
	hlist_add_head(&mle->master_hash_node, bucket);
}

/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct hlist_head *bucket;
	unsigned int hash;

	assert_spin_locked(&dlm->master_lock);

	hash = dlm_lockid_hash(name, namelen);
	bucket = dlm_master_hash(dlm, hash);
	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}
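/*
 * Illustrative sketch (not compiled): looking up an mle by lock name.
 * dlm_find_mle() requires dlm->master_lock and returns a referenced
 * entry, so the caller must dlm_put_mle() it when done (with no
 * spinlocks held).  The function name is hypothetical; the same
 * pattern appears in the request handlers below.
 */
#if 0
static int example_lookup_mle(struct dlm_ctxt *dlm,
			      const char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *mle = NULL;
	int found;

	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &mle, (char *)name, namelen);
	spin_unlock(&dlm->master_lock);

	if (found)
		dlm_put_mle(mle);	/* drop the ref dlm_find_mle took */
	return found;
}
#endif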
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}


int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
	     mle->type);

	/* remove from list if not already */
	__dlm_unlink_mle(dlm, mle);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	atomic_dec(&dlm->mle_cur_count[mle->type]);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}


/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache) {
		kmem_cache_destroy(dlm_lockname_cache);
		dlm_lockname_cache = NULL;
	}

	if (dlm_lockres_cache) {
		kmem_cache_destroy(dlm_lockres_cache);
		dlm_lockres_cache = NULL;
	}
}
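/*
 * Illustrative sketch (not compiled): the expected init/teardown
 * ordering for the caches above during module setup.  Error handling
 * relies on dlm_destroy_master_caches() tolerating partially created
 * caches; the function name is hypothetical.
 */
#if 0
static int example_setup_caches(void)
{
	int ret;

	ret = dlm_init_master_caches();	/* lockres + lockname caches */
	if (ret)
		return ret;

	ret = dlm_init_mle_cache();	/* mle cache */
	if (ret) {
		dlm_destroy_master_caches();
		return ret;
	}
	return 0;
}
#endif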
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;

	res = container_of(kref, struct dlm_lock_resource, refs);
	dlm = res->dlm;

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	atomic_dec(&dlm->res_cur_count);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}
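/*
 * Illustrative sketch (not compiled): the lockres refcount lifecycle.
 * dlm_new_lockres() (below) returns a resource holding the single
 * reference from kref_init(); the final dlm_lockres_put() drops it and
 * triggers dlm_lockres_release() above.  The name below is arbitrary
 * and the function is hypothetical; a real caller would also hash the
 * resource and take further refs/pins before using it.
 */
#if 0
static void example_lockres_lifecycle(struct dlm_ctxt *dlm)
{
	static const char name[] = "EXAMPLELOCK";	/* arbitrary name */
	struct dlm_lock_resource *res;

	res = dlm_new_lockres(dlm, name, sizeof(name));	/* includes the NUL */
	if (!res)
		return;

	/* ... use res; extra users take kref_get(&res->refs) ... */

	dlm_lockres_put(res);	/* last put frees name and resource */
}
#endif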
static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;
	res->inflight_assert_workers = 0;

	res->dlm = dlm;

	kref_init(&res->refs);

	atomic_inc(&dlm->res_tot_count);
	atomic_inc(&dlm->res_cur_count);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	spin_lock(&dlm->spinlock);
	list_add_tail(&res->tracking, &dlm->tracking_list);
	spin_unlock(&dlm->spinlock);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}

void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	set_bit(bit, res->refmap);
}

void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	clear_bit(bit, res->refmap);
}

static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res)
{
	res->inflight_locks++;

	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));
}

void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	__dlm_lockres_grab_inflight_ref(dlm, res);
}

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);

	res->inflight_locks--;

	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));

	wake_up(&res->wq);
}
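/*
 * Illustrative sketch (not compiled): pinning a lockres against purge
 * with an inflight reference.  Both grab and drop must run under
 * res->spinlock; the drop side wakes res->wq so waiters (e.g. the
 * purge path) can re-check.  The function name is hypothetical.
 */
#if 0
static void example_pin_lockres(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	dlm_lockres_grab_inflight_ref(dlm, res);	/* pin */
	spin_unlock(&res->spinlock);

	/* ... resource cannot be purged while inflight_locks > 0 ... */

	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);	/* unpin + wake */
	spin_unlock(&res->spinlock);
}
#endif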
void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	res->inflight_assert_workers++;
	mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_assert_workers);
}

static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	BUG_ON(res->inflight_assert_workers == 0);
	res->inflight_assert_workers--;
	mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_assert_workers);
}

static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
					     struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_drop_inflight_worker(dlm, res);
	spin_unlock(&res->spinlock);
}

/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags)
{
	struct dlm_lock_resource *tmpres = NULL, *res = NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		spin_unlock(&dlm->spinlock);
		spin_lock(&tmpres->spinlock);
		/* Wait on the thread that is mastering the resource */
		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			__dlm_wait_on_lockres(tmpres);
			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Wait on the resource purge to complete before continuing */
		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
			BUG_ON(tmpres->owner == dlm->node_num);
			__dlm_wait_on_lockres_flags(tmpres,
						    DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Grab inflight ref to pin the resource */
		dlm_lockres_grab_inflight_ref(dlm, tmpres);

		spin_unlock(&tmpres->spinlock);
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ? "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			if (mig)
				msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);

	/* since this lockres is new it doesn't require the spinlock */
	__dlm_lockres_grab_inflight_ref(dlm, res);

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(0, "%s: Recovery map is not empty, but must "
			     "master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s: res %.*s, Requests only up to %u but "
			     "master is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s: res %.*s, Node map changed, redo the master "
		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
			     "dlm_wait_for_lock_mastery, blocked = %d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
	     res->lockname.name, res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}
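/*
 * Illustrative sketch (not compiled): how a lock path might obtain a
 * mastered resource.  dlm_get_lock_resource() returns a referenced,
 * inflight-pinned lockres with a known owner, or NULL on allocation
 * failure.  The function name and the namelen convention (trailing
 * NUL included, per the comment above) are assumptions here; see
 * dlmlock.c for the real caller.
 */
#if 0
static int example_master_lock(struct dlm_ctxt *dlm, const char *lockid)
{
	struct dlm_lock_resource *res;

	/* pass LKM_LOCAL instead of 0 when the caller knows no other
	 * node can hold this lock, skipping mastery entirely */
	res = dlm_get_lock_resource(dlm, lockid, strlen(lockid) + 1, 0);
	if (!res)
		return -ENOMEM;

	/* ... queue a lock on res; res->owner is now valid ... */

	dlm_lockres_put(res);
	return 0;
}
#endif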

#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		/*
		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			     atomic_read(&mle->mle_refs.refcount),
			     res->lockname.len, res->lockname.name);
		*/
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;	/* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}

struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}
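/*
 * Illustrative sketch (not compiled): the diff iterator computes the
 * XOR of two node bitmaps and walks its set bits, reporting NODE_DOWN
 * for nodes only in the original map and NODE_UP for nodes only in
 * the current one.  The bitmap values below are made up for the
 * example.
 */
#if 0
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	unsigned long orig[BITS_TO_LONGS(O2NM_MAX_NODES)] = { 0x05 };	/* nodes 0,2 */
	unsigned long cur[BITS_TO_LONGS(O2NM_MAX_NODES)]  = { 0x06 };	/* nodes 1,2 */

	dlm_bitmap_diff_iter_init(&bdi, orig, cur);
	while ((node = dlm_bitmap_diff_iter_next(&bdi, &sc)) >= 0) {
		/* reports node 0 as NODE_DOWN, node 1 as NODE_UP;
		 * node 2 is unchanged and never reported */
	}
#endif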

static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
							   O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					     " while this node was blocked "
					     "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							       O2NM_MAX_NODES,
							       lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->mleres = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}
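/*
 * Illustrative sketch (not compiled): dlm_node_iter is the simpler
 * cousin of the diff iterator above; it walks every set bit in a node
 * map once.  This is the pattern dlm_get_lock_resource() and
 * dlm_do_assert_master() use to contact each voting node.
 */
#if 0
	struct dlm_node_iter iter;
	int nodenum;

	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* ... send one message to nodenum ... */
	}
#endif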
%u\n", response); 1394 BUG(); 1395 } 1396 spin_unlock(&mle->spinlock); 1397 if (resend) { 1398 /* this is also totally crude */ 1399 msleep(50); 1400 goto again; 1401 } 1402 1403 out: 1404 return ret; 1405 } 1406 1407 /* 1408 * locks that can be taken here: 1409 * dlm->spinlock 1410 * res->spinlock 1411 * mle->spinlock 1412 * dlm->master_list 1413 * 1414 * if possible, TRIM THIS DOWN!!! 1415 */ 1416 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, 1417 void **ret_data) 1418 { 1419 u8 response = DLM_MASTER_RESP_MAYBE; 1420 struct dlm_ctxt *dlm = data; 1421 struct dlm_lock_resource *res = NULL; 1422 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; 1423 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; 1424 char *name; 1425 unsigned int namelen, hash; 1426 int found, ret; 1427 int set_maybe; 1428 int dispatch_assert = 0; 1429 1430 if (!dlm_grab(dlm)) 1431 return DLM_MASTER_RESP_NO; 1432 1433 if (!dlm_domain_fully_joined(dlm)) { 1434 response = DLM_MASTER_RESP_NO; 1435 goto send_response; 1436 } 1437 1438 name = request->name; 1439 namelen = request->namelen; 1440 hash = dlm_lockid_hash(name, namelen); 1441 1442 if (namelen > DLM_LOCKID_NAME_MAX) { 1443 response = DLM_IVBUFLEN; 1444 goto send_response; 1445 } 1446 1447 way_up_top: 1448 spin_lock(&dlm->spinlock); 1449 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 1450 if (res) { 1451 spin_unlock(&dlm->spinlock); 1452 1453 /* take care of the easy cases up front */ 1454 spin_lock(&res->spinlock); 1455 1456 /* 1457 * Right after dlm spinlock was released, dlm_thread could have 1458 * purged the lockres. Check if lockres got unhashed. If so 1459 * start over. 1460 */ 1461 if (hlist_unhashed(&res->hash_node)) { 1462 spin_unlock(&res->spinlock); 1463 dlm_lockres_put(res); 1464 goto way_up_top; 1465 } 1466 1467 if (res->state & (DLM_LOCK_RES_RECOVERING| 1468 DLM_LOCK_RES_MIGRATING)) { 1469 spin_unlock(&res->spinlock); 1470 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " 1471 "being recovered/migrated\n"); 1472 response = DLM_MASTER_RESP_ERROR; 1473 if (mle) 1474 kmem_cache_free(dlm_mle_cache, mle); 1475 goto send_response; 1476 } 1477 1478 if (res->owner == dlm->node_num) { 1479 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); 1480 spin_unlock(&res->spinlock); 1481 response = DLM_MASTER_RESP_YES; 1482 if (mle) 1483 kmem_cache_free(dlm_mle_cache, mle); 1484 1485 /* this node is the owner. 1486 * there is some extra work that needs to 1487 * happen now. the requesting node has 1488 * caused all nodes up to this one to 1489 * create mles. this node now needs to 1490 * go back and clean those up. */ 1491 dispatch_assert = 1; 1492 goto send_response; 1493 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { 1494 spin_unlock(&res->spinlock); 1495 // mlog(0, "node %u is the master\n", res->owner); 1496 response = DLM_MASTER_RESP_NO; 1497 if (mle) 1498 kmem_cache_free(dlm_mle_cache, mle); 1499 goto send_response; 1500 } 1501 1502 /* ok, there is no owner. either this node is 1503 * being blocked, or it is actively trying to 1504 * master this lock. 
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(dlm, res,
							   request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		spin_lock(&res->spinlock);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			dlm_lockres_put(res);
		} else
			__dlm_lockres_grab_inflight_worker(dlm, res);
		spin_unlock(&res->spinlock);
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	dlm_put(dlm);
	return response;
}
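/*
 * Illustrative sketch (not compiled): how the handler above is wired
 * to the network stack.  The real registration lives in dlmdomain.c;
 * this mirrors that call under the assumption of the o2net handler
 * API declared in cluster/tcp.h.
 */
#if 0
	ret = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
				     sizeof(struct dlm_master_request),
				     dlm_master_request_handler,
				     dlm, NULL, &dlm->dlm_domain_handlers);
#endif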

/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", tmpret,
			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something horribly messed.  kill thyself. */
			mlog(ML_ERROR, "during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, to);
			spin_unlock(&res->spinlock);
		}
	}

	if (reassert)
		goto again;

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	return ret;
}

/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0, have_lockres_ref = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s) %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);

	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
				     "but current owner is %u! (%.*s)\n",
				     assert->node_idx, res->owner, namelen,
				     name);
				__dlm_print_one_lock_resource(res);
				BUG();
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}

		}
ok:
		spin_unlock(&res->spinlock);
	}
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
						    nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx) {
					master_request = 1;
					break;
				}
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			int wake = 0;
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else {
				dlm_change_lockres_owner(dlm, res, mle->master);
			}
			spin_unlock(&res->spinlock);
			have_lockres_ref = 1;
			if (wake)
				wake_up(&res->wq);
		}

		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->master_lock);

		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		__dlm_unlink_mle(dlm, mle);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		if (extra_ref) {
			/* the assert master message now balances the extra
			 * ref given by the master / migration request message.
			 * if this is the last put, it will be removed
			 * from the list. */
			__dlm_put_mle(mle);
		}
		spin_unlock(&dlm->master_lock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}
	spin_unlock(&dlm->spinlock);

done:
	ret = 0;
	if (res) {
		spin_lock(&res->spinlock);
		res->state |= DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		*ret_data = (void *)res;
	}
	dlm_put(dlm);
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		/* positive. negative would shoot down the node. */
		ret |= DLM_ASSERT_RESPONSE_REASSERT;
		if (!have_lockres_ref) {
			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
			     "mle present here for %s:%.*s, but no lockres!\n",
			     assert->node_idx, dlm->name, namelen, name);
		}
	}
	if (have_lockres_ref) {
		/* let the master know we have a reference to the lockres */
		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
		     dlm->name, namelen, name, assert->node_idx);
	}
	return ret;

kill:
	/* kill the caller! */
	mlog(ML_ERROR, "Bad message received from another node. Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	__dlm_print_one_lock_resource(res);
	spin_unlock(&res->spinlock);
	spin_lock(&dlm->master_lock);
	if (mle)
		__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
	*ret_data = (void *)res;
	dlm_put(dlm);
	return -EINVAL;
}
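
/*
 * Note the handler/post-handler contract: on success the handler stores
 * the lockres in *ret_data with DLM_LOCK_RES_SETREF_INPROG set, and the
 * post handler below, which runs after the reply has been sent, clears
 * the flag and wakes waiters.  Presumably this keeps an incoming DEREF
 * (which defers while SETREF_INPROG is set, see dlm_deref_lockres_handler
 * below) ordered against our MASTERY_REF answer.
 */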
void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;

	if (ret_data) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
		dlm_lockres_put(res);
	}
	return;
}

int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;
}
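
/*
 * A note on the worker below: the nodemap it sends to is a private
 * snapshot of dlm->domain_map, and with ignore_higher set it prunes the
 * original caller and every node numbered above ourselves, so the assert
 * only reaches lower-numbered nodes whose stale mles need the cleanup.
 */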
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	dlm = item->dlm;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if this is just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/*
	 * If we're migrating this lock to someone else, we are no
	 * longer allowed to assert our own mastery.  OTOH, we need to
	 * prevent migration from starting while we're still asserting
	 * our dominance.  The reserved ast delays migration.
	 */
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration.  Skipping assert, "
		     "the new master will handle that.\n");
		spin_unlock(&res->spinlock);
		goto put;
	} else
		__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		if (!dlm_is_host_down(ret))
			mlog_errno(ret);
	}

	/* Ok, we've asserted ourselves.  Let's let migration start. */
	dlm_lockres_release_ast(dlm, res);

put:
	dlm_lockres_drop_inflight_worker(dlm, res);

	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}

/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;
	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
			ret = 0;
		}

		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* check to see if this master is in the recovery map */
			spin_lock(&dlm->spinlock);
			if (test_bit(master, dlm->recovery_map)) {
				mlog(ML_NOTICE, "%s: node %u has not seen "
				     "node %u go down yet, and thinks the "
				     "dead node is mastering the recovery "
				     "lock.  must wait.\n", dlm->name,
				     nodenum, master);
				ret = -EAGAIN;
			}
			spin_unlock(&dlm->spinlock);
			mlog(0, "%s: reco lock master is %u\n", dlm->name,
			     master);
			break;
		}
	}
	return ret;
}
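
/*
 * Lockres reference dropping, in short: a non-master node that no longer
 * uses a lockres sends DLM_DEREF_LOCKRES_MSG to the owner, and the owner
 * clears that node's bit in the refmap.  dlm_lockres_calc_usage() then
 * re-evaluates whether the lockres can eventually be purged.
 */
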
/*
 * DLM_DEREF_LOCKRES_MSG
 */

int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	struct dlm_deref_lockres deref;
	int ret = 0, r;
	const char *lockname;
	unsigned int namelen;

	lockname = res->lockname.name;
	namelen = res->lockname.len;
	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	memset(&deref, 0, sizeof(deref));
	deref.node_idx = dlm->node_num;
	deref.namelen = namelen;
	memcpy(deref.name, lockname, namelen);

	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
				 &deref, sizeof(deref), res->owner, &r);
	if (ret < 0)
		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
		     dlm->name, namelen, lockname, ret, res->owner);
	else if (r < 0) {
		/* BAD.  other node says I did not have a ref. */
		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
		     dlm->name, namelen, lockname, res->owner, r);
		dlm_print_one_lock_resource(res);
		BUG();
	}
	return ret;
}

int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen;
	int ret = -EINVAL;
	u8 node;
	unsigned int hash;
	struct dlm_work_item *item;
	int cleared = 0;
	int dispatch = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = deref->name;
	namelen = deref->namelen;
	node = deref->node_idx;

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}
	if (deref->node_idx >= O2NM_MAX_NODES) {
		mlog(ML_ERROR, "Invalid node number: %u\n", node);
		goto done;
	}

	hash = dlm_lockid_hash(name, namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
		     dlm->name, namelen, name);
		goto done;
	}
	spin_unlock(&dlm->spinlock);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
		dispatch = 1;
	else {
		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
		if (test_bit(node, res->refmap)) {
			dlm_lockres_clear_refmap_bit(dlm, res, node);
			cleared = 1;
		}
	}
	spin_unlock(&res->spinlock);

	if (!dispatch) {
		if (cleared)
			dlm_lockres_calc_usage(dlm, res);
		else {
			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
			     "but it is already dropped!\n", dlm->name,
			     res->lockname.len, res->lockname.name, node);
			dlm_print_one_lock_resource(res);
		}
		ret = 0;
		goto done;
	}

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto done;
	}

	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
	item->u.dl.deref_res = res;
	item->u.dl.deref_node = node;

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;

done:
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);

	return ret;
}
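
/*
 * The dispatch path above exists because a deref can race with an
 * assert_master exchange that is still in flight: while SETREF_INPROG is
 * set we cannot safely decide the refmap bit, so the work is pushed to
 * the worker below, which first waits for the flag to clear.
 */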
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_lock_resource *res;
	u8 node;
	u8 cleared = 0;

	dlm = item->dlm;
	res = item->u.dl.deref_res;
	node = item->u.dl.deref_node;

	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
	if (test_bit(node, res->refmap)) {
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		dlm_lockres_clear_refmap_bit(dlm, res, node);
		cleared = 1;
	}
	spin_unlock(&res->spinlock);

	if (cleared) {
		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
		     dlm->name, res->lockname.len, res->lockname.name, node);
		dlm_lockres_calc_usage(dlm, res);
	} else {
		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
		     "but it is already dropped!\n", dlm->name,
		     res->lockname.len, res->lockname.name, node);
		dlm_print_one_lock_resource(res);
	}

	dlm_lockres_put(res);
}

/*
 * A migrateable resource is one that is :
 * 1. locally mastered, and,
 * 2. zero local locks, and,
 * 3. one or more non-local locks, or, one or more references
 * Returns 1 if yes, 0 if not.
 */
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	enum dlm_lockres_list idx;
	int nonlocal = 0, node_ref;
	struct list_head *queue;
	struct dlm_lock *lock;
	u64 cookie;

	assert_spin_locked(&res->spinlock);

	/* delay migration when the lockres is in MIGRATING state */
	if (res->state & DLM_LOCK_RES_MIGRATING)
		return 0;

	/* delay migration when the lockres is in RECOVERING state */
	if (res->state & DLM_LOCK_RES_RECOVERING)
		return 0;

	if (res->owner != dlm->node_num)
		return 0;

	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
		queue = dlm_list_idx_to_ptr(res, idx);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node != dlm->node_num) {
				nonlocal++;
				continue;
			}
			cookie = be64_to_cpu(lock->ml.cookie);
			mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
			     "%s list\n", dlm->name, res->lockname.len,
			     res->lockname.name,
			     dlm_get_lock_cookie_node(cookie),
			     dlm_get_lock_cookie_seq(cookie),
			     dlm_list_in_text(idx));
			return 0;
		}
	}

	if (!nonlocal) {
		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
		if (node_ref >= O2NM_MAX_NODES)
			return 0;
	}

	mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
	     res->lockname.name);

	return 1;
}

/*
 * DLM_MIGRATE_LOCKRES
 */
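
/*
 * The migration sequence below, roughly: preallocate the mres page and an
 * mle, add a DLM_MLE_MIGRATION mle (clearing any old one), set the
 * MIGRATING flag via dlm_mark_lockres_migrating(), push all lock state to
 * the target with dlm_send_one_lockres(), then wait for the target's
 * assert_master before handing over ownership and dropping the nonlocal
 * locks.
 */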
static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = 0;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	int wake = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(target == O2NM_MAX_NODES);

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
	     target);

	/* preallocate up front. if this fails, abort */
	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}

fail:
	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
		} else if (mle) {
			kmem_cache_free(dlm_mle_cache, mle);
			mle = NULL;
		}
		goto leave;
	}

	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */

	/* now that remote nodes are spinning on the MIGRATING flag,
	 * ensure that all assert_master work is flushed. */
	flush_workqueue(dlm->dlm_worker);

	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 * also, make sure that all callers of dlm_get_mle
	 * take both dlm->spinlock and dlm->master_lock */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		if (dlm_is_host_down(ret))
			dlm_wait_for_node_death(dlm, target,
						DLM_NODE_DEATH_WAIT_MAX);
		goto leave;
	}

	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */
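
	/* The 5 second timeout below is not itself an error path: on each
	 * wakeup we re-check whether the target has asserted (woken set or
	 * owner changed) and, importantly, whether the target died, since
	 * no wakeup will ever come from a dead node. */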
	/* wait for new node to assert master */
	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
					(atomic_read(&mle->woken) == 1),
					msecs_to_jiffies(5000));

		if (ret >= 0) {
			if (atomic_read(&mle->woken) == 1 ||
			    res->owner == target)
				break;

			mlog(0, "%s:%.*s: timed out during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
			/* avoid hang during shutdown when migrating lockres
			 * to a node which also goes down */
			if (dlm_is_node_dead(dlm, target)) {
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				     dlm->name, res->lockname.len,
				     res->lockname.name, target);
				ret = -EINVAL;
				/* migration failed, detach and clean up mle */
				dlm_mle_detach_hb_events(dlm, mle);
				dlm_put_mle(mle);
				dlm_put_mle_inuse(mle);
				spin_lock(&res->spinlock);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				spin_unlock(&res->spinlock);
				goto leave;
			}
		} else
			mlog(0, "%s:%.*s: caught signal during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, target);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	dlm_remove_nonlocal_locks(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	ret = 0;

	dlm_lockres_calc_usage(dlm, res);

leave:
	/* re-dirty the lockres if we failed */
	if (ret < 0)
		dlm_kick_thread(dlm, res);

	/* wake up waiters if the MIGRATING flag got set
	 * but migration failed */
	if (wake)
		wake_up(&res->wq);

	if (mres)
		free_page((unsigned long)mres);

	dlm_put(dlm);

	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
	     name, target, ret);
	return ret;
}

#define DLM_MIGRATION_RETRY_MS  100

/*
 * Should be called only after beginning the domain leave process.
 * There should not be any remaining locks on nonlocal lock resources,
 * and there should be no local locks left on locally mastered resources.
 *
 * Called with the dlm spinlock held, may drop it to do migration, but
 * will re-acquire before exit.
 *
 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
 */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	int lock_dropped = 0;
	u8 target = O2NM_MAX_NODES;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	if (dlm_is_lockres_migrateable(dlm, res))
		target = dlm_pick_migration_target(dlm, res);
	spin_unlock(&res->spinlock);

	if (target == O2NM_MAX_NODES)
		goto leave;

	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
	spin_unlock(&dlm->spinlock);
	lock_dropped = 1;
	ret = dlm_migrate_lockres(dlm, res, target);
	if (ret)
		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     target, ret);
	spin_lock(&dlm->spinlock);
leave:
	return lock_dropped;
}

int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;
	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}

static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;
	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}

static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	int ret;
	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}
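
/*
 * Roughly how the MIGRATING flag gets set by the function below: reserve
 * an extra ast, flush the dirty work, block new dirtying with
 * BLOCK_DIRTY, then release the reserved ast.  When the final reserved
 * ast is dropped, dlm_lockres_release_ast() (see the end of this file)
 * atomically turns migration_pending into the MIGRATING state and wakes
 * migration_wq.
 */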
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;

	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts */
	dlm_kick_thread(dlm, res);
	/* before waiting on DIRTY, block processes which may
	 * try to dirty the lockres before MIGRATING is set */
	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
	spin_unlock(&res->spinlock);
	/* now wait on any pending asts and the DIRTY state */
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
				dlm_migration_can_proceed(dlm, res, target),
				msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}

	ret = 0;
	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
	 * another try; otherwise, we are sure the MIGRATING state is there,
	 * drop the unneeded state which blocked threads trying to DIRTY
	 */
	spin_lock(&res->spinlock);
	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
	if (!ret)
		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
	spin_unlock(&res->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}
"no":"yes"); 2803 } 2804 if (!dlm_migration_can_proceed(dlm, res, target)) { 2805 mlog(0, "trying again...\n"); 2806 goto again; 2807 } 2808 2809 ret = 0; 2810 /* did the target go down or die? */ 2811 spin_lock(&dlm->spinlock); 2812 if (!test_bit(target, dlm->domain_map)) { 2813 mlog(ML_ERROR, "aha. migration target %u just went down\n", 2814 target); 2815 ret = -EHOSTDOWN; 2816 } 2817 spin_unlock(&dlm->spinlock); 2818 2819 /* 2820 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for 2821 * another try; otherwise, we are sure the MIGRATING state is there, 2822 * drop the unneded state which blocked threads trying to DIRTY 2823 */ 2824 spin_lock(&res->spinlock); 2825 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); 2826 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; 2827 if (!ret) 2828 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); 2829 spin_unlock(&res->spinlock); 2830 2831 /* 2832 * at this point: 2833 * 2834 * o the DLM_LOCK_RES_MIGRATING flag is set if target not down 2835 * o there are no pending asts on this lockres 2836 * o all processes trying to reserve an ast on this 2837 * lockres must wait for the MIGRATING flag to clear 2838 */ 2839 return ret; 2840 } 2841 2842 /* last step in the migration process. 2843 * original master calls this to free all of the dlm_lock 2844 * structures that used to be for other nodes. */ 2845 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, 2846 struct dlm_lock_resource *res) 2847 { 2848 struct list_head *queue = &res->granted; 2849 int i, bit; 2850 struct dlm_lock *lock, *next; 2851 2852 assert_spin_locked(&res->spinlock); 2853 2854 BUG_ON(res->owner == dlm->node_num); 2855 2856 for (i=0; i<3; i++) { 2857 list_for_each_entry_safe(lock, next, queue, list) { 2858 if (lock->ml.node != dlm->node_num) { 2859 mlog(0, "putting lock for node %u\n", 2860 lock->ml.node); 2861 /* be extra careful */ 2862 BUG_ON(!list_empty(&lock->ast_list)); 2863 BUG_ON(!list_empty(&lock->bast_list)); 2864 BUG_ON(lock->ast_pending); 2865 BUG_ON(lock->bast_pending); 2866 dlm_lockres_clear_refmap_bit(dlm, res, 2867 lock->ml.node); 2868 list_del_init(&lock->list); 2869 dlm_lock_put(lock); 2870 /* In a normal unlock, we would have added a 2871 * DLM_UNLOCK_FREE_LOCK action. Force it. */ 2872 dlm_lock_put(lock); 2873 } 2874 } 2875 queue++; 2876 } 2877 bit = 0; 2878 while (1) { 2879 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); 2880 if (bit >= O2NM_MAX_NODES) 2881 break; 2882 /* do not clear the local node reference, if there is a 2883 * process holding this, let it drop the ref itself */ 2884 if (bit != dlm->node_num) { 2885 mlog(0, "%s:%.*s: node %u had a ref to this " 2886 "migrating lockres, clearing\n", dlm->name, 2887 res->lockname.len, res->lockname.name, bit); 2888 dlm_lockres_clear_refmap_bit(dlm, res, bit); 2889 } 2890 bit++; 2891 } 2892 } 2893 2894 /* 2895 * Pick a node to migrate the lock resource to. This function selects a 2896 * potential target based first on the locks and then on refmap. It skips 2897 * nodes that are in the process of exiting the domain. 
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	enum dlm_lockres_list idx;
	struct list_head *queue = &res->granted;
	struct dlm_lock *lock;
	int noderef;
	u8 nodenum = O2NM_MAX_NODES;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* Go through all the locks */
	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
		queue = dlm_list_idx_to_ptr(res, idx);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == dlm->node_num)
				continue;
			if (test_bit(lock->ml.node, dlm->exit_domain_map))
				continue;
			nodenum = lock->ml.node;
			goto bail;
		}
	}

	/* Go thru the refmap */
	noderef = -1;
	while (1) {
		noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
					noderef + 1);
		if (noderef >= O2NM_MAX_NODES)
			break;
		if (noderef == dlm->node_num)
			continue;
		if (test_bit(noderef, dlm->exit_domain_map))
			continue;
		nodenum = noderef;
		goto bail;
	}

bail:
	return nodenum;
}
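
/*
 * Note that a MIGRATE_REQUEST also doubles as a refmap hint: a node that
 * answers DLM_MIGRATE_RESPONSE_MASTERY_REF had its mastery of the lockres
 * short-circuited by the request, so the sender must set that node's
 * refmap bit below to keep the lockres pinned for it.
 */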
/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, skip, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		/* We could race exit domain. If exited, skip. */
		spin_lock(&dlm->spinlock);
		skip = (!test_bit(nodenum, dlm->domain_map));
		spin_unlock(&dlm->spinlock);
		if (skip) {
			clear_bit(nodenum, iter->node_map);
			continue;
		}

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
			     "MIGRATE_REQUEST to node %u\n", dlm->name,
			     migrate.namelen, migrate.name, ret, nodenum);
			if (!dlm_is_host_down(ret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
				BUG();
			}
			clear_bit(nodenum, iter->node_map);
			ret = 0;
		} else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
			/* during the migration request we short-circuited
			 * the mastery of the lockres.  make sure we have
			 * a mastery ref for nodenum */
			mlog(0, "%s:%.*s: need ref for node %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     nodenum);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, nodenum);
			spin_unlock(&res->spinlock);
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}

/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);

	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	spin_lock(&dlm->master_lock);
	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

	spin_unlock(&dlm->master_lock);
unlock:
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}

/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold with the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error  mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it so that only one mle will be found */
			__dlm_unlink_mle(dlm, tmp);
			__dlm_mle_detach_hb_events(dlm, tmp);
			if (tmp->type == DLM_MLE_MASTER) {
				ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
				mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
				     "telling master to get ref "
				     "for cleared out mle during "
				     "migration\n", dlm->name,
				     namelen, name, master,
				     new_master);
			}
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	/* the new master will be sending an assert master for this.
	 * at that point we will get the refmap reference */
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	__dlm_insert_mle(dlm, mle);

	return ret;
}

/*
 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
 */
static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
					struct dlm_master_list_entry *mle)
{
	struct dlm_lock_resource *res;

	/* Find the lockres associated to the mle and set its owner to UNK */
	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
				   mle->mnamehash);
	if (res) {
		spin_unlock(&dlm->master_lock);

		/* move lockres onto recovery list */
		spin_lock(&res->spinlock);
		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
		dlm_move_lockres_to_recovery_list(dlm, res);
		spin_unlock(&res->spinlock);
		dlm_lockres_put(res);

		/* about to get rid of mle, detach from heartbeat */
		__dlm_mle_detach_hb_events(dlm, mle);

		/* dump the mle */
		spin_lock(&dlm->master_lock);
		__dlm_put_mle(mle);
		spin_unlock(&dlm->master_lock);
	}

	return res;
}

static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
				    struct dlm_master_list_entry *mle)
{
	__dlm_mle_detach_hb_events(dlm, mle);

	spin_lock(&mle->spinlock);
	__dlm_unlink_mle(dlm, mle);
	atomic_set(&mle->woken, 1);
	spin_unlock(&mle->spinlock);

	wake_up(&mle->wq);
}

static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle, u8 dead_node)
{
	int bit;

	BUG_ON(mle->type != DLM_MLE_BLOCK);

	spin_lock(&mle->spinlock);
	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
	if (bit != dead_node) {
		mlog(0, "mle found, but dead node %u would not have been "
		     "master\n", dead_node);
		spin_unlock(&mle->spinlock);
	} else {
		/* Must drop the refcount by one since the assert_master will
		 * never arrive. This may result in the mle being unlinked and
		 * freed, but there may still be a process waiting in the
		 * dlmlock path which is fine. */
		mlog(0, "node %u was expected master\n", dead_node);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		/* Do not need events any longer, so detach from heartbeat */
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
	}
}
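
/*
 * Dead-node mle cleanup, summarized from the cases below: MASTER mles are
 * left for the local waiter to notice, BLOCK mles are dropped if the dead
 * node was the expected master, and MIGRATION mles are torn down whenever
 * either end of the migration died, with recovery later resolving the
 * true owner.
 */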
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_master_list_entry *mle;
	struct dlm_lock_resource *res;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	unsigned int i;

	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_master_hash(dlm, i);
		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
			BUG_ON(mle->type != DLM_MLE_BLOCK &&
			       mle->type != DLM_MLE_MASTER &&
			       mle->type != DLM_MLE_MIGRATION);

			/* MASTER mles are initiated locally. The waiting
			 * process will notice the node map change shortly.
			 * Let that happen as normal. */
			if (mle->type == DLM_MLE_MASTER)
				continue;

			/* BLOCK mles are initiated by other nodes.  Need to
			 * clean up if the dead node would have been the
			 * master. */
			if (mle->type == DLM_MLE_BLOCK) {
				dlm_clean_block_mle(dlm, mle, dead_node);
				continue;
			}

			/* Everything else is a MIGRATION mle */

			/* The rule for MIGRATION mles is that the master
			 * becomes UNKNOWN if *either* the original or the new
			 * master dies. All UNKNOWN lockres' are sent to
			 * whichever node becomes the recovery master. The new
			 * master is responsible for determining if there is
			 * still a master for this lockres, or if he needs to
			 * take over mastery. Either way, this node should
			 * expect another message to resolve this. */

			if (mle->master != dead_node &&
			    mle->new_master != dead_node)
				continue;

			/* If we have reached this point, this mle needs to be
			 * removed from the list and freed. */
			dlm_clean_migration_mle(dlm, mle);

			mlog(0, "%s: node %u died during migration from "
			     "%u to %u!\n", dlm->name, dead_node, mle->master,
			     mle->new_master);

			/* If we find a lockres associated with the mle, we've
			 * hit this rare case that messes up our lock ordering.
			 * If so, we need to drop the master lock so that we can
			 * take the lockres lock, meaning that we will have to
			 * restart from the head of list. */
			res = dlm_reset_mleres_owner(dlm, mle);
			if (res)
				/* restart */
				goto top;

			/* This may be the last reference */
			__dlm_put_mle(mle);
		}
	}
	spin_unlock(&dlm->master_lock);
}

int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	/* ownership of the lockres is changing.  account for the
	 * mastery reference here since old_master will briefly have
	 * a reference after the migration completes */
	spin_lock(&res->spinlock);
	dlm_lockres_set_refmap_bit(dlm, res, old_master);
	spin_unlock(&res->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}

/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */
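
/*
 * A minimal sketch of the pairing, as used by dlm_assert_master_worker()
 * above:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *
 *	... do work that must not overlap migration ...
 *
 *	dlm_lockres_release_ast(dlm, res);
 */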
/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}

void dlm_force_free_mles(struct dlm_ctxt *dlm)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_master_list_entry *mle;
	struct hlist_node *tmp;

	/*
	 * We notified all other nodes that we are exiting the domain and
	 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
	 * around we force free them and wake any processes that are waiting
	 * on the mles
	 */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);

	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));

	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_master_hash(dlm, i);
		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
			if (mle->type != DLM_MLE_BLOCK) {
				mlog(ML_ERROR, "bad mle: %p\n", mle);
				dlm_print_one_mle(mle);
			}
			atomic_set(&mle->woken, 1);
			wake_up(&mle->wq);

			__dlm_unlink_mle(dlm, mle);
			__dlm_mle_detach_hb_events(dlm, mle);
			__dlm_put_mle(mle);
		}
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}