1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * dlmrecovery.c 5 * 6 * recovery stuff 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License as published by the Free Software Foundation; either 13 * version 2 of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public 21 * License along with this program; if not, write to the 22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 23 * Boston, MA 021110-1307, USA. 24 * 25 */ 26 27 28 #include <linux/module.h> 29 #include <linux/fs.h> 30 #include <linux/types.h> 31 #include <linux/slab.h> 32 #include <linux/highmem.h> 33 #include <linux/utsname.h> 34 #include <linux/init.h> 35 #include <linux/sysctl.h> 36 #include <linux/random.h> 37 #include <linux/blkdev.h> 38 #include <linux/socket.h> 39 #include <linux/inet.h> 40 #include <linux/timer.h> 41 #include <linux/kthread.h> 42 #include <linux/delay.h> 43 44 45 #include "cluster/heartbeat.h" 46 #include "cluster/nodemanager.h" 47 #include "cluster/tcp.h" 48 49 #include "dlmapi.h" 50 #include "dlmcommon.h" 51 #include "dlmdomain.h" 52 53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) 54 #include "cluster/masklog.h" 55 56 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); 57 58 static int dlm_recovery_thread(void *data); 59 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); 60 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); 61 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); 62 static int dlm_do_recovery(struct dlm_ctxt *dlm); 63 64 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); 65 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); 66 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 67 static int dlm_request_all_locks(struct dlm_ctxt *dlm, 68 u8 request_from, u8 dead_node); 69 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); 70 71 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); 72 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 73 const char *lockname, int namelen, 74 int total_locks, u64 cookie, 75 u8 flags, u8 master); 76 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 77 struct dlm_migratable_lockres *mres, 78 u8 send_to, 79 struct dlm_lock_resource *res, 80 int total_locks); 81 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 82 struct dlm_lock_resource *res, 83 struct dlm_migratable_lockres *mres); 84 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); 85 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, 86 u8 dead_node, u8 send_to); 87 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); 88 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 89 struct list_head *list, u8 dead_node); 90 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 91 u8 dead_node, u8 new_master); 92 static void dlm_reco_ast(void *astdata); 93 static void dlm_reco_bast(void *astdata, int blocked_type); 94 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); 95 static void dlm_request_all_locks_worker(struct dlm_work_item *item, 96 void *data); 97 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); 98 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 99 struct dlm_lock_resource *res, 100 u8 *real_master); 101 102 static u64 dlm_get_next_mig_cookie(void); 103 104 static DEFINE_SPINLOCK(dlm_reco_state_lock); 105 static DEFINE_SPINLOCK(dlm_mig_cookie_lock); 106 static u64 dlm_mig_cookie = 1; 107 108 static u64 dlm_get_next_mig_cookie(void) 109 { 110 u64 c; 111 spin_lock(&dlm_mig_cookie_lock); 112 c = dlm_mig_cookie; 113 if (dlm_mig_cookie == (~0ULL)) 114 dlm_mig_cookie = 1; 115 else 116 dlm_mig_cookie++; 117 spin_unlock(&dlm_mig_cookie_lock); 118 return c; 119 } 120 121 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, 122 u8 dead_node) 123 { 124 assert_spin_locked(&dlm->spinlock); 125 if (dlm->reco.dead_node != dead_node) 126 mlog(0, "%s: changing dead_node from %u to %u\n", 127 dlm->name, dlm->reco.dead_node, dead_node); 128 dlm->reco.dead_node = dead_node; 129 } 130 131 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm, 132 u8 master) 133 { 134 assert_spin_locked(&dlm->spinlock); 135 mlog(0, "%s: changing new_master from %u to %u\n", 136 dlm->name, dlm->reco.new_master, master); 137 dlm->reco.new_master = master; 138 } 139 140 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm) 141 { 142 assert_spin_locked(&dlm->spinlock); 143 clear_bit(dlm->reco.dead_node, dlm->recovery_map); 144 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 145 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 146 } 147 148 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) 149 { 150 spin_lock(&dlm->spinlock); 151 __dlm_reset_recovery(dlm); 152 spin_unlock(&dlm->spinlock); 153 } 154 155 /* Worker function used during recovery. */ 156 void dlm_dispatch_work(void *data) 157 { 158 struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; 159 LIST_HEAD(tmp_list); 160 struct list_head *iter, *iter2; 161 struct dlm_work_item *item; 162 dlm_workfunc_t *workfunc; 163 int tot=0; 164 165 if (!dlm_joined(dlm)) 166 return; 167 168 spin_lock(&dlm->work_lock); 169 list_splice_init(&dlm->work_list, &tmp_list); 170 spin_unlock(&dlm->work_lock); 171 172 list_for_each_safe(iter, iter2, &tmp_list) { 173 tot++; 174 } 175 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); 176 177 list_for_each_safe(iter, iter2, &tmp_list) { 178 item = list_entry(iter, struct dlm_work_item, list); 179 workfunc = item->func; 180 list_del_init(&item->list); 181 182 /* already have ref on dlm to avoid having 183 * it disappear. just double-check. */ 184 BUG_ON(item->dlm != dlm); 185 186 /* this is allowed to sleep and 187 * call network stuff */ 188 workfunc(item, item->data); 189 190 dlm_put(dlm); 191 kfree(item); 192 } 193 } 194 195 /* 196 * RECOVERY THREAD 197 */ 198 199 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) 200 { 201 /* wake the recovery thread 202 * this will wake the reco thread in one of three places 203 * 1) sleeping with no recovery happening 204 * 2) sleeping with recovery mastered elsewhere 205 * 3) recovery mastered here, waiting on reco data */ 206 207 wake_up(&dlm->dlm_reco_thread_wq); 208 } 209 210 /* Launch the recovery thread */ 211 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) 212 { 213 mlog(0, "starting dlm recovery thread...\n"); 214 215 dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, 216 "dlm_reco_thread"); 217 if (IS_ERR(dlm->dlm_reco_thread_task)) { 218 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); 219 dlm->dlm_reco_thread_task = NULL; 220 return -EINVAL; 221 } 222 223 return 0; 224 } 225 226 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) 227 { 228 if (dlm->dlm_reco_thread_task) { 229 mlog(0, "waiting for dlm recovery thread to exit\n"); 230 kthread_stop(dlm->dlm_reco_thread_task); 231 dlm->dlm_reco_thread_task = NULL; 232 } 233 } 234 235 236 237 /* 238 * this is lame, but here's how recovery works... 239 * 1) all recovery threads cluster wide will work on recovering 240 * ONE node at a time 241 * 2) negotiate who will take over all the locks for the dead node. 242 * thats right... ALL the locks. 243 * 3) once a new master is chosen, everyone scans all locks 244 * and moves aside those mastered by the dead guy 245 * 4) each of these locks should be locked until recovery is done 246 * 5) the new master collects up all of secondary lock queue info 247 * one lock at a time, forcing each node to communicate back 248 * before continuing 249 * 6) each secondary lock queue responds with the full known lock info 250 * 7) once the new master has run all its locks, it sends a ALLDONE! 251 * message to everyone 252 * 8) upon receiving this message, the secondary queue node unlocks 253 * and responds to the ALLDONE 254 * 9) once the new master gets responses from everyone, he unlocks 255 * everything and recovery for this dead node is done 256 *10) go back to 2) while there are still dead nodes 257 * 258 */ 259 260 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) 261 { 262 struct dlm_reco_node_data *ndata; 263 struct dlm_lock_resource *res; 264 265 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", 266 dlm->name, dlm->dlm_reco_thread_task->pid, 267 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", 268 dlm->reco.dead_node, dlm->reco.new_master); 269 270 list_for_each_entry(ndata, &dlm->reco.node_data, list) { 271 char *st = "unknown"; 272 switch (ndata->state) { 273 case DLM_RECO_NODE_DATA_INIT: 274 st = "init"; 275 break; 276 case DLM_RECO_NODE_DATA_REQUESTING: 277 st = "requesting"; 278 break; 279 case DLM_RECO_NODE_DATA_DEAD: 280 st = "dead"; 281 break; 282 case DLM_RECO_NODE_DATA_RECEIVING: 283 st = "receiving"; 284 break; 285 case DLM_RECO_NODE_DATA_REQUESTED: 286 st = "requested"; 287 break; 288 case DLM_RECO_NODE_DATA_DONE: 289 st = "done"; 290 break; 291 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 292 st = "finalize-sent"; 293 break; 294 default: 295 st = "bad"; 296 break; 297 } 298 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", 299 dlm->name, ndata->node_num, st); 300 } 301 list_for_each_entry(res, &dlm->reco.resources, recovering) { 302 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", 303 dlm->name, res->lockname.len, res->lockname.name); 304 } 305 } 306 307 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) 308 309 static int dlm_recovery_thread(void *data) 310 { 311 int status; 312 struct dlm_ctxt *dlm = data; 313 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); 314 315 mlog(0, "dlm thread running for %s...\n", dlm->name); 316 317 while (!kthread_should_stop()) { 318 if (dlm_joined(dlm)) { 319 status = dlm_do_recovery(dlm); 320 if (status == -EAGAIN) { 321 /* do not sleep, recheck immediately. */ 322 continue; 323 } 324 if (status < 0) 325 mlog_errno(status); 326 } 327 328 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 329 kthread_should_stop(), 330 timeout); 331 } 332 333 mlog(0, "quitting DLM recovery thread\n"); 334 return 0; 335 } 336 337 /* returns true when the recovery master has contacted us */ 338 static int dlm_reco_master_ready(struct dlm_ctxt *dlm) 339 { 340 int ready; 341 spin_lock(&dlm->spinlock); 342 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM); 343 spin_unlock(&dlm->spinlock); 344 return ready; 345 } 346 347 /* returns true if node is no longer in the domain 348 * could be dead or just not joined */ 349 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) 350 { 351 int dead; 352 spin_lock(&dlm->spinlock); 353 dead = !test_bit(node, dlm->domain_map); 354 spin_unlock(&dlm->spinlock); 355 return dead; 356 } 357 358 /* returns true if node is no longer in the domain 359 * could be dead or just not joined */ 360 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) 361 { 362 int recovered; 363 spin_lock(&dlm->spinlock); 364 recovered = !test_bit(node, dlm->recovery_map); 365 spin_unlock(&dlm->spinlock); 366 return recovered; 367 } 368 369 370 int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) 371 { 372 if (timeout) { 373 mlog(ML_NOTICE, "%s: waiting %dms for notification of " 374 "death of node %u\n", dlm->name, timeout, node); 375 wait_event_timeout(dlm->dlm_reco_thread_wq, 376 dlm_is_node_dead(dlm, node), 377 msecs_to_jiffies(timeout)); 378 } else { 379 mlog(ML_NOTICE, "%s: waiting indefinitely for notification " 380 "of death of node %u\n", dlm->name, node); 381 wait_event(dlm->dlm_reco_thread_wq, 382 dlm_is_node_dead(dlm, node)); 383 } 384 /* for now, return 0 */ 385 return 0; 386 } 387 388 int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) 389 { 390 if (timeout) { 391 mlog(0, "%s: waiting %dms for notification of " 392 "recovery of node %u\n", dlm->name, timeout, node); 393 wait_event_timeout(dlm->dlm_reco_thread_wq, 394 dlm_is_node_recovered(dlm, node), 395 msecs_to_jiffies(timeout)); 396 } else { 397 mlog(0, "%s: waiting indefinitely for notification " 398 "of recovery of node %u\n", dlm->name, node); 399 wait_event(dlm->dlm_reco_thread_wq, 400 dlm_is_node_recovered(dlm, node)); 401 } 402 /* for now, return 0 */ 403 return 0; 404 } 405 406 /* callers of the top-level api calls (dlmlock/dlmunlock) should 407 * block on the dlm->reco.event when recovery is in progress. 408 * the dlm recovery thread will set this state when it begins 409 * recovering a dead node (as the new master or not) and clear 410 * the state and wake as soon as all affected lock resources have 411 * been marked with the RECOVERY flag */ 412 static int dlm_in_recovery(struct dlm_ctxt *dlm) 413 { 414 int in_recovery; 415 spin_lock(&dlm->spinlock); 416 in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 417 spin_unlock(&dlm->spinlock); 418 return in_recovery; 419 } 420 421 422 void dlm_wait_for_recovery(struct dlm_ctxt *dlm) 423 { 424 if (dlm_in_recovery(dlm)) { 425 mlog(0, "%s: reco thread %d in recovery: " 426 "state=%d, master=%u, dead=%u\n", 427 dlm->name, dlm->dlm_reco_thread_task->pid, 428 dlm->reco.state, dlm->reco.new_master, 429 dlm->reco.dead_node); 430 } 431 wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); 432 } 433 434 static void dlm_begin_recovery(struct dlm_ctxt *dlm) 435 { 436 spin_lock(&dlm->spinlock); 437 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); 438 dlm->reco.state |= DLM_RECO_STATE_ACTIVE; 439 spin_unlock(&dlm->spinlock); 440 } 441 442 static void dlm_end_recovery(struct dlm_ctxt *dlm) 443 { 444 spin_lock(&dlm->spinlock); 445 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); 446 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; 447 spin_unlock(&dlm->spinlock); 448 wake_up(&dlm->reco.event); 449 } 450 451 static int dlm_do_recovery(struct dlm_ctxt *dlm) 452 { 453 int status = 0; 454 int ret; 455 456 spin_lock(&dlm->spinlock); 457 458 /* check to see if the new master has died */ 459 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && 460 test_bit(dlm->reco.new_master, dlm->recovery_map)) { 461 mlog(0, "new master %u died while recovering %u!\n", 462 dlm->reco.new_master, dlm->reco.dead_node); 463 /* unset the new_master, leave dead_node */ 464 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); 465 } 466 467 /* select a target to recover */ 468 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 469 int bit; 470 471 bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0); 472 if (bit >= O2NM_MAX_NODES || bit < 0) 473 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 474 else 475 dlm_set_reco_dead_node(dlm, bit); 476 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { 477 /* BUG? */ 478 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", 479 dlm->reco.dead_node); 480 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); 481 } 482 483 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 484 // mlog(0, "nothing to recover! sleeping now!\n"); 485 spin_unlock(&dlm->spinlock); 486 /* return to main thread loop and sleep. */ 487 return 0; 488 } 489 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", 490 dlm->name, dlm->dlm_reco_thread_task->pid, 491 dlm->reco.dead_node); 492 spin_unlock(&dlm->spinlock); 493 494 /* take write barrier */ 495 /* (stops the list reshuffling thread, proxy ast handling) */ 496 dlm_begin_recovery(dlm); 497 498 if (dlm->reco.new_master == dlm->node_num) 499 goto master_here; 500 501 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 502 /* choose a new master, returns 0 if this node 503 * is the master, -EEXIST if it's another node. 504 * this does not return until a new master is chosen 505 * or recovery completes entirely. */ 506 ret = dlm_pick_recovery_master(dlm); 507 if (!ret) { 508 /* already notified everyone. go. */ 509 goto master_here; 510 } 511 mlog(0, "another node will master this recovery session.\n"); 512 } 513 mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", 514 dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master, 515 dlm->node_num, dlm->reco.dead_node); 516 517 /* it is safe to start everything back up here 518 * because all of the dead node's lock resources 519 * have been marked as in-recovery */ 520 dlm_end_recovery(dlm); 521 522 /* sleep out in main dlm_recovery_thread loop. */ 523 return 0; 524 525 master_here: 526 mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n", 527 dlm->dlm_reco_thread_task->pid, 528 dlm->name, dlm->reco.dead_node, dlm->node_num); 529 530 status = dlm_remaster_locks(dlm, dlm->reco.dead_node); 531 if (status < 0) { 532 /* we should never hit this anymore */ 533 mlog(ML_ERROR, "error %d remastering locks for node %u, " 534 "retrying.\n", status, dlm->reco.dead_node); 535 /* yield a bit to allow any final network messages 536 * to get handled on remaining nodes */ 537 msleep(100); 538 } else { 539 /* success! see if any other nodes need recovery */ 540 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", 541 dlm->name, dlm->reco.dead_node, dlm->node_num); 542 dlm_reset_recovery(dlm); 543 } 544 dlm_end_recovery(dlm); 545 546 /* continue and look for another dead node */ 547 return -EAGAIN; 548 } 549 550 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) 551 { 552 int status = 0; 553 struct dlm_reco_node_data *ndata; 554 struct list_head *iter; 555 int all_nodes_done; 556 int destroy = 0; 557 int pass = 0; 558 559 do { 560 /* we have become recovery master. there is no escaping 561 * this, so just keep trying until we get it. */ 562 status = dlm_init_recovery_area(dlm, dead_node); 563 if (status < 0) { 564 mlog(ML_ERROR, "%s: failed to alloc recovery area, " 565 "retrying\n", dlm->name); 566 msleep(1000); 567 } 568 } while (status != 0); 569 570 /* safe to access the node data list without a lock, since this 571 * process is the only one to change the list */ 572 list_for_each(iter, &dlm->reco.node_data) { 573 ndata = list_entry (iter, struct dlm_reco_node_data, list); 574 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); 575 ndata->state = DLM_RECO_NODE_DATA_REQUESTING; 576 577 mlog(0, "requesting lock info from node %u\n", 578 ndata->node_num); 579 580 if (ndata->node_num == dlm->node_num) { 581 ndata->state = DLM_RECO_NODE_DATA_DONE; 582 continue; 583 } 584 585 do { 586 status = dlm_request_all_locks(dlm, ndata->node_num, 587 dead_node); 588 if (status < 0) { 589 mlog_errno(status); 590 if (dlm_is_host_down(status)) { 591 /* node died, ignore it for recovery */ 592 status = 0; 593 ndata->state = DLM_RECO_NODE_DATA_DEAD; 594 /* wait for the domain map to catch up 595 * with the network state. */ 596 wait_event_timeout(dlm->dlm_reco_thread_wq, 597 dlm_is_node_dead(dlm, 598 ndata->node_num), 599 msecs_to_jiffies(1000)); 600 mlog(0, "waited 1 sec for %u, " 601 "dead? %s\n", ndata->node_num, 602 dlm_is_node_dead(dlm, ndata->node_num) ? 603 "yes" : "no"); 604 } else { 605 /* -ENOMEM on the other node */ 606 mlog(0, "%s: node %u returned " 607 "%d during recovery, retrying " 608 "after a short wait\n", 609 dlm->name, ndata->node_num, 610 status); 611 msleep(100); 612 } 613 } 614 } while (status != 0); 615 616 switch (ndata->state) { 617 case DLM_RECO_NODE_DATA_INIT: 618 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 619 case DLM_RECO_NODE_DATA_REQUESTED: 620 BUG(); 621 break; 622 case DLM_RECO_NODE_DATA_DEAD: 623 mlog(0, "node %u died after requesting " 624 "recovery info for node %u\n", 625 ndata->node_num, dead_node); 626 /* fine. don't need this node's info. 627 * continue without it. */ 628 break; 629 case DLM_RECO_NODE_DATA_REQUESTING: 630 ndata->state = DLM_RECO_NODE_DATA_REQUESTED; 631 mlog(0, "now receiving recovery data from " 632 "node %u for dead node %u\n", 633 ndata->node_num, dead_node); 634 break; 635 case DLM_RECO_NODE_DATA_RECEIVING: 636 mlog(0, "already receiving recovery data from " 637 "node %u for dead node %u\n", 638 ndata->node_num, dead_node); 639 break; 640 case DLM_RECO_NODE_DATA_DONE: 641 mlog(0, "already DONE receiving recovery data " 642 "from node %u for dead node %u\n", 643 ndata->node_num, dead_node); 644 break; 645 } 646 } 647 648 mlog(0, "done requesting all lock info\n"); 649 650 /* nodes should be sending reco data now 651 * just need to wait */ 652 653 while (1) { 654 /* check all the nodes now to see if we are 655 * done, or if anyone died */ 656 all_nodes_done = 1; 657 spin_lock(&dlm_reco_state_lock); 658 list_for_each(iter, &dlm->reco.node_data) { 659 ndata = list_entry (iter, struct dlm_reco_node_data, list); 660 661 mlog(0, "checking recovery state of node %u\n", 662 ndata->node_num); 663 switch (ndata->state) { 664 case DLM_RECO_NODE_DATA_INIT: 665 case DLM_RECO_NODE_DATA_REQUESTING: 666 mlog(ML_ERROR, "bad ndata state for " 667 "node %u: state=%d\n", 668 ndata->node_num, ndata->state); 669 BUG(); 670 break; 671 case DLM_RECO_NODE_DATA_DEAD: 672 mlog(0, "node %u died after " 673 "requesting recovery info for " 674 "node %u\n", ndata->node_num, 675 dead_node); 676 break; 677 case DLM_RECO_NODE_DATA_RECEIVING: 678 case DLM_RECO_NODE_DATA_REQUESTED: 679 mlog(0, "%s: node %u still in state %s\n", 680 dlm->name, ndata->node_num, 681 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? 682 "receiving" : "requested"); 683 all_nodes_done = 0; 684 break; 685 case DLM_RECO_NODE_DATA_DONE: 686 mlog(0, "%s: node %u state is done\n", 687 dlm->name, ndata->node_num); 688 break; 689 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 690 mlog(0, "%s: node %u state is finalize\n", 691 dlm->name, ndata->node_num); 692 break; 693 } 694 } 695 spin_unlock(&dlm_reco_state_lock); 696 697 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, 698 all_nodes_done?"yes":"no"); 699 if (all_nodes_done) { 700 int ret; 701 702 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state 703 * just send a finalize message to everyone and 704 * clean up */ 705 mlog(0, "all nodes are done! send finalize\n"); 706 ret = dlm_send_finalize_reco_message(dlm); 707 if (ret < 0) 708 mlog_errno(ret); 709 710 spin_lock(&dlm->spinlock); 711 dlm_finish_local_lockres_recovery(dlm, dead_node, 712 dlm->node_num); 713 spin_unlock(&dlm->spinlock); 714 mlog(0, "should be done with recovery!\n"); 715 716 mlog(0, "finishing recovery of %s at %lu, " 717 "dead=%u, this=%u, new=%u\n", dlm->name, 718 jiffies, dlm->reco.dead_node, 719 dlm->node_num, dlm->reco.new_master); 720 destroy = 1; 721 status = 0; 722 /* rescan everything marked dirty along the way */ 723 dlm_kick_thread(dlm, NULL); 724 break; 725 } 726 /* wait to be signalled, with periodic timeout 727 * to check for node death */ 728 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, 729 kthread_should_stop(), 730 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); 731 732 } 733 734 if (destroy) 735 dlm_destroy_recovery_area(dlm, dead_node); 736 737 mlog_exit(status); 738 return status; 739 } 740 741 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 742 { 743 int num=0; 744 struct dlm_reco_node_data *ndata; 745 746 spin_lock(&dlm->spinlock); 747 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); 748 /* nodes can only be removed (by dying) after dropping 749 * this lock, and death will be trapped later, so this should do */ 750 spin_unlock(&dlm->spinlock); 751 752 while (1) { 753 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); 754 if (num >= O2NM_MAX_NODES) { 755 break; 756 } 757 BUG_ON(num == dead_node); 758 759 ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS); 760 if (!ndata) { 761 dlm_destroy_recovery_area(dlm, dead_node); 762 return -ENOMEM; 763 } 764 ndata->node_num = num; 765 ndata->state = DLM_RECO_NODE_DATA_INIT; 766 spin_lock(&dlm_reco_state_lock); 767 list_add_tail(&ndata->list, &dlm->reco.node_data); 768 spin_unlock(&dlm_reco_state_lock); 769 num++; 770 } 771 772 return 0; 773 } 774 775 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) 776 { 777 struct list_head *iter, *iter2; 778 struct dlm_reco_node_data *ndata; 779 LIST_HEAD(tmplist); 780 781 spin_lock(&dlm_reco_state_lock); 782 list_splice_init(&dlm->reco.node_data, &tmplist); 783 spin_unlock(&dlm_reco_state_lock); 784 785 list_for_each_safe(iter, iter2, &tmplist) { 786 ndata = list_entry (iter, struct dlm_reco_node_data, list); 787 list_del_init(&ndata->list); 788 kfree(ndata); 789 } 790 } 791 792 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, 793 u8 dead_node) 794 { 795 struct dlm_lock_request lr; 796 enum dlm_status ret; 797 798 mlog(0, "\n"); 799 800 801 mlog(0, "dlm_request_all_locks: dead node is %u, sending request " 802 "to %u\n", dead_node, request_from); 803 804 memset(&lr, 0, sizeof(lr)); 805 lr.node_idx = dlm->node_num; 806 lr.dead_node = dead_node; 807 808 // send message 809 ret = DLM_NOLOCKMGR; 810 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, 811 &lr, sizeof(lr), request_from, NULL); 812 813 /* negative status is handled by caller */ 814 if (ret < 0) 815 mlog_errno(ret); 816 817 // return from here, then 818 // sleep until all received or error 819 return ret; 820 821 } 822 823 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data) 824 { 825 struct dlm_ctxt *dlm = data; 826 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; 827 char *buf = NULL; 828 struct dlm_work_item *item = NULL; 829 830 if (!dlm_grab(dlm)) 831 return -EINVAL; 832 833 if (lr->dead_node != dlm->reco.dead_node) { 834 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local " 835 "dead_node is %u\n", dlm->name, lr->node_idx, 836 lr->dead_node, dlm->reco.dead_node); 837 dlm_print_reco_node_status(dlm); 838 /* this is a hack */ 839 dlm_put(dlm); 840 return -ENOMEM; 841 } 842 BUG_ON(lr->dead_node != dlm->reco.dead_node); 843 844 item = kcalloc(1, sizeof(*item), GFP_NOFS); 845 if (!item) { 846 dlm_put(dlm); 847 return -ENOMEM; 848 } 849 850 /* this will get freed by dlm_request_all_locks_worker */ 851 buf = (char *) __get_free_page(GFP_NOFS); 852 if (!buf) { 853 kfree(item); 854 dlm_put(dlm); 855 return -ENOMEM; 856 } 857 858 /* queue up work for dlm_request_all_locks_worker */ 859 dlm_grab(dlm); /* get an extra ref for the work item */ 860 dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); 861 item->u.ral.reco_master = lr->node_idx; 862 item->u.ral.dead_node = lr->dead_node; 863 spin_lock(&dlm->work_lock); 864 list_add_tail(&item->list, &dlm->work_list); 865 spin_unlock(&dlm->work_lock); 866 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 867 868 dlm_put(dlm); 869 return 0; 870 } 871 872 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) 873 { 874 struct dlm_migratable_lockres *mres; 875 struct dlm_lock_resource *res; 876 struct dlm_ctxt *dlm; 877 LIST_HEAD(resources); 878 struct list_head *iter; 879 int ret; 880 u8 dead_node, reco_master; 881 int skip_all_done = 0; 882 883 dlm = item->dlm; 884 dead_node = item->u.ral.dead_node; 885 reco_master = item->u.ral.reco_master; 886 mres = (struct dlm_migratable_lockres *)data; 887 888 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n", 889 dlm->name, dead_node, reco_master); 890 891 if (dead_node != dlm->reco.dead_node || 892 reco_master != dlm->reco.new_master) { 893 /* worker could have been created before the recovery master 894 * died. if so, do not continue, but do not error. */ 895 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { 896 mlog(ML_NOTICE, "%s: will not send recovery state, " 897 "recovery master %u died, thread=(dead=%u,mas=%u)" 898 " current=(dead=%u,mas=%u)\n", dlm->name, 899 reco_master, dead_node, reco_master, 900 dlm->reco.dead_node, dlm->reco.new_master); 901 } else { 902 mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, " 903 "master=%u), request(dead=%u, master=%u)\n", 904 dlm->name, dlm->reco.dead_node, 905 dlm->reco.new_master, dead_node, reco_master); 906 } 907 goto leave; 908 } 909 910 /* lock resources should have already been moved to the 911 * dlm->reco.resources list. now move items from that list 912 * to a temp list if the dead owner matches. note that the 913 * whole cluster recovers only one node at a time, so we 914 * can safely move UNKNOWN lock resources for each recovery 915 * session. */ 916 dlm_move_reco_locks_to_list(dlm, &resources, dead_node); 917 918 /* now we can begin blasting lockreses without the dlm lock */ 919 920 /* any errors returned will be due to the new_master dying, 921 * the dlm_reco_thread should detect this */ 922 list_for_each(iter, &resources) { 923 res = list_entry (iter, struct dlm_lock_resource, recovering); 924 ret = dlm_send_one_lockres(dlm, res, mres, reco_master, 925 DLM_MRES_RECOVERY); 926 if (ret < 0) { 927 mlog(ML_ERROR, "%s: node %u went down while sending " 928 "recovery state for dead node %u, ret=%d\n", dlm->name, 929 reco_master, dead_node, ret); 930 skip_all_done = 1; 931 break; 932 } 933 } 934 935 /* move the resources back to the list */ 936 spin_lock(&dlm->spinlock); 937 list_splice_init(&resources, &dlm->reco.resources); 938 spin_unlock(&dlm->spinlock); 939 940 if (!skip_all_done) { 941 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); 942 if (ret < 0) { 943 mlog(ML_ERROR, "%s: node %u went down while sending " 944 "recovery all-done for dead node %u, ret=%d\n", 945 dlm->name, reco_master, dead_node, ret); 946 } 947 } 948 leave: 949 free_page((unsigned long)data); 950 } 951 952 953 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) 954 { 955 int ret, tmpret; 956 struct dlm_reco_data_done done_msg; 957 958 memset(&done_msg, 0, sizeof(done_msg)); 959 done_msg.node_idx = dlm->node_num; 960 done_msg.dead_node = dead_node; 961 mlog(0, "sending DATA DONE message to %u, " 962 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, 963 done_msg.dead_node); 964 965 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, 966 sizeof(done_msg), send_to, &tmpret); 967 if (ret < 0) { 968 if (!dlm_is_host_down(ret)) { 969 mlog_errno(ret); 970 mlog(ML_ERROR, "%s: unknown error sending data-done " 971 "to %u\n", dlm->name, send_to); 972 BUG(); 973 } 974 } else 975 ret = tmpret; 976 return ret; 977 } 978 979 980 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data) 981 { 982 struct dlm_ctxt *dlm = data; 983 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; 984 struct list_head *iter; 985 struct dlm_reco_node_data *ndata = NULL; 986 int ret = -EINVAL; 987 988 if (!dlm_grab(dlm)) 989 return -EINVAL; 990 991 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " 992 "node_idx=%u, this node=%u\n", done->dead_node, 993 dlm->reco.dead_node, done->node_idx, dlm->node_num); 994 995 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), 996 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " 997 "node_idx=%u, this node=%u\n", done->dead_node, 998 dlm->reco.dead_node, done->node_idx, dlm->node_num); 999 1000 spin_lock(&dlm_reco_state_lock); 1001 list_for_each(iter, &dlm->reco.node_data) { 1002 ndata = list_entry (iter, struct dlm_reco_node_data, list); 1003 if (ndata->node_num != done->node_idx) 1004 continue; 1005 1006 switch (ndata->state) { 1007 /* should have moved beyond INIT but not to FINALIZE yet */ 1008 case DLM_RECO_NODE_DATA_INIT: 1009 case DLM_RECO_NODE_DATA_DEAD: 1010 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 1011 mlog(ML_ERROR, "bad ndata state for node %u:" 1012 " state=%d\n", ndata->node_num, 1013 ndata->state); 1014 BUG(); 1015 break; 1016 /* these states are possible at this point, anywhere along 1017 * the line of recovery */ 1018 case DLM_RECO_NODE_DATA_DONE: 1019 case DLM_RECO_NODE_DATA_RECEIVING: 1020 case DLM_RECO_NODE_DATA_REQUESTED: 1021 case DLM_RECO_NODE_DATA_REQUESTING: 1022 mlog(0, "node %u is DONE sending " 1023 "recovery data!\n", 1024 ndata->node_num); 1025 1026 ndata->state = DLM_RECO_NODE_DATA_DONE; 1027 ret = 0; 1028 break; 1029 } 1030 } 1031 spin_unlock(&dlm_reco_state_lock); 1032 1033 /* wake the recovery thread, some node is done */ 1034 if (!ret) 1035 dlm_kick_recovery_thread(dlm); 1036 1037 if (ret < 0) 1038 mlog(ML_ERROR, "failed to find recovery node data for node " 1039 "%u\n", done->node_idx); 1040 dlm_put(dlm); 1041 1042 mlog(0, "leaving reco data done handler, ret=%d\n", ret); 1043 return ret; 1044 } 1045 1046 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, 1047 struct list_head *list, 1048 u8 dead_node) 1049 { 1050 struct dlm_lock_resource *res; 1051 struct list_head *iter, *iter2; 1052 struct dlm_lock *lock; 1053 1054 spin_lock(&dlm->spinlock); 1055 list_for_each_safe(iter, iter2, &dlm->reco.resources) { 1056 res = list_entry (iter, struct dlm_lock_resource, recovering); 1057 /* always prune any $RECOVERY entries for dead nodes, 1058 * otherwise hangs can occur during later recovery */ 1059 if (dlm_is_recovery_lock(res->lockname.name, 1060 res->lockname.len)) { 1061 spin_lock(&res->spinlock); 1062 list_for_each_entry(lock, &res->granted, list) { 1063 if (lock->ml.node == dead_node) { 1064 mlog(0, "AHA! there was " 1065 "a $RECOVERY lock for dead " 1066 "node %u (%s)!\n", 1067 dead_node, dlm->name); 1068 list_del_init(&lock->list); 1069 dlm_lock_put(lock); 1070 break; 1071 } 1072 } 1073 spin_unlock(&res->spinlock); 1074 continue; 1075 } 1076 1077 if (res->owner == dead_node) { 1078 mlog(0, "found lockres owned by dead node while " 1079 "doing recovery for node %u. sending it.\n", 1080 dead_node); 1081 list_move_tail(&res->recovering, list); 1082 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 1083 mlog(0, "found UNKNOWN owner while doing recovery " 1084 "for node %u. sending it.\n", dead_node); 1085 list_move_tail(&res->recovering, list); 1086 } 1087 } 1088 spin_unlock(&dlm->spinlock); 1089 } 1090 1091 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) 1092 { 1093 int total_locks = 0; 1094 struct list_head *iter, *queue = &res->granted; 1095 int i; 1096 1097 for (i=0; i<3; i++) { 1098 list_for_each(iter, queue) 1099 total_locks++; 1100 queue++; 1101 } 1102 return total_locks; 1103 } 1104 1105 1106 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, 1107 struct dlm_migratable_lockres *mres, 1108 u8 send_to, 1109 struct dlm_lock_resource *res, 1110 int total_locks) 1111 { 1112 u64 mig_cookie = be64_to_cpu(mres->mig_cookie); 1113 int mres_total_locks = be32_to_cpu(mres->total_locks); 1114 int sz, ret = 0, status = 0; 1115 u8 orig_flags = mres->flags, 1116 orig_master = mres->master; 1117 1118 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); 1119 if (!mres->num_locks) 1120 return 0; 1121 1122 sz = sizeof(struct dlm_migratable_lockres) + 1123 (mres->num_locks * sizeof(struct dlm_migratable_lock)); 1124 1125 /* add an all-done flag if we reached the last lock */ 1126 orig_flags = mres->flags; 1127 BUG_ON(total_locks > mres_total_locks); 1128 if (total_locks == mres_total_locks) 1129 mres->flags |= DLM_MRES_ALL_DONE; 1130 1131 /* send it */ 1132 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, 1133 sz, send_to, &status); 1134 if (ret < 0) { 1135 /* XXX: negative status is not handled. 1136 * this will end up killing this node. */ 1137 mlog_errno(ret); 1138 } else { 1139 /* might get an -ENOMEM back here */ 1140 ret = status; 1141 if (ret < 0) { 1142 mlog_errno(ret); 1143 1144 if (ret == -EFAULT) { 1145 mlog(ML_ERROR, "node %u told me to kill " 1146 "myself!\n", send_to); 1147 BUG(); 1148 } 1149 } 1150 } 1151 1152 /* zero and reinit the message buffer */ 1153 dlm_init_migratable_lockres(mres, res->lockname.name, 1154 res->lockname.len, mres_total_locks, 1155 mig_cookie, orig_flags, orig_master); 1156 return ret; 1157 } 1158 1159 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, 1160 const char *lockname, int namelen, 1161 int total_locks, u64 cookie, 1162 u8 flags, u8 master) 1163 { 1164 /* mres here is one full page */ 1165 memset(mres, 0, PAGE_SIZE); 1166 mres->lockname_len = namelen; 1167 memcpy(mres->lockname, lockname, namelen); 1168 mres->num_locks = 0; 1169 mres->total_locks = cpu_to_be32(total_locks); 1170 mres->mig_cookie = cpu_to_be64(cookie); 1171 mres->flags = flags; 1172 mres->master = master; 1173 } 1174 1175 1176 /* returns 1 if this lock fills the network structure, 1177 * 0 otherwise */ 1178 static int dlm_add_lock_to_array(struct dlm_lock *lock, 1179 struct dlm_migratable_lockres *mres, int queue) 1180 { 1181 struct dlm_migratable_lock *ml; 1182 int lock_num = mres->num_locks; 1183 1184 ml = &(mres->ml[lock_num]); 1185 ml->cookie = lock->ml.cookie; 1186 ml->type = lock->ml.type; 1187 ml->convert_type = lock->ml.convert_type; 1188 ml->highest_blocked = lock->ml.highest_blocked; 1189 ml->list = queue; 1190 if (lock->lksb) { 1191 ml->flags = lock->lksb->flags; 1192 /* send our current lvb */ 1193 if (ml->type == LKM_EXMODE || 1194 ml->type == LKM_PRMODE) { 1195 /* if it is already set, this had better be a PR 1196 * and it has to match */ 1197 if (!dlm_lvb_is_empty(mres->lvb) && 1198 (ml->type == LKM_EXMODE || 1199 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { 1200 mlog(ML_ERROR, "mismatched lvbs!\n"); 1201 __dlm_print_one_lock_resource(lock->lockres); 1202 BUG(); 1203 } 1204 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); 1205 } 1206 } 1207 ml->node = lock->ml.node; 1208 mres->num_locks++; 1209 /* we reached the max, send this network message */ 1210 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) 1211 return 1; 1212 return 0; 1213 } 1214 1215 1216 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1217 struct dlm_migratable_lockres *mres, 1218 u8 send_to, u8 flags) 1219 { 1220 struct list_head *queue, *iter; 1221 int total_locks, i; 1222 u64 mig_cookie = 0; 1223 struct dlm_lock *lock; 1224 int ret = 0; 1225 1226 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1227 1228 mlog(0, "sending to %u\n", send_to); 1229 1230 total_locks = dlm_num_locks_in_lockres(res); 1231 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { 1232 /* rare, but possible */ 1233 mlog(0, "argh. lockres has %d locks. this will " 1234 "require more than one network packet to " 1235 "migrate\n", total_locks); 1236 mig_cookie = dlm_get_next_mig_cookie(); 1237 } 1238 1239 dlm_init_migratable_lockres(mres, res->lockname.name, 1240 res->lockname.len, total_locks, 1241 mig_cookie, flags, res->owner); 1242 1243 total_locks = 0; 1244 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { 1245 queue = dlm_list_idx_to_ptr(res, i); 1246 list_for_each(iter, queue) { 1247 lock = list_entry (iter, struct dlm_lock, list); 1248 1249 /* add another lock. */ 1250 total_locks++; 1251 if (!dlm_add_lock_to_array(lock, mres, i)) 1252 continue; 1253 1254 /* this filled the lock message, 1255 * we must send it immediately. */ 1256 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, 1257 res, total_locks); 1258 if (ret < 0) 1259 goto error; 1260 } 1261 } 1262 /* flush any remaining locks */ 1263 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); 1264 if (ret < 0) 1265 goto error; 1266 return ret; 1267 1268 error: 1269 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n", 1270 dlm->name, ret); 1271 if (!dlm_is_host_down(ret)) 1272 BUG(); 1273 mlog(0, "%s: node %u went down while sending %s " 1274 "lockres %.*s\n", dlm->name, send_to, 1275 flags & DLM_MRES_RECOVERY ? "recovery" : "migration", 1276 res->lockname.len, res->lockname.name); 1277 return ret; 1278 } 1279 1280 1281 1282 /* 1283 * this message will contain no more than one page worth of 1284 * recovery data, and it will work on only one lockres. 1285 * there may be many locks in this page, and we may need to wait 1286 * for additional packets to complete all the locks (rare, but 1287 * possible). 1288 */ 1289 /* 1290 * NOTE: the allocation error cases here are scary 1291 * we really cannot afford to fail an alloc in recovery 1292 * do we spin? returning an error only delays the problem really 1293 */ 1294 1295 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data) 1296 { 1297 struct dlm_ctxt *dlm = data; 1298 struct dlm_migratable_lockres *mres = 1299 (struct dlm_migratable_lockres *)msg->buf; 1300 int ret = 0; 1301 u8 real_master; 1302 char *buf = NULL; 1303 struct dlm_work_item *item = NULL; 1304 struct dlm_lock_resource *res = NULL; 1305 1306 if (!dlm_grab(dlm)) 1307 return -EINVAL; 1308 1309 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); 1310 1311 real_master = mres->master; 1312 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1313 /* cannot migrate a lockres with no master */ 1314 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1315 } 1316 1317 mlog(0, "%s message received from node %u\n", 1318 (mres->flags & DLM_MRES_RECOVERY) ? 1319 "recovery" : "migration", mres->master); 1320 if (mres->flags & DLM_MRES_ALL_DONE) 1321 mlog(0, "all done flag. all lockres data received!\n"); 1322 1323 ret = -ENOMEM; 1324 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); 1325 item = kcalloc(1, sizeof(*item), GFP_NOFS); 1326 if (!buf || !item) 1327 goto leave; 1328 1329 /* lookup the lock to see if we have a secondary queue for this 1330 * already... just add the locks in and this will have its owner 1331 * and RECOVERY flag changed when it completes. */ 1332 res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len); 1333 if (res) { 1334 /* this will get a ref on res */ 1335 /* mark it as recovering/migrating and hash it */ 1336 spin_lock(&res->spinlock); 1337 if (mres->flags & DLM_MRES_RECOVERY) { 1338 res->state |= DLM_LOCK_RES_RECOVERING; 1339 } else { 1340 if (res->state & DLM_LOCK_RES_MIGRATING) { 1341 /* this is at least the second 1342 * lockres message */ 1343 mlog(0, "lock %.*s is already migrating\n", 1344 mres->lockname_len, 1345 mres->lockname); 1346 } else if (res->state & DLM_LOCK_RES_RECOVERING) { 1347 /* caller should BUG */ 1348 mlog(ML_ERROR, "node is attempting to migrate " 1349 "lock %.*s, but marked as recovering!\n", 1350 mres->lockname_len, mres->lockname); 1351 ret = -EFAULT; 1352 spin_unlock(&res->spinlock); 1353 goto leave; 1354 } 1355 res->state |= DLM_LOCK_RES_MIGRATING; 1356 } 1357 spin_unlock(&res->spinlock); 1358 } else { 1359 /* need to allocate, just like if it was 1360 * mastered here normally */ 1361 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); 1362 if (!res) 1363 goto leave; 1364 1365 /* to match the ref that we would have gotten if 1366 * dlm_lookup_lockres had succeeded */ 1367 dlm_lockres_get(res); 1368 1369 /* mark it as recovering/migrating and hash it */ 1370 if (mres->flags & DLM_MRES_RECOVERY) 1371 res->state |= DLM_LOCK_RES_RECOVERING; 1372 else 1373 res->state |= DLM_LOCK_RES_MIGRATING; 1374 1375 spin_lock(&dlm->spinlock); 1376 __dlm_insert_lockres(dlm, res); 1377 spin_unlock(&dlm->spinlock); 1378 1379 /* now that the new lockres is inserted, 1380 * make it usable by other processes */ 1381 spin_lock(&res->spinlock); 1382 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1383 spin_unlock(&res->spinlock); 1384 1385 /* add an extra ref for just-allocated lockres 1386 * otherwise the lockres will be purged immediately */ 1387 dlm_lockres_get(res); 1388 1389 } 1390 1391 /* at this point we have allocated everything we need, 1392 * and we have a hashed lockres with an extra ref and 1393 * the proper res->state flags. */ 1394 ret = 0; 1395 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1396 /* migration cannot have an unknown master */ 1397 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1398 mlog(0, "recovery has passed me a lockres with an " 1399 "unknown owner.. will need to requery: " 1400 "%.*s\n", mres->lockname_len, mres->lockname); 1401 } else { 1402 spin_lock(&res->spinlock); 1403 dlm_change_lockres_owner(dlm, res, dlm->node_num); 1404 spin_unlock(&res->spinlock); 1405 } 1406 1407 /* queue up work for dlm_mig_lockres_worker */ 1408 dlm_grab(dlm); /* get an extra ref for the work item */ 1409 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ 1410 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); 1411 item->u.ml.lockres = res; /* already have a ref */ 1412 item->u.ml.real_master = real_master; 1413 spin_lock(&dlm->work_lock); 1414 list_add_tail(&item->list, &dlm->work_list); 1415 spin_unlock(&dlm->work_lock); 1416 queue_work(dlm->dlm_worker, &dlm->dispatched_work); 1417 1418 leave: 1419 dlm_put(dlm); 1420 if (ret < 0) { 1421 if (buf) 1422 kfree(buf); 1423 if (item) 1424 kfree(item); 1425 } 1426 1427 mlog_exit(ret); 1428 return ret; 1429 } 1430 1431 1432 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) 1433 { 1434 struct dlm_ctxt *dlm = data; 1435 struct dlm_migratable_lockres *mres; 1436 int ret = 0; 1437 struct dlm_lock_resource *res; 1438 u8 real_master; 1439 1440 dlm = item->dlm; 1441 mres = (struct dlm_migratable_lockres *)data; 1442 1443 res = item->u.ml.lockres; 1444 real_master = item->u.ml.real_master; 1445 1446 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1447 /* this case is super-rare. only occurs if 1448 * node death happens during migration. */ 1449 again: 1450 ret = dlm_lockres_master_requery(dlm, res, &real_master); 1451 if (ret < 0) { 1452 mlog(0, "dlm_lockres_master_requery ret=%d\n", 1453 ret); 1454 goto again; 1455 } 1456 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1457 mlog(0, "lockres %.*s not claimed. " 1458 "this node will take it.\n", 1459 res->lockname.len, res->lockname.name); 1460 } else { 1461 mlog(0, "master needs to respond to sender " 1462 "that node %u still owns %.*s\n", 1463 real_master, res->lockname.len, 1464 res->lockname.name); 1465 /* cannot touch this lockres */ 1466 goto leave; 1467 } 1468 } 1469 1470 ret = dlm_process_recovery_data(dlm, res, mres); 1471 if (ret < 0) 1472 mlog(0, "dlm_process_recovery_data returned %d\n", ret); 1473 else 1474 mlog(0, "dlm_process_recovery_data succeeded\n"); 1475 1476 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == 1477 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { 1478 ret = dlm_finish_migration(dlm, res, mres->master); 1479 if (ret < 0) 1480 mlog_errno(ret); 1481 } 1482 1483 leave: 1484 kfree(data); 1485 mlog_exit(ret); 1486 } 1487 1488 1489 1490 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 1491 struct dlm_lock_resource *res, 1492 u8 *real_master) 1493 { 1494 struct dlm_node_iter iter; 1495 int nodenum; 1496 int ret = 0; 1497 1498 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; 1499 1500 /* we only reach here if one of the two nodes in a 1501 * migration died while the migration was in progress. 1502 * at this point we need to requery the master. we 1503 * know that the new_master got as far as creating 1504 * an mle on at least one node, but we do not know 1505 * if any nodes had actually cleared the mle and set 1506 * the master to the new_master. the old master 1507 * is supposed to set the owner to UNKNOWN in the 1508 * event of a new_master death, so the only possible 1509 * responses that we can get from nodes here are 1510 * that the master is new_master, or that the master 1511 * is UNKNOWN. 1512 * if all nodes come back with UNKNOWN then we know 1513 * the lock needs remastering here. 1514 * if any node comes back with a valid master, check 1515 * to see if that master is the one that we are 1516 * recovering. if so, then the new_master died and 1517 * we need to remaster this lock. if not, then the 1518 * new_master survived and that node will respond to 1519 * other nodes about the owner. 1520 * if there is an owner, this node needs to dump this 1521 * lockres and alert the sender that this lockres 1522 * was rejected. */ 1523 spin_lock(&dlm->spinlock); 1524 dlm_node_iter_init(dlm->domain_map, &iter); 1525 spin_unlock(&dlm->spinlock); 1526 1527 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 1528 /* do not send to self */ 1529 if (nodenum == dlm->node_num) 1530 continue; 1531 ret = dlm_do_master_requery(dlm, res, nodenum, real_master); 1532 if (ret < 0) { 1533 mlog_errno(ret); 1534 if (!dlm_is_host_down(ret)) 1535 BUG(); 1536 /* host is down, so answer for that node would be 1537 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ 1538 } 1539 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { 1540 mlog(0, "lock master is %u\n", *real_master); 1541 break; 1542 } 1543 } 1544 return ret; 1545 } 1546 1547 1548 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1549 u8 nodenum, u8 *real_master) 1550 { 1551 int ret = -EINVAL; 1552 struct dlm_master_requery req; 1553 int status = DLM_LOCK_RES_OWNER_UNKNOWN; 1554 1555 memset(&req, 0, sizeof(req)); 1556 req.node_idx = dlm->node_num; 1557 req.namelen = res->lockname.len; 1558 memcpy(req.name, res->lockname.name, res->lockname.len); 1559 1560 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, 1561 &req, sizeof(req), nodenum, &status); 1562 /* XXX: negative status not handled properly here. */ 1563 if (ret < 0) 1564 mlog_errno(ret); 1565 else { 1566 BUG_ON(status < 0); 1567 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); 1568 *real_master = (u8) (status & 0xff); 1569 mlog(0, "node %u responded to master requery with %u\n", 1570 nodenum, *real_master); 1571 ret = 0; 1572 } 1573 return ret; 1574 } 1575 1576 1577 /* this function cannot error, so unless the sending 1578 * or receiving of the message failed, the owner can 1579 * be trusted */ 1580 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data) 1581 { 1582 struct dlm_ctxt *dlm = data; 1583 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; 1584 struct dlm_lock_resource *res = NULL; 1585 unsigned int hash; 1586 int master = DLM_LOCK_RES_OWNER_UNKNOWN; 1587 u32 flags = DLM_ASSERT_MASTER_REQUERY; 1588 1589 if (!dlm_grab(dlm)) { 1590 /* since the domain has gone away on this 1591 * node, the proper response is UNKNOWN */ 1592 return master; 1593 } 1594 1595 hash = dlm_lockid_hash(req->name, req->namelen); 1596 1597 spin_lock(&dlm->spinlock); 1598 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); 1599 if (res) { 1600 spin_lock(&res->spinlock); 1601 master = res->owner; 1602 if (master == dlm->node_num) { 1603 int ret = dlm_dispatch_assert_master(dlm, res, 1604 0, 0, flags); 1605 if (ret < 0) { 1606 mlog_errno(-ENOMEM); 1607 /* retry!? */ 1608 BUG(); 1609 } 1610 } 1611 spin_unlock(&res->spinlock); 1612 } 1613 spin_unlock(&dlm->spinlock); 1614 1615 dlm_put(dlm); 1616 return master; 1617 } 1618 1619 static inline struct list_head * 1620 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) 1621 { 1622 struct list_head *ret; 1623 BUG_ON(list_num < 0); 1624 BUG_ON(list_num > 2); 1625 ret = &(res->granted); 1626 ret += list_num; 1627 return ret; 1628 } 1629 /* TODO: do ast flush business 1630 * TODO: do MIGRATING and RECOVERING spinning 1631 */ 1632 1633 /* 1634 * NOTE about in-flight requests during migration: 1635 * 1636 * Before attempting the migrate, the master has marked the lockres as 1637 * MIGRATING and then flushed all of its pending ASTS. So any in-flight 1638 * requests either got queued before the MIGRATING flag got set, in which 1639 * case the lock data will reflect the change and a return message is on 1640 * the way, or the request failed to get in before MIGRATING got set. In 1641 * this case, the caller will be told to spin and wait for the MIGRATING 1642 * flag to be dropped, then recheck the master. 1643 * This holds true for the convert, cancel and unlock cases, and since lvb 1644 * updates are tied to these same messages, it applies to lvb updates as 1645 * well. For the lock case, there is no way a lock can be on the master 1646 * queue and not be on the secondary queue since the lock is always added 1647 * locally first. This means that the new target node will never be sent 1648 * a lock that he doesn't already have on the list. 1649 * In total, this means that the local lock is correct and should not be 1650 * updated to match the one sent by the master. Any messages sent back 1651 * from the master before the MIGRATING flag will bring the lock properly 1652 * up-to-date, and the change will be ordered properly for the waiter. 1653 * We will *not* attempt to modify the lock underneath the waiter. 1654 */ 1655 1656 static int dlm_process_recovery_data(struct dlm_ctxt *dlm, 1657 struct dlm_lock_resource *res, 1658 struct dlm_migratable_lockres *mres) 1659 { 1660 struct dlm_migratable_lock *ml; 1661 struct list_head *queue; 1662 struct dlm_lock *newlock = NULL; 1663 struct dlm_lockstatus *lksb = NULL; 1664 int ret = 0; 1665 int i, bad; 1666 struct list_head *iter; 1667 struct dlm_lock *lock = NULL; 1668 1669 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1670 for (i=0; i<mres->num_locks; i++) { 1671 ml = &(mres->ml[i]); 1672 BUG_ON(ml->highest_blocked != LKM_IVMODE); 1673 newlock = NULL; 1674 lksb = NULL; 1675 1676 queue = dlm_list_num_to_pointer(res, ml->list); 1677 1678 /* if the lock is for the local node it needs to 1679 * be moved to the proper location within the queue. 1680 * do not allocate a new lock structure. */ 1681 if (ml->node == dlm->node_num) { 1682 /* MIGRATION ONLY! */ 1683 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); 1684 1685 spin_lock(&res->spinlock); 1686 list_for_each(iter, queue) { 1687 lock = list_entry (iter, struct dlm_lock, list); 1688 if (lock->ml.cookie != ml->cookie) 1689 lock = NULL; 1690 else 1691 break; 1692 } 1693 1694 /* lock is always created locally first, and 1695 * destroyed locally last. it must be on the list */ 1696 if (!lock) { 1697 u64 c = ml->cookie; 1698 mlog(ML_ERROR, "could not find local lock " 1699 "with cookie %u:%llu!\n", 1700 dlm_get_lock_cookie_node(c), 1701 dlm_get_lock_cookie_seq(c)); 1702 BUG(); 1703 } 1704 BUG_ON(lock->ml.node != ml->node); 1705 1706 /* see NOTE above about why we do not update 1707 * to match the master here */ 1708 1709 /* move the lock to its proper place */ 1710 /* do not alter lock refcount. switching lists. */ 1711 list_move_tail(&lock->list, queue); 1712 spin_unlock(&res->spinlock); 1713 1714 mlog(0, "just reordered a local lock!\n"); 1715 continue; 1716 } 1717 1718 /* lock is for another node. */ 1719 newlock = dlm_new_lock(ml->type, ml->node, 1720 be64_to_cpu(ml->cookie), NULL); 1721 if (!newlock) { 1722 ret = -ENOMEM; 1723 goto leave; 1724 } 1725 lksb = newlock->lksb; 1726 dlm_lock_attach_lockres(newlock, res); 1727 1728 if (ml->convert_type != LKM_IVMODE) { 1729 BUG_ON(queue != &res->converting); 1730 newlock->ml.convert_type = ml->convert_type; 1731 } 1732 lksb->flags |= (ml->flags & 1733 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); 1734 1735 if (ml->type == LKM_NLMODE) 1736 goto skip_lvb; 1737 1738 if (!dlm_lvb_is_empty(mres->lvb)) { 1739 if (lksb->flags & DLM_LKSB_PUT_LVB) { 1740 /* other node was trying to update 1741 * lvb when node died. recreate the 1742 * lksb with the updated lvb. */ 1743 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); 1744 /* the lock resource lvb update must happen 1745 * NOW, before the spinlock is dropped. 1746 * we no longer wait for the AST to update 1747 * the lvb. */ 1748 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1749 } else { 1750 /* otherwise, the node is sending its 1751 * most recent valid lvb info */ 1752 BUG_ON(ml->type != LKM_EXMODE && 1753 ml->type != LKM_PRMODE); 1754 if (!dlm_lvb_is_empty(res->lvb) && 1755 (ml->type == LKM_EXMODE || 1756 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { 1757 int i; 1758 mlog(ML_ERROR, "%s:%.*s: received bad " 1759 "lvb! type=%d\n", dlm->name, 1760 res->lockname.len, 1761 res->lockname.name, ml->type); 1762 printk("lockres lvb=["); 1763 for (i=0; i<DLM_LVB_LEN; i++) 1764 printk("%02x", res->lvb[i]); 1765 printk("]\nmigrated lvb=["); 1766 for (i=0; i<DLM_LVB_LEN; i++) 1767 printk("%02x", mres->lvb[i]); 1768 printk("]\n"); 1769 dlm_print_one_lock_resource(res); 1770 BUG(); 1771 } 1772 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1773 } 1774 } 1775 skip_lvb: 1776 1777 /* NOTE: 1778 * wrt lock queue ordering and recovery: 1779 * 1. order of locks on granted queue is 1780 * meaningless. 1781 * 2. order of locks on converting queue is 1782 * LOST with the node death. sorry charlie. 1783 * 3. order of locks on the blocked queue is 1784 * also LOST. 1785 * order of locks does not affect integrity, it 1786 * just means that a lock request may get pushed 1787 * back in line as a result of the node death. 1788 * also note that for a given node the lock order 1789 * for its secondary queue locks is preserved 1790 * relative to each other, but clearly *not* 1791 * preserved relative to locks from other nodes. 1792 */ 1793 bad = 0; 1794 spin_lock(&res->spinlock); 1795 list_for_each_entry(lock, queue, list) { 1796 if (lock->ml.cookie == ml->cookie) { 1797 u64 c = lock->ml.cookie; 1798 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1799 "exists on this lockres!\n", dlm->name, 1800 res->lockname.len, res->lockname.name, 1801 dlm_get_lock_cookie_node(c), 1802 dlm_get_lock_cookie_seq(c)); 1803 1804 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " 1805 "node=%u, cookie=%u:%llu, queue=%d\n", 1806 ml->type, ml->convert_type, ml->node, 1807 dlm_get_lock_cookie_node(ml->cookie), 1808 dlm_get_lock_cookie_seq(ml->cookie), 1809 ml->list); 1810 1811 __dlm_print_one_lock_resource(res); 1812 bad = 1; 1813 break; 1814 } 1815 } 1816 if (!bad) { 1817 dlm_lock_get(newlock); 1818 list_add_tail(&newlock->list, queue); 1819 } 1820 spin_unlock(&res->spinlock); 1821 } 1822 mlog(0, "done running all the locks\n"); 1823 1824 leave: 1825 if (ret < 0) { 1826 mlog_errno(ret); 1827 if (newlock) 1828 dlm_lock_put(newlock); 1829 } 1830 1831 mlog_exit(ret); 1832 return ret; 1833 } 1834 1835 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, 1836 struct dlm_lock_resource *res) 1837 { 1838 int i; 1839 struct list_head *queue, *iter, *iter2; 1840 struct dlm_lock *lock; 1841 1842 res->state |= DLM_LOCK_RES_RECOVERING; 1843 if (!list_empty(&res->recovering)) { 1844 mlog(0, 1845 "Recovering res %s:%.*s, is already on recovery list!\n", 1846 dlm->name, res->lockname.len, res->lockname.name); 1847 list_del_init(&res->recovering); 1848 } 1849 /* We need to hold a reference while on the recovery list */ 1850 dlm_lockres_get(res); 1851 list_add_tail(&res->recovering, &dlm->reco.resources); 1852 1853 /* find any pending locks and put them back on proper list */ 1854 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { 1855 queue = dlm_list_idx_to_ptr(res, i); 1856 list_for_each_safe(iter, iter2, queue) { 1857 lock = list_entry (iter, struct dlm_lock, list); 1858 dlm_lock_get(lock); 1859 if (lock->convert_pending) { 1860 /* move converting lock back to granted */ 1861 BUG_ON(i != DLM_CONVERTING_LIST); 1862 mlog(0, "node died with convert pending " 1863 "on %.*s. move back to granted list.\n", 1864 res->lockname.len, res->lockname.name); 1865 dlm_revert_pending_convert(res, lock); 1866 lock->convert_pending = 0; 1867 } else if (lock->lock_pending) { 1868 /* remove pending lock requests completely */ 1869 BUG_ON(i != DLM_BLOCKED_LIST); 1870 mlog(0, "node died with lock pending " 1871 "on %.*s. remove from blocked list and skip.\n", 1872 res->lockname.len, res->lockname.name); 1873 /* lock will be floating until ref in 1874 * dlmlock_remote is freed after the network 1875 * call returns. ok for it to not be on any 1876 * list since no ast can be called 1877 * (the master is dead). */ 1878 dlm_revert_pending_lock(res, lock); 1879 lock->lock_pending = 0; 1880 } else if (lock->unlock_pending) { 1881 /* if an unlock was in progress, treat as 1882 * if this had completed successfully 1883 * before sending this lock state to the 1884 * new master. note that the dlm_unlock 1885 * call is still responsible for calling 1886 * the unlockast. that will happen after 1887 * the network call times out. for now, 1888 * just move lists to prepare the new 1889 * recovery master. */ 1890 BUG_ON(i != DLM_GRANTED_LIST); 1891 mlog(0, "node died with unlock pending " 1892 "on %.*s. remove from blocked list and skip.\n", 1893 res->lockname.len, res->lockname.name); 1894 dlm_commit_pending_unlock(res, lock); 1895 lock->unlock_pending = 0; 1896 } else if (lock->cancel_pending) { 1897 /* if a cancel was in progress, treat as 1898 * if this had completed successfully 1899 * before sending this lock state to the 1900 * new master */ 1901 BUG_ON(i != DLM_CONVERTING_LIST); 1902 mlog(0, "node died with cancel pending " 1903 "on %.*s. move back to granted list.\n", 1904 res->lockname.len, res->lockname.name); 1905 dlm_commit_pending_cancel(res, lock); 1906 lock->cancel_pending = 0; 1907 } 1908 dlm_lock_put(lock); 1909 } 1910 } 1911 } 1912 1913 1914 1915 /* removes all recovered locks from the recovery list. 1916 * sets the res->owner to the new master. 1917 * unsets the RECOVERY flag and wakes waiters. */ 1918 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, 1919 u8 dead_node, u8 new_master) 1920 { 1921 int i; 1922 struct list_head *iter, *iter2; 1923 struct hlist_node *hash_iter; 1924 struct hlist_head *bucket; 1925 1926 struct dlm_lock_resource *res; 1927 1928 mlog_entry_void(); 1929 1930 assert_spin_locked(&dlm->spinlock); 1931 1932 list_for_each_safe(iter, iter2, &dlm->reco.resources) { 1933 res = list_entry (iter, struct dlm_lock_resource, recovering); 1934 if (res->owner == dead_node) { 1935 list_del_init(&res->recovering); 1936 spin_lock(&res->spinlock); 1937 dlm_change_lockres_owner(dlm, res, new_master); 1938 res->state &= ~DLM_LOCK_RES_RECOVERING; 1939 if (!__dlm_lockres_unused(res)) 1940 __dlm_dirty_lockres(dlm, res); 1941 spin_unlock(&res->spinlock); 1942 wake_up(&res->wq); 1943 dlm_lockres_put(res); 1944 } 1945 } 1946 1947 /* this will become unnecessary eventually, but 1948 * for now we need to run the whole hash, clear 1949 * the RECOVERING state and set the owner 1950 * if necessary */ 1951 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 1952 bucket = dlm_lockres_hash(dlm, i); 1953 hlist_for_each_entry(res, hash_iter, bucket, hash_node) { 1954 if (res->state & DLM_LOCK_RES_RECOVERING) { 1955 if (res->owner == dead_node) { 1956 mlog(0, "(this=%u) res %.*s owner=%u " 1957 "was not on recovering list, but " 1958 "clearing state anyway\n", 1959 dlm->node_num, res->lockname.len, 1960 res->lockname.name, new_master); 1961 } else if (res->owner == dlm->node_num) { 1962 mlog(0, "(this=%u) res %.*s owner=%u " 1963 "was not on recovering list, " 1964 "owner is THIS node, clearing\n", 1965 dlm->node_num, res->lockname.len, 1966 res->lockname.name, new_master); 1967 } else 1968 continue; 1969 1970 if (!list_empty(&res->recovering)) { 1971 mlog(0, "%s:%.*s: lockres was " 1972 "marked RECOVERING, owner=%u\n", 1973 dlm->name, res->lockname.len, 1974 res->lockname.name, res->owner); 1975 list_del_init(&res->recovering); 1976 dlm_lockres_put(res); 1977 } 1978 spin_lock(&res->spinlock); 1979 dlm_change_lockres_owner(dlm, res, new_master); 1980 res->state &= ~DLM_LOCK_RES_RECOVERING; 1981 if (!__dlm_lockres_unused(res)) 1982 __dlm_dirty_lockres(dlm, res); 1983 spin_unlock(&res->spinlock); 1984 wake_up(&res->wq); 1985 } 1986 } 1987 } 1988 } 1989 1990 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) 1991 { 1992 if (local) { 1993 if (lock->ml.type != LKM_EXMODE && 1994 lock->ml.type != LKM_PRMODE) 1995 return 1; 1996 } else if (lock->ml.type == LKM_EXMODE) 1997 return 1; 1998 return 0; 1999 } 2000 2001 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, 2002 struct dlm_lock_resource *res, u8 dead_node) 2003 { 2004 struct list_head *iter, *queue; 2005 struct dlm_lock *lock; 2006 int blank_lvb = 0, local = 0; 2007 int i; 2008 u8 search_node; 2009 2010 assert_spin_locked(&dlm->spinlock); 2011 assert_spin_locked(&res->spinlock); 2012 2013 if (res->owner == dlm->node_num) 2014 /* if this node owned the lockres, and if the dead node 2015 * had an EX when he died, blank out the lvb */ 2016 search_node = dead_node; 2017 else { 2018 /* if this is a secondary lockres, and we had no EX or PR 2019 * locks granted, we can no longer trust the lvb */ 2020 search_node = dlm->node_num; 2021 local = 1; /* check local state for valid lvb */ 2022 } 2023 2024 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { 2025 queue = dlm_list_idx_to_ptr(res, i); 2026 list_for_each(iter, queue) { 2027 lock = list_entry (iter, struct dlm_lock, list); 2028 if (lock->ml.node == search_node) { 2029 if (dlm_lvb_needs_invalidation(lock, local)) { 2030 /* zero the lksb lvb and lockres lvb */ 2031 blank_lvb = 1; 2032 memset(lock->lksb->lvb, 0, DLM_LVB_LEN); 2033 } 2034 } 2035 } 2036 } 2037 2038 if (blank_lvb) { 2039 mlog(0, "clearing %.*s lvb, dead node %u had EX\n", 2040 res->lockname.len, res->lockname.name, dead_node); 2041 memset(res->lvb, 0, DLM_LVB_LEN); 2042 } 2043 } 2044 2045 static void dlm_free_dead_locks(struct dlm_ctxt *dlm, 2046 struct dlm_lock_resource *res, u8 dead_node) 2047 { 2048 struct list_head *iter, *tmpiter; 2049 struct dlm_lock *lock; 2050 2051 /* this node is the lockres master: 2052 * 1) remove any stale locks for the dead node 2053 * 2) if the dead node had an EX when he died, blank out the lvb 2054 */ 2055 assert_spin_locked(&dlm->spinlock); 2056 assert_spin_locked(&res->spinlock); 2057 2058 /* TODO: check pending_asts, pending_basts here */ 2059 list_for_each_safe(iter, tmpiter, &res->granted) { 2060 lock = list_entry (iter, struct dlm_lock, list); 2061 if (lock->ml.node == dead_node) { 2062 list_del_init(&lock->list); 2063 dlm_lock_put(lock); 2064 } 2065 } 2066 list_for_each_safe(iter, tmpiter, &res->converting) { 2067 lock = list_entry (iter, struct dlm_lock, list); 2068 if (lock->ml.node == dead_node) { 2069 list_del_init(&lock->list); 2070 dlm_lock_put(lock); 2071 } 2072 } 2073 list_for_each_safe(iter, tmpiter, &res->blocked) { 2074 lock = list_entry (iter, struct dlm_lock, list); 2075 if (lock->ml.node == dead_node) { 2076 list_del_init(&lock->list); 2077 dlm_lock_put(lock); 2078 } 2079 } 2080 2081 /* do not kick thread yet */ 2082 __dlm_dirty_lockres(dlm, res); 2083 } 2084 2085 /* if this node is the recovery master, and there are no 2086 * locks for a given lockres owned by this node that are in 2087 * either PR or EX mode, zero out the lvb before requesting. 2088 * 2089 */ 2090 2091 2092 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) 2093 { 2094 struct hlist_node *iter; 2095 struct dlm_lock_resource *res; 2096 int i; 2097 struct hlist_head *bucket; 2098 struct dlm_lock *lock; 2099 2100 2101 /* purge any stale mles */ 2102 dlm_clean_master_list(dlm, dead_node); 2103 2104 /* 2105 * now clean up all lock resources. there are two rules: 2106 * 2107 * 1) if the dead node was the master, move the lockres 2108 * to the recovering list. set the RECOVERING flag. 2109 * this lockres needs to be cleaned up before it can 2110 * be used further. 2111 * 2112 * 2) if this node was the master, remove all locks from 2113 * each of the lockres queues that were owned by the 2114 * dead node. once recovery finishes, the dlm thread 2115 * can be kicked again to see if any ASTs or BASTs 2116 * need to be fired as a result. 2117 */ 2118 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 2119 bucket = dlm_lockres_hash(dlm, i); 2120 hlist_for_each_entry(res, iter, bucket, hash_node) { 2121 /* always prune any $RECOVERY entries for dead nodes, 2122 * otherwise hangs can occur during later recovery */ 2123 if (dlm_is_recovery_lock(res->lockname.name, 2124 res->lockname.len)) { 2125 spin_lock(&res->spinlock); 2126 list_for_each_entry(lock, &res->granted, list) { 2127 if (lock->ml.node == dead_node) { 2128 mlog(0, "AHA! there was " 2129 "a $RECOVERY lock for dead " 2130 "node %u (%s)!\n", 2131 dead_node, dlm->name); 2132 list_del_init(&lock->list); 2133 dlm_lock_put(lock); 2134 break; 2135 } 2136 } 2137 spin_unlock(&res->spinlock); 2138 continue; 2139 } 2140 spin_lock(&res->spinlock); 2141 /* zero the lvb if necessary */ 2142 dlm_revalidate_lvb(dlm, res, dead_node); 2143 if (res->owner == dead_node) 2144 dlm_move_lockres_to_recovery_list(dlm, res); 2145 else if (res->owner == dlm->node_num) { 2146 dlm_free_dead_locks(dlm, res, dead_node); 2147 __dlm_lockres_calc_usage(dlm, res); 2148 } 2149 spin_unlock(&res->spinlock); 2150 } 2151 } 2152 2153 } 2154 2155 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) 2156 { 2157 assert_spin_locked(&dlm->spinlock); 2158 2159 if (dlm->reco.new_master == idx) { 2160 mlog(0, "%s: recovery master %d just died\n", 2161 dlm->name, idx); 2162 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2163 /* finalize1 was reached, so it is safe to clear 2164 * the new_master and dead_node. that recovery 2165 * is complete. */ 2166 mlog(0, "%s: dead master %d had reached " 2167 "finalize1 state, clearing\n", dlm->name, idx); 2168 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2169 __dlm_reset_recovery(dlm); 2170 } 2171 } 2172 2173 /* check to see if the node is already considered dead */ 2174 if (!test_bit(idx, dlm->live_nodes_map)) { 2175 mlog(0, "for domain %s, node %d is already dead. " 2176 "another node likely did recovery already.\n", 2177 dlm->name, idx); 2178 return; 2179 } 2180 2181 /* check to see if we do not care about this node */ 2182 if (!test_bit(idx, dlm->domain_map)) { 2183 /* This also catches the case that we get a node down 2184 * but haven't joined the domain yet. */ 2185 mlog(0, "node %u already removed from domain!\n", idx); 2186 return; 2187 } 2188 2189 clear_bit(idx, dlm->live_nodes_map); 2190 2191 /* Clean up join state on node death. */ 2192 if (dlm->joining_node == idx) { 2193 mlog(0, "Clearing join state for node %u\n", idx); 2194 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 2195 } 2196 2197 /* make sure local cleanup occurs before the heartbeat events */ 2198 if (!test_bit(idx, dlm->recovery_map)) 2199 dlm_do_local_recovery_cleanup(dlm, idx); 2200 2201 /* notify anything attached to the heartbeat events */ 2202 dlm_hb_event_notify_attached(dlm, idx, 0); 2203 2204 mlog(0, "node %u being removed from domain map!\n", idx); 2205 clear_bit(idx, dlm->domain_map); 2206 /* wake up migration waiters if a node goes down. 2207 * perhaps later we can genericize this for other waiters. */ 2208 wake_up(&dlm->migration_wq); 2209 2210 if (test_bit(idx, dlm->recovery_map)) 2211 mlog(0, "domain %s, node %u already added " 2212 "to recovery map!\n", dlm->name, idx); 2213 else 2214 set_bit(idx, dlm->recovery_map); 2215 } 2216 2217 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) 2218 { 2219 struct dlm_ctxt *dlm = data; 2220 2221 if (!dlm_grab(dlm)) 2222 return; 2223 2224 spin_lock(&dlm->spinlock); 2225 __dlm_hb_node_down(dlm, idx); 2226 spin_unlock(&dlm->spinlock); 2227 2228 dlm_put(dlm); 2229 } 2230 2231 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) 2232 { 2233 struct dlm_ctxt *dlm = data; 2234 2235 if (!dlm_grab(dlm)) 2236 return; 2237 2238 spin_lock(&dlm->spinlock); 2239 set_bit(idx, dlm->live_nodes_map); 2240 /* do NOT notify mle attached to the heartbeat events. 2241 * new nodes are not interesting in mastery until joined. */ 2242 spin_unlock(&dlm->spinlock); 2243 2244 dlm_put(dlm); 2245 } 2246 2247 static void dlm_reco_ast(void *astdata) 2248 { 2249 struct dlm_ctxt *dlm = astdata; 2250 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", 2251 dlm->node_num, dlm->name); 2252 } 2253 static void dlm_reco_bast(void *astdata, int blocked_type) 2254 { 2255 struct dlm_ctxt *dlm = astdata; 2256 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", 2257 dlm->node_num, dlm->name); 2258 } 2259 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) 2260 { 2261 mlog(0, "unlockast for recovery lock fired!\n"); 2262 } 2263 2264 /* 2265 * dlm_pick_recovery_master will continually attempt to use 2266 * dlmlock() on the special "$RECOVERY" lockres with the 2267 * LKM_NOQUEUE flag to get an EX. every thread that enters 2268 * this function on each node racing to become the recovery 2269 * master will not stop attempting this until either: 2270 * a) this node gets the EX (and becomes the recovery master), 2271 * or b) dlm->reco.new_master gets set to some nodenum 2272 * != O2NM_INVALID_NODE_NUM (another node will do the reco). 2273 * so each time a recovery master is needed, the entire cluster 2274 * will sync at this point. if the new master dies, that will 2275 * be detected in dlm_do_recovery */ 2276 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) 2277 { 2278 enum dlm_status ret; 2279 struct dlm_lockstatus lksb; 2280 int status = -EINVAL; 2281 2282 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2283 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2284 again: 2285 memset(&lksb, 0, sizeof(lksb)); 2286 2287 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2288 DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast); 2289 2290 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", 2291 dlm->name, ret, lksb.status); 2292 2293 if (ret == DLM_NORMAL) { 2294 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2295 dlm->name, dlm->node_num); 2296 2297 /* got the EX lock. check to see if another node 2298 * just became the reco master */ 2299 if (dlm_reco_master_ready(dlm)) { 2300 mlog(0, "%s: got reco EX lock, but %u will " 2301 "do the recovery\n", dlm->name, 2302 dlm->reco.new_master); 2303 status = -EEXIST; 2304 } else { 2305 status = 0; 2306 2307 /* see if recovery was already finished elsewhere */ 2308 spin_lock(&dlm->spinlock); 2309 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2310 status = -EINVAL; 2311 mlog(0, "%s: got reco EX lock, but " 2312 "node got recovered already\n", dlm->name); 2313 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2314 mlog(ML_ERROR, "%s: new master is %u " 2315 "but no dead node!\n", 2316 dlm->name, dlm->reco.new_master); 2317 BUG(); 2318 } 2319 } 2320 spin_unlock(&dlm->spinlock); 2321 } 2322 2323 /* if this node has actually become the recovery master, 2324 * set the master and send the messages to begin recovery */ 2325 if (!status) { 2326 mlog(0, "%s: dead=%u, this=%u, sending " 2327 "begin_reco now\n", dlm->name, 2328 dlm->reco.dead_node, dlm->node_num); 2329 status = dlm_send_begin_reco_message(dlm, 2330 dlm->reco.dead_node); 2331 /* this always succeeds */ 2332 BUG_ON(status); 2333 2334 /* set the new_master to this node */ 2335 spin_lock(&dlm->spinlock); 2336 dlm_set_reco_master(dlm, dlm->node_num); 2337 spin_unlock(&dlm->spinlock); 2338 } 2339 2340 /* recovery lock is a special case. ast will not get fired, 2341 * so just go ahead and unlock it. */ 2342 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); 2343 if (ret == DLM_DENIED) { 2344 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); 2345 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); 2346 } 2347 if (ret != DLM_NORMAL) { 2348 /* this would really suck. this could only happen 2349 * if there was a network error during the unlock 2350 * because of node death. this means the unlock 2351 * is actually "done" and the lock structure is 2352 * even freed. we can continue, but only 2353 * because this specific lock name is special. */ 2354 mlog(ML_ERROR, "dlmunlock returned %d\n", ret); 2355 } 2356 } else if (ret == DLM_NOTQUEUED) { 2357 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2358 dlm->name, dlm->node_num); 2359 /* another node is master. wait on 2360 * reco.new_master != O2NM_INVALID_NODE_NUM 2361 * for at most one second */ 2362 wait_event_timeout(dlm->dlm_reco_thread_wq, 2363 dlm_reco_master_ready(dlm), 2364 msecs_to_jiffies(1000)); 2365 if (!dlm_reco_master_ready(dlm)) { 2366 mlog(0, "%s: reco master taking awhile\n", 2367 dlm->name); 2368 goto again; 2369 } 2370 /* another node has informed this one that it is reco master */ 2371 mlog(0, "%s: reco master %u is ready to recover %u\n", 2372 dlm->name, dlm->reco.new_master, dlm->reco.dead_node); 2373 status = -EEXIST; 2374 } else if (ret == DLM_RECOVERING) { 2375 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", 2376 dlm->name, dlm->node_num); 2377 goto again; 2378 } else { 2379 struct dlm_lock_resource *res; 2380 2381 /* dlmlock returned something other than NOTQUEUED or NORMAL */ 2382 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " 2383 "lksb.status=%s\n", dlm->name, dlm_errname(ret), 2384 dlm_errname(lksb.status)); 2385 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2386 DLM_RECOVERY_LOCK_NAME_LEN); 2387 if (res) { 2388 dlm_print_one_lock_resource(res); 2389 dlm_lockres_put(res); 2390 } else { 2391 mlog(ML_ERROR, "recovery lock not found\n"); 2392 } 2393 BUG(); 2394 } 2395 2396 return status; 2397 } 2398 2399 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) 2400 { 2401 struct dlm_begin_reco br; 2402 int ret = 0; 2403 struct dlm_node_iter iter; 2404 int nodenum; 2405 int status; 2406 2407 mlog_entry("%u\n", dead_node); 2408 2409 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); 2410 2411 spin_lock(&dlm->spinlock); 2412 dlm_node_iter_init(dlm->domain_map, &iter); 2413 spin_unlock(&dlm->spinlock); 2414 2415 clear_bit(dead_node, iter.node_map); 2416 2417 memset(&br, 0, sizeof(br)); 2418 br.node_idx = dlm->node_num; 2419 br.dead_node = dead_node; 2420 2421 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2422 ret = 0; 2423 if (nodenum == dead_node) { 2424 mlog(0, "not sending begin reco to dead node " 2425 "%u\n", dead_node); 2426 continue; 2427 } 2428 if (nodenum == dlm->node_num) { 2429 mlog(0, "not sending begin reco to self\n"); 2430 continue; 2431 } 2432 retry: 2433 ret = -EINVAL; 2434 mlog(0, "attempting to send begin reco msg to %d\n", 2435 nodenum); 2436 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, 2437 &br, sizeof(br), nodenum, &status); 2438 /* negative status is handled ok by caller here */ 2439 if (ret >= 0) 2440 ret = status; 2441 if (dlm_is_host_down(ret)) { 2442 /* node is down. not involved in recovery 2443 * so just keep going */ 2444 mlog(0, "%s: node %u was down when sending " 2445 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2446 ret = 0; 2447 } 2448 if (ret < 0) { 2449 struct dlm_lock_resource *res; 2450 /* this is now a serious problem, possibly ENOMEM 2451 * in the network stack. must retry */ 2452 mlog_errno(ret); 2453 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2454 " returned %d\n", dlm->name, nodenum, ret); 2455 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, 2456 DLM_RECOVERY_LOCK_NAME_LEN); 2457 if (res) { 2458 dlm_print_one_lock_resource(res); 2459 dlm_lockres_put(res); 2460 } else { 2461 mlog(ML_ERROR, "recovery lock not found\n"); 2462 } 2463 /* sleep for a bit in hopes that we can avoid 2464 * another ENOMEM */ 2465 msleep(100); 2466 goto retry; 2467 } else if (ret == EAGAIN) { 2468 mlog(0, "%s: trying to start recovery of node " 2469 "%u, but node %u is waiting for last recovery " 2470 "to complete, backoff for a bit\n", dlm->name, 2471 dead_node, nodenum); 2472 /* TODO Look into replacing msleep with cond_resched() */ 2473 msleep(100); 2474 goto retry; 2475 } 2476 } 2477 2478 return ret; 2479 } 2480 2481 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data) 2482 { 2483 struct dlm_ctxt *dlm = data; 2484 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; 2485 2486 /* ok to return 0, domain has gone away */ 2487 if (!dlm_grab(dlm)) 2488 return 0; 2489 2490 spin_lock(&dlm->spinlock); 2491 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2492 mlog(0, "%s: node %u wants to recover node %u (%u:%u) " 2493 "but this node is in finalize state, waiting on finalize2\n", 2494 dlm->name, br->node_idx, br->dead_node, 2495 dlm->reco.dead_node, dlm->reco.new_master); 2496 spin_unlock(&dlm->spinlock); 2497 return EAGAIN; 2498 } 2499 spin_unlock(&dlm->spinlock); 2500 2501 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", 2502 dlm->name, br->node_idx, br->dead_node, 2503 dlm->reco.dead_node, dlm->reco.new_master); 2504 2505 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); 2506 2507 spin_lock(&dlm->spinlock); 2508 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2509 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { 2510 mlog(0, "%s: new_master %u died, changing " 2511 "to %u\n", dlm->name, dlm->reco.new_master, 2512 br->node_idx); 2513 } else { 2514 mlog(0, "%s: new_master %u NOT DEAD, changing " 2515 "to %u\n", dlm->name, dlm->reco.new_master, 2516 br->node_idx); 2517 /* may not have seen the new master as dead yet */ 2518 } 2519 } 2520 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2521 mlog(ML_NOTICE, "%s: dead_node previously set to %u, " 2522 "node %u changing it to %u\n", dlm->name, 2523 dlm->reco.dead_node, br->node_idx, br->dead_node); 2524 } 2525 dlm_set_reco_master(dlm, br->node_idx); 2526 dlm_set_reco_dead_node(dlm, br->dead_node); 2527 if (!test_bit(br->dead_node, dlm->recovery_map)) { 2528 mlog(0, "recovery master %u sees %u as dead, but this " 2529 "node has not yet. marking %u as dead\n", 2530 br->node_idx, br->dead_node, br->dead_node); 2531 if (!test_bit(br->dead_node, dlm->domain_map) || 2532 !test_bit(br->dead_node, dlm->live_nodes_map)) 2533 mlog(0, "%u not in domain/live_nodes map " 2534 "so setting it in reco map manually\n", 2535 br->dead_node); 2536 /* force the recovery cleanup in __dlm_hb_node_down 2537 * both of these will be cleared in a moment */ 2538 set_bit(br->dead_node, dlm->domain_map); 2539 set_bit(br->dead_node, dlm->live_nodes_map); 2540 __dlm_hb_node_down(dlm, br->dead_node); 2541 } 2542 spin_unlock(&dlm->spinlock); 2543 2544 dlm_kick_recovery_thread(dlm); 2545 2546 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", 2547 dlm->name, br->node_idx, br->dead_node, 2548 dlm->reco.dead_node, dlm->reco.new_master); 2549 2550 dlm_put(dlm); 2551 return 0; 2552 } 2553 2554 #define DLM_FINALIZE_STAGE2 0x01 2555 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) 2556 { 2557 int ret = 0; 2558 struct dlm_finalize_reco fr; 2559 struct dlm_node_iter iter; 2560 int nodenum; 2561 int status; 2562 int stage = 1; 2563 2564 mlog(0, "finishing recovery for node %s:%u, " 2565 "stage %d\n", dlm->name, dlm->reco.dead_node, stage); 2566 2567 spin_lock(&dlm->spinlock); 2568 dlm_node_iter_init(dlm->domain_map, &iter); 2569 spin_unlock(&dlm->spinlock); 2570 2571 stage2: 2572 memset(&fr, 0, sizeof(fr)); 2573 fr.node_idx = dlm->node_num; 2574 fr.dead_node = dlm->reco.dead_node; 2575 if (stage == 2) 2576 fr.flags |= DLM_FINALIZE_STAGE2; 2577 2578 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 2579 if (nodenum == dlm->node_num) 2580 continue; 2581 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, 2582 &fr, sizeof(fr), nodenum, &status); 2583 if (ret >= 0) 2584 ret = status; 2585 if (ret < 0) { 2586 mlog_errno(ret); 2587 if (dlm_is_host_down(ret)) { 2588 /* this has no effect on this recovery 2589 * session, so set the status to zero to 2590 * finish out the last recovery */ 2591 mlog(ML_ERROR, "node %u went down after this " 2592 "node finished recovery.\n", nodenum); 2593 ret = 0; 2594 continue; 2595 } 2596 break; 2597 } 2598 } 2599 if (stage == 1) { 2600 /* reset the node_iter back to the top and send finalize2 */ 2601 iter.curnode = -1; 2602 stage = 2; 2603 goto stage2; 2604 } 2605 2606 return ret; 2607 } 2608 2609 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data) 2610 { 2611 struct dlm_ctxt *dlm = data; 2612 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; 2613 int stage = 1; 2614 2615 /* ok to return 0, domain has gone away */ 2616 if (!dlm_grab(dlm)) 2617 return 0; 2618 2619 if (fr->flags & DLM_FINALIZE_STAGE2) 2620 stage = 2; 2621 2622 mlog(0, "%s: node %u finalizing recovery stage%d of " 2623 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, 2624 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); 2625 2626 spin_lock(&dlm->spinlock); 2627 2628 if (dlm->reco.new_master != fr->node_idx) { 2629 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " 2630 "%u is supposed to be the new master, dead=%u\n", 2631 fr->node_idx, dlm->reco.new_master, fr->dead_node); 2632 BUG(); 2633 } 2634 if (dlm->reco.dead_node != fr->dead_node) { 2635 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " 2636 "node %u, but node %u is supposed to be dead\n", 2637 fr->node_idx, fr->dead_node, dlm->reco.dead_node); 2638 BUG(); 2639 } 2640 2641 switch (stage) { 2642 case 1: 2643 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); 2644 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { 2645 mlog(ML_ERROR, "%s: received finalize1 from " 2646 "new master %u for dead node %u, but " 2647 "this node has already received it!\n", 2648 dlm->name, fr->node_idx, fr->dead_node); 2649 dlm_print_reco_node_status(dlm); 2650 BUG(); 2651 } 2652 dlm->reco.state |= DLM_RECO_STATE_FINALIZE; 2653 spin_unlock(&dlm->spinlock); 2654 break; 2655 case 2: 2656 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { 2657 mlog(ML_ERROR, "%s: received finalize2 from " 2658 "new master %u for dead node %u, but " 2659 "this node did not have finalize1!\n", 2660 dlm->name, fr->node_idx, fr->dead_node); 2661 dlm_print_reco_node_status(dlm); 2662 BUG(); 2663 } 2664 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; 2665 spin_unlock(&dlm->spinlock); 2666 dlm_reset_recovery(dlm); 2667 dlm_kick_recovery_thread(dlm); 2668 break; 2669 default: 2670 BUG(); 2671 } 2672 2673 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", 2674 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); 2675 2676 dlm_put(dlm); 2677 return 0; 2678 } 2679