// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/string_choices.h>

#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "../cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

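/*
 * Hand out a cluster-unique migration cookie.  Cookies start at 1 and
 * wrap back to 1, deliberately skipping 0: a zero mig_cookie marks a
 * lockres that fits in a single message (see dlm_send_one_lockres())
 * and a zero lock cookie is how dlm_is_dummy_lock() recognizes a
 * placeholder lock.
 */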
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot=0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear. just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco-%s", dlm->name);
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 */
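
/*
 * A purely illustrative walk-through (made-up node numbers, not from a
 * real trace): nodes 1, 2 and 3 are in the domain and node 3 dies.
 * Nodes 1 and 2 both set bit 3 in their recovery_map.  Their recovery
 * threads race for the $RECOVERY lock; the winner (say node 1) becomes
 * recovery master for this pass.  Node 1 then asks node 2 for all lock
 * state it holds for node 3, node 2 streams it back in migratable
 * lockres messages, and once node 1 has everything it sends the
 * finalize message so both nodes can clear bit 3.
 */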

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if node has already been recovered,
 * i.e. its bit is no longer set in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	if (dlm->migrate_done) {
		mlog(0, "%s: no need to do recovery after migrating all "
		     "lock resources\n", dlm->name);
		spin_unlock(&dlm->spinlock);
		return 0;
	}

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	spin_unlock(&dlm->spinlock);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone. go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success! see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

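/*
 * Runs on the recovery master: ask every live node for all lock state
 * it holds for the dead node, wait until each node has reported DONE,
 * then broadcast the finalize message and claim the remastered lock
 * resources locally.
 */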
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master. there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     str_yes_no(dlm_is_node_dead(dlm, ndata->node_num)));
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine. don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     str_yes_no(all_nodes_done));
		if (all_nodes_done) {
			int ret;

			/* Set this flag on the recovery master so that a
			 * new recovery for another dead node cannot start
			 * before this recovery is done; otherwise the
			 * overlapping recoveries could hang. */
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm);

	return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	bitmap_copy(dlm->reco.node_map, dlm->domain_map, O2NM_MAX_NODES);
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	// return from here, then
	// sleep until all received or error
	return ret;

}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died. if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list. now move items from that list
	 * to a temp list if the dead owner matches. note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					/* Can't schedule DLM_UNLOCK_FREE_LOCK
					 * - do manually */
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

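/*
 * Count the locks on all three queues.  The queue++ arithmetic below
 * relies on the granted, converting and blocked list_heads being laid
 * out consecutively in struct dlm_lock_resource.
 */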
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 struct_size(mres, ml, mres->num_locks),
				 send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

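/*
 * A "dummy" lock (zero cookie, LKM_IVMODE everywhere, parked on the
 * blocked list) carries no lock state; it only tells the receiving
 * node to keep a refmap reference for the sender.  It is recognized
 * on the other side by dlm_is_dummy_lock() below.
 */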
dlm_add_dummy_lock(struct dlm_ctxt * dlm,struct dlm_migratable_lockres * mres)1229 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1230 struct dlm_migratable_lockres *mres)
1231 {
1232 struct dlm_lock dummy;
1233 memset(&dummy, 0, sizeof(dummy));
1234 dummy.ml.cookie = 0;
1235 dummy.ml.type = LKM_IVMODE;
1236 dummy.ml.convert_type = LKM_IVMODE;
1237 dummy.ml.highest_blocked = LKM_IVMODE;
1238 dummy.lksb = NULL;
1239 dummy.ml.node = dlm->node_num;
1240 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1241 }
1242
dlm_is_dummy_lock(struct dlm_ctxt * dlm,struct dlm_migratable_lock * ml,u8 * nodenum)1243 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1244 struct dlm_migratable_lock *ml,
1245 u8 *nodenum)
1246 {
1247 if (unlikely(ml->cookie == 0 &&
1248 ml->type == LKM_IVMODE &&
1249 ml->convert_type == LKM_IVMODE &&
1250 ml->highest_blocked == LKM_IVMODE &&
1251 ml->list == DLM_BLOCKED_LIST)) {
1252 *nodenum = ml->node;
1253 return 1;
1254 }
1255 return 0;
1256 }
1257
dlm_send_one_lockres(struct dlm_ctxt * dlm,struct dlm_lock_resource * res,struct dlm_migratable_lockres * mres,u8 send_to,u8 flags)1258 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1259 struct dlm_migratable_lockres *mres,
1260 u8 send_to, u8 flags)
1261 {
1262 struct list_head *queue;
1263 int total_locks, i;
1264 u64 mig_cookie = 0;
1265 struct dlm_lock *lock;
1266 int ret = 0;
1267
1268 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1269
1270 mlog(0, "sending to %u\n", send_to);
1271
1272 total_locks = dlm_num_locks_in_lockres(res);
1273 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1274 /* rare, but possible */
1275 mlog(0, "argh. lockres has %d locks. this will "
1276 "require more than one network packet to "
1277 "migrate\n", total_locks);
1278 mig_cookie = dlm_get_next_mig_cookie();
1279 }
1280
1281 dlm_init_migratable_lockres(mres, res->lockname.name,
1282 res->lockname.len, total_locks,
1283 mig_cookie, flags, res->owner);
1284
1285 total_locks = 0;
1286 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1287 queue = dlm_list_idx_to_ptr(res, i);
1288 list_for_each_entry(lock, queue, list) {
1289 /* add another lock. */
1290 total_locks++;
1291 if (!dlm_add_lock_to_array(lock, mres, i))
1292 continue;
1293
1294 /* this filled the lock message,
1295 * we must send it immediately. */
1296 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1297 res, total_locks);
1298 if (ret < 0)
1299 goto error;
1300 }
1301 }
1302 if (total_locks == 0) {
1303 /* send a dummy lock to indicate a mastery reference only */
1304 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1305 dlm->name, res->lockname.len, res->lockname.name,
1306 send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1307 "migration");
1308 dlm_add_dummy_lock(dlm, mres);
1309 }
1310 /* flush any remaining locks */
1311 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1312 if (ret < 0)
1313 goto error;
1314 return ret;
1315
1316 error:
1317 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1318 dlm->name, ret);
1319 if (!dlm_is_host_down(ret))
1320 BUG();
1321 mlog(0, "%s: node %u went down while sending %s "
1322 "lockres %.*s\n", dlm->name, send_to,
1323 flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1324 res->lockname.len, res->lockname.name);
1325 return ret;
1326 }
1327
1328
1329
1330 /*
1331 * this message will contain no more than one page worth of
1332 * recovery data, and it will work on only one lockres.
1333 * there may be many locks in this page, and we may need to wait
1334 * for additional packets to complete all the locks (rare, but
1335 * possible).
1336 */
1337 /*
1338 * NOTE: the allocation error cases here are scary
1339 * we really cannot afford to fail an alloc in recovery
1340 * do we spin? returning an error only delays the problem really
1341 */
1342
dlm_mig_lockres_handler(struct o2net_msg * msg,u32 len,void * data,void ** ret_data)1343 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1344 void **ret_data)
1345 {
1346 struct dlm_ctxt *dlm = data;
1347 struct dlm_migratable_lockres *mres =
1348 (struct dlm_migratable_lockres *)msg->buf;
1349 int ret = 0;
1350 u8 real_master;
1351 u8 extra_refs = 0;
1352 char *buf = NULL;
1353 struct dlm_work_item *item = NULL;
1354 struct dlm_lock_resource *res = NULL;
1355 unsigned int hash;
1356
1357 if (!dlm_grab(dlm))
1358 return -EINVAL;
1359
1360 if (!dlm_joined(dlm)) {
1361 mlog(ML_ERROR, "Domain %s not joined! "
1362 "lockres %.*s, master %u\n",
1363 dlm->name, mres->lockname_len,
1364 mres->lockname, mres->master);
1365 dlm_put(dlm);
1366 return -EINVAL;
1367 }
1368
1369 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1370
1371 real_master = mres->master;
1372 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1373 /* cannot migrate a lockres with no master */
1374 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1375 }
1376
1377 mlog(0, "%s message received from node %u\n",
1378 (mres->flags & DLM_MRES_RECOVERY) ?
1379 "recovery" : "migration", mres->master);
1380 if (mres->flags & DLM_MRES_ALL_DONE)
1381 mlog(0, "all done flag. all lockres data received!\n");
1382
1383 ret = -ENOMEM;
1384 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1385 item = kzalloc(sizeof(*item), GFP_NOFS);
1386 if (!buf || !item)
1387 goto leave;
1388
1389 /* lookup the lock to see if we have a secondary queue for this
1390 * already... just add the locks in and this will have its owner
1391 * and RECOVERY flag changed when it completes. */
1392 hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
1393 spin_lock(&dlm->spinlock);
1394 res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
1395 hash);
1396 if (res) {
1397 /* this will get a ref on res */
1398 /* mark it as recovering/migrating and hash it */
1399 spin_lock(&res->spinlock);
1400 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
1401 mlog(0, "%s: node is attempting to migrate "
1402 "lockres %.*s, but marked as dropping "
1403 " ref!\n", dlm->name,
1404 mres->lockname_len, mres->lockname);
1405 ret = -EINVAL;
1406 spin_unlock(&res->spinlock);
1407 spin_unlock(&dlm->spinlock);
1408 dlm_lockres_put(res);
1409 goto leave;
1410 }
1411
1412 if (mres->flags & DLM_MRES_RECOVERY) {
1413 res->state |= DLM_LOCK_RES_RECOVERING;
1414 } else {
1415 if (res->state & DLM_LOCK_RES_MIGRATING) {
1416 /* this is at least the second
1417 * lockres message */
1418 mlog(0, "lock %.*s is already migrating\n",
1419 mres->lockname_len,
1420 mres->lockname);
1421 } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1422 /* caller should BUG */
1423 mlog(ML_ERROR, "node is attempting to migrate "
1424 "lock %.*s, but marked as recovering!\n",
1425 mres->lockname_len, mres->lockname);
1426 ret = -EFAULT;
1427 spin_unlock(&res->spinlock);
1428 spin_unlock(&dlm->spinlock);
1429 dlm_lockres_put(res);
1430 goto leave;
1431 }
1432 res->state |= DLM_LOCK_RES_MIGRATING;
1433 }
1434 spin_unlock(&res->spinlock);
1435 spin_unlock(&dlm->spinlock);
1436 } else {
1437 spin_unlock(&dlm->spinlock);
1438 /* need to allocate, just like if it was
1439 * mastered here normally */
1440 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1441 if (!res)
1442 goto leave;
1443
1444 /* to match the ref that we would have gotten if
1445 * dlm_lookup_lockres had succeeded */
1446 dlm_lockres_get(res);
1447
1448 /* mark it as recovering/migrating and hash it */
1449 if (mres->flags & DLM_MRES_RECOVERY)
1450 res->state |= DLM_LOCK_RES_RECOVERING;
1451 else
1452 res->state |= DLM_LOCK_RES_MIGRATING;
1453
1454 spin_lock(&dlm->spinlock);
1455 __dlm_insert_lockres(dlm, res);
1456 spin_unlock(&dlm->spinlock);
1457
1458 /* Add an extra ref for this lock-less lockres lest the
1459 * dlm_thread purges it before we get the chance to add
1460 * locks to it */
1461 dlm_lockres_get(res);
1462
1463 /* There are three refs that need to be put.
1464 * 1. Taken above.
1465 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
1466 * 3. dlm_lookup_lockres()
1467 * The first one is handled at the end of this function. The
1468 * other two are handled in the worker thread after locks have
1469 * been attached. Yes, we don't wait for purge time to match
1470 * kref_init. The lockres will still have at least one ref
1471 * added because it is in the hash __dlm_insert_lockres() */
1472 extra_refs++;
1473
1474 /* now that the new lockres is inserted,
1475 * make it usable by other processes */
1476 spin_lock(&res->spinlock);
1477 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1478 spin_unlock(&res->spinlock);
1479 wake_up(&res->wq);
1480 }
1481
1482 /* at this point we have allocated everything we need,
1483 * and we have a hashed lockres with an extra ref and
1484 * the proper res->state flags. */
1485 ret = 0;
1486 spin_lock(&res->spinlock);
1487 /* drop this either when master requery finds a different master
1488 * or when a lock is added by the recovery worker */
1489 dlm_lockres_grab_inflight_ref(dlm, res);
1490 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1491 /* migration cannot have an unknown master */
1492 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1493 mlog(0, "recovery has passed me a lockres with an "
1494 "unknown owner.. will need to requery: "
1495 "%.*s\n", mres->lockname_len, mres->lockname);
1496 } else {
1497 /* take a reference now to pin the lockres, drop it
1498 * when locks are added in the worker */
1499 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1500 }
1501 spin_unlock(&res->spinlock);
1502
1503 /* queue up work for dlm_mig_lockres_worker */
1504 dlm_grab(dlm); /* get an extra ref for the work item */
1505 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
1506 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1507 item->u.ml.lockres = res; /* already have a ref */
1508 item->u.ml.real_master = real_master;
1509 item->u.ml.extra_ref = extra_refs;
1510 spin_lock(&dlm->work_lock);
1511 list_add_tail(&item->list, &dlm->work_list);
1512 spin_unlock(&dlm->work_lock);
1513 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1514
1515 leave:
1516 /* One extra ref taken needs to be put here */
1517 if (extra_refs)
1518 dlm_lockres_put(res);
1519
1520 dlm_put(dlm);
1521 if (ret < 0) {
1522 kfree(buf);
1523 kfree(item);
1524 mlog_errno(ret);
1525 }
1526
1527 return ret;
1528 }
1529
1530
dlm_mig_lockres_worker(struct dlm_work_item * item,void * data)1531 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1532 {
1533 struct dlm_ctxt *dlm;
1534 struct dlm_migratable_lockres *mres;
1535 int ret = 0;
1536 struct dlm_lock_resource *res;
1537 u8 real_master;
1538 u8 extra_ref;
1539
1540 dlm = item->dlm;
1541 mres = (struct dlm_migratable_lockres *)data;
1542
1543 res = item->u.ml.lockres;
1544 real_master = item->u.ml.real_master;
1545 extra_ref = item->u.ml.extra_ref;
1546
1547 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1548 /* this case is super-rare. only occurs if
1549 * node death happens during migration. */
1550 again:
1551 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1552 if (ret < 0) {
1553 mlog(0, "dlm_lockres_master_requery ret=%d\n",
1554 ret);
1555 goto again;
1556 }
1557 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1558 mlog(0, "lockres %.*s not claimed. "
1559 "this node will take it.\n",
1560 res->lockname.len, res->lockname.name);
1561 } else {
1562 spin_lock(&res->spinlock);
1563 dlm_lockres_drop_inflight_ref(dlm, res);
1564 spin_unlock(&res->spinlock);
1565 mlog(0, "master needs to respond to sender "
1566 "that node %u still owns %.*s\n",
1567 real_master, res->lockname.len,
1568 res->lockname.name);
1569 /* cannot touch this lockres */
1570 goto leave;
1571 }
1572 }
1573
1574 ret = dlm_process_recovery_data(dlm, res, mres);
1575 if (ret < 0)
1576 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1577 else
1578 mlog(0, "dlm_process_recovery_data succeeded\n");
1579
1580 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1581 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1582 ret = dlm_finish_migration(dlm, res, mres->master);
1583 if (ret < 0)
1584 mlog_errno(ret);
1585 }
1586
1587 leave:
1588 /* See comment in dlm_mig_lockres_handler() */
1589 if (res) {
1590 if (extra_ref)
1591 dlm_lockres_put(res);
1592 dlm_lockres_put(res);
1593 }
1594 kfree(data);
1595 }
1596
1597
1598
dlm_lockres_master_requery(struct dlm_ctxt * dlm,struct dlm_lock_resource * res,u8 * real_master)1599 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1600 struct dlm_lock_resource *res,
1601 u8 *real_master)
1602 {
1603 struct dlm_node_iter iter;
1604 int nodenum;
1605 int ret = 0;
1606
1607 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1608
1609 /* we only reach here if one of the two nodes in a
1610 * migration died while the migration was in progress.
1611 * at this point we need to requery the master. we
1612 * know that the new_master got as far as creating
1613 * an mle on at least one node, but we do not know
1614 * if any nodes had actually cleared the mle and set
1615 * the master to the new_master. the old master
1616 * is supposed to set the owner to UNKNOWN in the
1617 * event of a new_master death, so the only possible
1618 * responses that we can get from nodes here are
1619 * that the master is new_master, or that the master
1620 * is UNKNOWN.
1621 * if all nodes come back with UNKNOWN then we know
1622 * the lock needs remastering here.
1623 * if any node comes back with a valid master, check
1624 * to see if that master is the one that we are
1625 * recovering. if so, then the new_master died and
1626 * we need to remaster this lock. if not, then the
1627 * new_master survived and that node will respond to
1628 * other nodes about the owner.
1629 * if there is an owner, this node needs to dump this
1630 * lockres and alert the sender that this lockres
1631 * was rejected. */
1632 spin_lock(&dlm->spinlock);
1633 dlm_node_iter_init(dlm->domain_map, &iter);
1634 spin_unlock(&dlm->spinlock);
1635
1636 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1637 /* do not send to self */
1638 if (nodenum == dlm->node_num)
1639 continue;
1640 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1641 if (ret < 0) {
1642 mlog_errno(ret);
1643 if (!dlm_is_host_down(ret))
1644 BUG();
1645 /* host is down, so answer for that node would be
1646 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
1647 }
1648 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1649 mlog(0, "lock master is %u\n", *real_master);
1650 break;
1651 }
1652 }
1653 return ret;
1654 }
1655
1656
dlm_do_master_requery(struct dlm_ctxt * dlm,struct dlm_lock_resource * res,u8 nodenum,u8 * real_master)1657 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1658 u8 nodenum, u8 *real_master)
1659 {
1660 int ret;
1661 struct dlm_master_requery req;
1662 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1663
1664 memset(&req, 0, sizeof(req));
1665 req.node_idx = dlm->node_num;
1666 req.namelen = res->lockname.len;
1667 memcpy(req.name, res->lockname.name, res->lockname.len);
1668
1669 resend:
1670 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1671 &req, sizeof(req), nodenum, &status);
1672 if (ret < 0)
1673 mlog(ML_ERROR, "Error %d when sending message %u (key "
1674 "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1675 dlm->key, nodenum);
1676 else if (status == -ENOMEM) {
1677 mlog_errno(status);
1678 msleep(50);
1679 goto resend;
1680 } else {
1681 BUG_ON(status < 0);
1682 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1683 *real_master = (u8) (status & 0xff);
1684 mlog(0, "node %u responded to master requery with %u\n",
1685 nodenum, *real_master);
1686 ret = 0;
1687 }
1688 return ret;
1689 }
1690
1691
1692 /* this function cannot error, so unless the sending
1693 * or receiving of the message failed, the owner can
1694 * be trusted */
dlm_master_requery_handler(struct o2net_msg * msg,u32 len,void * data,void ** ret_data)1695 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1696 void **ret_data)
1697 {
1698 struct dlm_ctxt *dlm = data;
1699 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1700 struct dlm_lock_resource *res = NULL;
1701 unsigned int hash;
1702 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1703 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1704 int dispatched = 0;
1705
1706 if (!dlm_grab(dlm)) {
1707 /* since the domain has gone away on this
1708 * node, the proper response is UNKNOWN */
1709 return master;
1710 }
1711
1712 hash = dlm_lockid_hash(req->name, req->namelen);
1713
1714 spin_lock(&dlm->spinlock);
1715 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1716 if (res) {
1717 spin_lock(&res->spinlock);
1718 master = res->owner;
1719 if (master == dlm->node_num) {
1720 int ret = dlm_dispatch_assert_master(dlm, res,
1721 0, 0, flags);
1722 if (ret < 0) {
1723 mlog_errno(ret);
1724 spin_unlock(&res->spinlock);
1725 dlm_lockres_put(res);
1726 spin_unlock(&dlm->spinlock);
1727 dlm_put(dlm);
1728 /* sender will take care of this and retry */
1729 return ret;
1730 } else {
1731 dispatched = 1;
1732 __dlm_lockres_grab_inflight_worker(dlm, res);
1733 spin_unlock(&res->spinlock);
1734 }
1735 } else {
1736 /* put.. in case we are not the master */
1737 spin_unlock(&res->spinlock);
1738 dlm_lockres_put(res);
1739 }
1740 }
1741 spin_unlock(&dlm->spinlock);
1742
1743 if (!dispatched)
1744 dlm_put(dlm);
1745 return master;
1746 }
1747
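/*
 * Map a wire-format queue index (0 = granted, 1 = converting, 2 = blocked)
 * to the matching list head. The pointer arithmetic below relies on the
 * three list_heads sitting contiguously, in exactly that order, inside
 * struct dlm_lock_resource.
 */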
1748 static inline struct list_head *
1749 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1750 {
1751 struct list_head *ret;
1752 BUG_ON(list_num < 0);
1753 BUG_ON(list_num > 2);
1754 ret = &(res->granted);
1755 ret += list_num;
1756 return ret;
1757 }
1758 /* TODO: do ast flush business
1759 * TODO: do MIGRATING and RECOVERING spinning
1760 */
1761
1762 /*
1763 * NOTE about in-flight requests during migration:
1764 *
1765 * Before attempting the migration, the master has marked the lockres as
1766 * MIGRATING and then flushed all of its pending ASTs. So any in-flight
1767 * requests either got queued before the MIGRATING flag got set, in which
1768 * case the lock data will reflect the change and a return message is on
1769 * the way, or the request failed to get in before MIGRATING got set. In
1770 * this case, the caller will be told to spin and wait for the MIGRATING
1771 * flag to be dropped, then recheck the master.
1772 * This holds true for the convert, cancel and unlock cases, and since lvb
1773 * updates are tied to these same messages, it applies to lvb updates as
1774 * well. For the lock case, there is no way a lock can be on the master
1775 * queue and not be on the secondary queue since the lock is always added
1776 * locally first. This means that the new target node will never be sent
1777 * a lock that it does not already have on the list.
1778 * In total, this means that the local lock is correct and should not be
1779 * updated to match the one sent by the master. Any messages sent back
1780 * from the master before the MIGRATING flag will bring the lock properly
1781 * up-to-date, and the change will be ordered properly for the waiter.
1782 * We will *not* attempt to modify the lock underneath the waiter.
1783 */
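/*
 * An illustrative timeline of the race described above (a sketch only,
 * not additional protocol):
 *
 * master                          migration target
 * ------                          ----------------
 * set MIGRATING
 * flush pending ASTs
 * send lock state  ------------>  dlm_process_recovery_data()
 * late convert/unlock/cancel:
 *   caller is told to spin on
 *   MIGRATING, then re-query
 *   the (new) master
 */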
1784
1785 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1786 struct dlm_lock_resource *res,
1787 struct dlm_migratable_lockres *mres)
1788 {
1789 struct dlm_migratable_lock *ml;
1790 struct list_head *queue, *iter;
1791 struct list_head *tmpq = NULL;
1792 struct dlm_lock *newlock = NULL;
1793 struct dlm_lockstatus *lksb = NULL;
1794 int ret = 0;
1795 int i, j, bad;
1796 struct dlm_lock *lock;
1797 u8 from = O2NM_MAX_NODES;
1798 __be64 c;
1799
1800 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1801 for (i=0; i<mres->num_locks; i++) {
1802 ml = &(mres->ml[i]);
1803
1804 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1805 /* placeholder, just need to set the refmap bit */
1806 BUG_ON(mres->num_locks != 1);
1807 mlog(0, "%s:%.*s: dummy lock for %u\n",
1808 dlm->name, mres->lockname_len, mres->lockname,
1809 from);
1810 spin_lock(&res->spinlock);
1811 dlm_lockres_set_refmap_bit(dlm, res, from);
1812 spin_unlock(&res->spinlock);
1813 break;
1814 }
1815 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1816 newlock = NULL;
1817 lksb = NULL;
1818
1819 queue = dlm_list_num_to_pointer(res, ml->list);
1820 tmpq = NULL;
1821
1822 /* if the lock is for the local node it needs to
1823 * be moved to the proper location within the queue.
1824 * do not allocate a new lock structure. */
1825 if (ml->node == dlm->node_num) {
1826 /* MIGRATION ONLY! */
1827 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1828
1829 lock = NULL;
1830 spin_lock(&res->spinlock);
1831 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1832 tmpq = dlm_list_idx_to_ptr(res, j);
1833 list_for_each(iter, tmpq) {
1834 lock = list_entry(iter,
1835 struct dlm_lock, list);
1836 if (lock->ml.cookie == ml->cookie)
1837 break;
1838 lock = NULL;
1839 }
1840 if (lock)
1841 break;
1842 }
1843
1844 /* lock is always created locally first, and
1845 * destroyed locally last. it must be on the list */
1846 if (!lock) {
1847 c = ml->cookie;
1848 mlog(ML_ERROR, "Could not find local lock "
1849 "with cookie %u:%llu, node %u, "
1850 "list %u, flags 0x%x, type %d, "
1851 "conv %d, highest blocked %d\n",
1852 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1853 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1854 ml->node, ml->list, ml->flags, ml->type,
1855 ml->convert_type, ml->highest_blocked);
1856 __dlm_print_one_lock_resource(res);
1857 BUG();
1858 }
1859
1860 if (lock->ml.node != ml->node) {
1861 c = lock->ml.cookie;
1862 mlog(ML_ERROR, "Mismatched node# in lock "
1863 "cookie %u:%llu, name %.*s, node %u\n",
1864 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1865 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1866 res->lockname.len, res->lockname.name,
1867 lock->ml.node);
1868 c = ml->cookie;
1869 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1870 "node %u, list %u, flags 0x%x, type %d, "
1871 "conv %d, highest blocked %d\n",
1872 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1873 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1874 ml->node, ml->list, ml->flags, ml->type,
1875 ml->convert_type, ml->highest_blocked);
1876 __dlm_print_one_lock_resource(res);
1877 BUG();
1878 }
1879
1880 if (tmpq != queue) {
1881 c = ml->cookie;
1882 mlog(0, "Lock cookie %u:%llu was on list %u "
1883 "instead of list %u for %.*s\n",
1884 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1885 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1886 j, ml->list, res->lockname.len,
1887 res->lockname.name);
1888 __dlm_print_one_lock_resource(res);
1889 spin_unlock(&res->spinlock);
1890 continue;
1891 }
1892
1893 /* see NOTE above about why we do not update
1894 * to match the master here */
1895
1896 /* move the lock to its proper place */
1897 /* do not alter lock refcount. switching lists. */
1898 list_move_tail(&lock->list, queue);
1899 spin_unlock(&res->spinlock);
1900
1901 mlog(0, "just reordered a local lock!\n");
1902 continue;
1903 }
1904
1905 /* lock is for another node. */
1906 newlock = dlm_new_lock(ml->type, ml->node,
1907 be64_to_cpu(ml->cookie), NULL);
1908 if (!newlock) {
1909 ret = -ENOMEM;
1910 goto leave;
1911 }
1912 lksb = newlock->lksb;
1913 dlm_lock_attach_lockres(newlock, res);
1914
1915 if (ml->convert_type != LKM_IVMODE) {
1916 BUG_ON(queue != &res->converting);
1917 newlock->ml.convert_type = ml->convert_type;
1918 }
1919 lksb->flags |= (ml->flags &
1920 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1921
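/* a no-lock (NL) holder can neither read nor write the lvb,
 * so there is no lvb state to copy for it */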
1922 if (ml->type == LKM_NLMODE)
1923 goto skip_lvb;
1924
1925 /*
1926 * If the lock is in the blocked list it can't have a valid lvb,
1927 * so skip it
1928 */
1929 if (ml->list == DLM_BLOCKED_LIST)
1930 goto skip_lvb;
1931
1932 if (!dlm_lvb_is_empty(mres->lvb)) {
1933 if (lksb->flags & DLM_LKSB_PUT_LVB) {
1934 /* other node was trying to update
1935 * lvb when node died. recreate the
1936 * lksb with the updated lvb. */
1937 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1938 /* the lock resource lvb update must happen
1939 * NOW, before the spinlock is dropped.
1940 * we no longer wait for the AST to update
1941 * the lvb. */
1942 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1943 } else {
1944 /* otherwise, the node is sending its
1945 * most recent valid lvb info */
1946 BUG_ON(ml->type != LKM_EXMODE &&
1947 ml->type != LKM_PRMODE);
1948 if (!dlm_lvb_is_empty(res->lvb) &&
1949 (ml->type == LKM_EXMODE ||
1950 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1951 int i;
1952 mlog(ML_ERROR, "%s:%.*s: received bad "
1953 "lvb! type=%d\n", dlm->name,
1954 res->lockname.len,
1955 res->lockname.name, ml->type);
1956 printk("lockres lvb=[");
1957 for (i=0; i<DLM_LVB_LEN; i++)
1958 printk("%02x", res->lvb[i]);
1959 printk("]\nmigrated lvb=[");
1960 for (i=0; i<DLM_LVB_LEN; i++)
1961 printk("%02x", mres->lvb[i]);
1962 printk("]\n");
1963 dlm_print_one_lock_resource(res);
1964 BUG();
1965 }
1966 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1967 }
1968 }
1969 skip_lvb:
1970
1971 /* NOTE:
1972 * wrt lock queue ordering and recovery:
1973 * 1. order of locks on granted queue is
1974 * meaningless.
1975 * 2. order of locks on converting queue is
1976 * LOST with the node death. sorry charlie.
1977 * 3. order of locks on the blocked queue is
1978 * also LOST.
1979 * order of locks does not affect integrity, it
1980 * just means that a lock request may get pushed
1981 * back in line as a result of the node death.
1982 * also note that for a given node the lock order
1983 * for its secondary queue locks is preserved
1984 * relative to each other, but clearly *not*
1985 * preserved relative to locks from other nodes.
1986 */
1987 bad = 0;
1988 spin_lock(&res->spinlock);
1989 list_for_each_entry(lock, queue, list) {
1990 if (lock->ml.cookie == ml->cookie) {
1991 c = lock->ml.cookie;
1992 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1993 "exists on this lockres!\n", dlm->name,
1994 res->lockname.len, res->lockname.name,
1995 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1996 dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1997
1998 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1999 "node=%u, cookie=%u:%llu, queue=%d\n",
2000 ml->type, ml->convert_type, ml->node,
2001 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
2002 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
2003 ml->list);
2004
2005 __dlm_print_one_lock_resource(res);
2006 bad = 1;
2007 break;
2008 }
2009 }
2010 if (!bad) {
2011 dlm_lock_get(newlock);
2012 if (mres->flags & DLM_MRES_RECOVERY &&
2013 ml->list == DLM_CONVERTING_LIST &&
2014 newlock->ml.type >
2015 newlock->ml.convert_type) {
2016 /* newlock is doing downconvert, add it to the
2017 * head of converting list */
2018 list_add(&newlock->list, queue);
2019 } else
2020 list_add_tail(&newlock->list, queue);
2021 mlog(0, "%s:%.*s: added lock for node %u, "
2022 "setting refmap bit\n", dlm->name,
2023 res->lockname.len, res->lockname.name, ml->node);
2024 dlm_lockres_set_refmap_bit(dlm, res, ml->node);
2025 }
2026 spin_unlock(&res->spinlock);
2027 }
2028 mlog(0, "done running all the locks\n");
2029
2030 leave:
2031 /* balance the ref taken when the work was queued */
2032 spin_lock(&res->spinlock);
2033 dlm_lockres_drop_inflight_ref(dlm, res);
2034 spin_unlock(&res->spinlock);
2035
2036 if (ret < 0)
2037 mlog_errno(ret);
2038
2039 return ret;
2040 }
2041
2042 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
2043 struct dlm_lock_resource *res)
2044 {
2045 int i;
2046 struct list_head *queue;
2047 struct dlm_lock *lock, *next;
2048
2049 assert_spin_locked(&dlm->spinlock);
2050 assert_spin_locked(&res->spinlock);
2051 res->state |= DLM_LOCK_RES_RECOVERING;
2052 if (!list_empty(&res->recovering)) {
2053 mlog(0,
2054 "Recovering res %s:%.*s, is already on recovery list!\n",
2055 dlm->name, res->lockname.len, res->lockname.name);
2056 list_del_init(&res->recovering);
2057 dlm_lockres_put(res);
2058 }
2059 /* We need to hold a reference while on the recovery list */
2060 dlm_lockres_get(res);
2061 list_add_tail(&res->recovering, &dlm->reco.resources);
2062
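/* Half-finished operations must be resolved before this lock state
 * is sent to the new master. A summary of the cases handled below:
 *
 * convert_pending -> revert to the previously granted mode
 * lock_pending    -> drop the never-granted request entirely
 * unlock_pending  -> treat the unlock as already committed
 * cancel_pending  -> treat the cancel as already committed
 */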
2063 /* find any pending locks and put them back on proper list */
2064 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2065 queue = dlm_list_idx_to_ptr(res, i);
2066 list_for_each_entry_safe(lock, next, queue, list) {
2067 dlm_lock_get(lock);
2068 if (lock->convert_pending) {
2069 /* move converting lock back to granted */
2070 mlog(0, "node died with convert pending "
2071 "on %.*s. move back to granted list.\n",
2072 res->lockname.len, res->lockname.name);
2073 dlm_revert_pending_convert(res, lock);
2074 lock->convert_pending = 0;
2075 } else if (lock->lock_pending) {
2076 /* remove pending lock requests completely */
2077 BUG_ON(i != DLM_BLOCKED_LIST);
2078 mlog(0, "node died with lock pending "
2079 "on %.*s. remove from blocked list and skip.\n",
2080 res->lockname.len, res->lockname.name);
2081 /* lock will be floating until ref in
2082 * dlmlock_remote is freed after the network
2083 * call returns. ok for it to not be on any
2084 * list since no ast can be called
2085 * (the master is dead). */
2086 dlm_revert_pending_lock(res, lock);
2087 lock->lock_pending = 0;
2088 } else if (lock->unlock_pending) {
2089 /* if an unlock was in progress, treat as
2090 * if this had completed successfully
2091 * before sending this lock state to the
2092 * new master. note that the dlm_unlock
2093 * call is still responsible for calling
2094 * the unlockast. that will happen after
2095 * the network call times out. for now,
2096 * just move lists to prepare the new
2097 * recovery master. */
2098 BUG_ON(i != DLM_GRANTED_LIST);
2099 mlog(0, "node died with unlock pending "
2100 "on %.*s. remove from blocked list and skip.\n",
2101 res->lockname.len, res->lockname.name);
2102 dlm_commit_pending_unlock(res, lock);
2103 lock->unlock_pending = 0;
2104 } else if (lock->cancel_pending) {
2105 /* if a cancel was in progress, treat as
2106 * if this had completed successfully
2107 * before sending this lock state to the
2108 * new master */
2109 BUG_ON(i != DLM_CONVERTING_LIST);
2110 mlog(0, "node died with cancel pending "
2111 "on %.*s. move back to granted list.\n",
2112 res->lockname.len, res->lockname.name);
2113 dlm_commit_pending_cancel(res, lock);
2114 lock->cancel_pending = 0;
2115 }
2116 dlm_lock_put(lock);
2117 }
2118 }
2119 }
2120
2121
2122
2123 /* removes all recovered locks from the recovery list.
2124 * sets the res->owner to the new master.
2125 * unsets the RECOVERY flag and wakes waiters. */
2126 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2127 u8 dead_node, u8 new_master)
2128 {
2129 int i;
2130 struct hlist_head *bucket;
2131 struct dlm_lock_resource *res, *next;
2132
2133 assert_spin_locked(&dlm->spinlock);
2134
2135 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2136 if (res->owner == dead_node) {
2137 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2138 dlm->name, res->lockname.len, res->lockname.name,
2139 res->owner, new_master);
2140 list_del_init(&res->recovering);
2141 spin_lock(&res->spinlock);
2142 /* new_master has our reference from
2143 * the lock state sent during recovery */
2144 dlm_change_lockres_owner(dlm, res, new_master);
2145 res->state &= ~DLM_LOCK_RES_RECOVERING;
2146 if (__dlm_lockres_has_locks(res))
2147 __dlm_dirty_lockres(dlm, res);
2148 spin_unlock(&res->spinlock);
2149 wake_up(&res->wq);
2150 dlm_lockres_put(res);
2151 }
2152 }
2153
2154 /* this will become unnecessary eventually, but
2155 * for now we need to run the whole hash, clear
2156 * the RECOVERING state and set the owner
2157 * if necessary */
2158 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2159 bucket = dlm_lockres_hash(dlm, i);
2160 hlist_for_each_entry(res, bucket, hash_node) {
2161 if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
2162 spin_lock(&res->spinlock);
2163 res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
2164 spin_unlock(&res->spinlock);
2165 wake_up(&res->wq);
2166 }
2167
2168 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2169 continue;
2170
2171 if (res->owner != dead_node &&
2172 res->owner != dlm->node_num)
2173 continue;
2174
2175 if (!list_empty(&res->recovering)) {
2176 list_del_init(&res->recovering);
2177 dlm_lockres_put(res);
2178 }
2179
2180 /* new_master has our reference from
2181 * the lock state sent during recovery */
2182 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2183 dlm->name, res->lockname.len, res->lockname.name,
2184 res->owner, new_master);
2185 spin_lock(&res->spinlock);
2186 dlm_change_lockres_owner(dlm, res, new_master);
2187 res->state &= ~DLM_LOCK_RES_RECOVERING;
2188 if (__dlm_lockres_has_locks(res))
2189 __dlm_dirty_lockres(dlm, res);
2190 spin_unlock(&res->spinlock);
2191 wake_up(&res->wq);
2192 }
2193 }
2194 }
2195
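/*
 * Decide whether a given lock means the lvb can no longer be trusted.
 * For this node's own locks ("local"), anything below PR means we never
 * held a guaranteed-valid copy. For the dead node's locks on a lockres
 * this node masters, only an EX holder could have been writing the lvb.
 */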
2196 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2197 {
2198 if (local) {
2199 if (lock->ml.type != LKM_EXMODE &&
2200 lock->ml.type != LKM_PRMODE)
2201 return 1;
2202 } else if (lock->ml.type == LKM_EXMODE)
2203 return 1;
2204 return 0;
2205 }
2206
2207 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2208 struct dlm_lock_resource *res, u8 dead_node)
2209 {
2210 struct list_head *queue;
2211 struct dlm_lock *lock;
2212 int blank_lvb = 0, local = 0;
2213 int i;
2214 u8 search_node;
2215
2216 assert_spin_locked(&dlm->spinlock);
2217 assert_spin_locked(&res->spinlock);
2218
2219 if (res->owner == dlm->node_num)
2220 /* if this node owned the lockres, and if the dead node
2221 * had an EX when it died, blank out the lvb */
2222 search_node = dead_node;
2223 else {
2224 /* if this is a secondary lockres, and we had no EX or PR
2225 * locks granted, we can no longer trust the lvb */
2226 search_node = dlm->node_num;
2227 local = 1; /* check local state for valid lvb */
2228 }
2229
2230 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2231 queue = dlm_list_idx_to_ptr(res, i);
2232 list_for_each_entry(lock, queue, list) {
2233 if (lock->ml.node == search_node) {
2234 if (dlm_lvb_needs_invalidation(lock, local)) {
2235 /* zero the lksb lvb and lockres lvb */
2236 blank_lvb = 1;
2237 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2238 }
2239 }
2240 }
2241 }
2242
2243 if (blank_lvb) {
2244 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2245 res->lockname.len, res->lockname.name, dead_node);
2246 memset(res->lvb, 0, DLM_LVB_LEN);
2247 }
2248 }
2249
2250 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2251 struct dlm_lock_resource *res, u8 dead_node)
2252 {
2253 struct dlm_lock *lock, *next;
2254 unsigned int freed = 0;
2255
2256 /* this node is the lockres master:
2257 * 1) remove any stale locks for the dead node
2258 * 2) if the dead node had an EX when it died, blank out the lvb
2259 */
2260 assert_spin_locked(&dlm->spinlock);
2261 assert_spin_locked(&res->spinlock);
2262
2263 /* We do two dlm_lock_put()s: one for removing the lock from its list,
2264 * the other to force the DLM_UNLOCK_FREE_LOCK action so the lock is freed */
2265
2266 /* TODO: check pending_asts, pending_basts here */
2267 list_for_each_entry_safe(lock, next, &res->granted, list) {
2268 if (lock->ml.node == dead_node) {
2269 list_del_init(&lock->list);
2270 dlm_lock_put(lock);
2271 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2272 dlm_lock_put(lock);
2273 freed++;
2274 }
2275 }
2276 list_for_each_entry_safe(lock, next, &res->converting, list) {
2277 if (lock->ml.node == dead_node) {
2278 list_del_init(&lock->list);
2279 dlm_lock_put(lock);
2280 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2281 dlm_lock_put(lock);
2282 freed++;
2283 }
2284 }
2285 list_for_each_entry_safe(lock, next, &res->blocked, list) {
2286 if (lock->ml.node == dead_node) {
2287 list_del_init(&lock->list);
2288 dlm_lock_put(lock);
2289 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2290 dlm_lock_put(lock);
2291 freed++;
2292 }
2293 }
2294
2295 if (freed) {
2296 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2297 "dropping ref from lockres\n", dlm->name,
2298 res->lockname.len, res->lockname.name, freed, dead_node);
2299 if (!test_bit(dead_node, res->refmap)) {
2300 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2301 "but ref was not set\n", dlm->name,
2302 res->lockname.len, res->lockname.name, freed, dead_node);
2303 __dlm_print_one_lock_resource(res);
2304 }
2305 res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
2306 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2307 } else if (test_bit(dead_node, res->refmap)) {
2308 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2309 "no locks and had not purged before dying\n", dlm->name,
2310 res->lockname.len, res->lockname.name, dead_node);
2311 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2312 }
2313
2314 /* do not kick thread yet */
2315 __dlm_dirty_lockres(dlm, res);
2316 }
2317
2318 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2319 {
2320 struct dlm_lock_resource *res;
2321 int i;
2322 struct hlist_head *bucket;
2323 struct hlist_node *tmp;
2324 struct dlm_lock *lock;
2325
2326
2327 /* purge any stale mles */
2328 dlm_clean_master_list(dlm, dead_node);
2329
2330 /*
2331 * now clean up all lock resources. there are two rules:
2332 *
2333 * 1) if the dead node was the master, move the lockres
2334 * to the recovering list. set the RECOVERING flag.
2335 * this lockres needs to be cleaned up before it can
2336 * be used further.
2337 *
2338 * 2) if this node was the master, remove all locks from
2339 * each of the lockres queues that were owned by the
2340 * dead node. once recovery finishes, the dlm thread
2341 * can be kicked again to see if any ASTs or BASTs
2342 * need to be fired as a result.
2343 */
2344 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2345 bucket = dlm_lockres_hash(dlm, i);
2346 hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
2347 /* always prune any $RECOVERY entries for dead nodes,
2348 * otherwise hangs can occur during later recovery */
2349 if (dlm_is_recovery_lock(res->lockname.name,
2350 res->lockname.len)) {
2351 spin_lock(&res->spinlock);
2352 list_for_each_entry(lock, &res->granted, list) {
2353 if (lock->ml.node == dead_node) {
2354 mlog(0, "AHA! there was "
2355 "a $RECOVERY lock for dead "
2356 "node %u (%s)!\n",
2357 dead_node, dlm->name);
2358 list_del_init(&lock->list);
2359 dlm_lock_put(lock);
2360 /* Can't schedule
2361 * DLM_UNLOCK_FREE_LOCK
2362 * - do manually */
2363 dlm_lock_put(lock);
2364 break;
2365 }
2366 }
2367
2368 if ((res->owner == dead_node) &&
2369 (res->state & DLM_LOCK_RES_DROPPING_REF)) {
2370 dlm_lockres_get(res);
2371 __dlm_do_purge_lockres(dlm, res);
2372 spin_unlock(&res->spinlock);
2373 wake_up(&res->wq);
2374 dlm_lockres_put(res);
2375 continue;
2376 } else if (res->owner == dlm->node_num)
2377 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2378 spin_unlock(&res->spinlock);
2379 continue;
2380 }
2381 spin_lock(&res->spinlock);
2382 /* zero the lvb if necessary */
2383 dlm_revalidate_lvb(dlm, res, dead_node);
2384 if (res->owner == dead_node) {
2385 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2386 mlog(0, "%s:%.*s: owned by "
2387 "dead node %u, this node was "
2388 "dropping its ref when master died. "
2389 "continue, purging the lockres.\n",
2390 dlm->name, res->lockname.len,
2391 res->lockname.name, dead_node);
2392 dlm_lockres_get(res);
2393 __dlm_do_purge_lockres(dlm, res);
2394 spin_unlock(&res->spinlock);
2395 wake_up(&res->wq);
2396 dlm_lockres_put(res);
2397 continue;
2398 }
2399 dlm_move_lockres_to_recovery_list(dlm, res);
2400 } else if (res->owner == dlm->node_num) {
2401 dlm_free_dead_locks(dlm, res, dead_node);
2402 __dlm_lockres_calc_usage(dlm, res);
2403 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2404 if (test_bit(dead_node, res->refmap)) {
2405 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2406 "no locks and had not purged before dying\n",
2407 dlm->name, res->lockname.len,
2408 res->lockname.name, dead_node);
2409 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2410 }
2411 }
2412 spin_unlock(&res->spinlock);
2413 }
2414 }
2415
2416 }
2417
2418 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2419 {
2420 assert_spin_locked(&dlm->spinlock);
2421
2422 if (dlm->reco.new_master == idx) {
2423 mlog(0, "%s: recovery master %d just died\n",
2424 dlm->name, idx);
2425 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2426 /* finalize1 was reached, so it is safe to clear
2427 * the new_master and dead_node. that recovery
2428 * is complete. */
2429 mlog(0, "%s: dead master %d had reached "
2430 "finalize1 state, clearing\n", dlm->name, idx);
2431 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2432 __dlm_reset_recovery(dlm);
2433 }
2434 }
2435
2436 /* Clean up join state on node death. */
2437 if (dlm->joining_node == idx) {
2438 mlog(0, "Clearing join state for node %u\n", idx);
2439 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2440 }
2441
2442 /* check to see if the node is already considered dead */
2443 if (!test_bit(idx, dlm->live_nodes_map)) {
2444 mlog(0, "for domain %s, node %d is already dead. "
2445 "another node likely did recovery already.\n",
2446 dlm->name, idx);
2447 return;
2448 }
2449
2450 /* check to see if we do not care about this node */
2451 if (!test_bit(idx, dlm->domain_map)) {
2452 /* This also catches the case that we get a node down
2453 * but haven't joined the domain yet. */
2454 mlog(0, "node %u already removed from domain!\n", idx);
2455 return;
2456 }
2457
2458 clear_bit(idx, dlm->live_nodes_map);
2459
2460 /* make sure local cleanup occurs before the heartbeat events */
2461 if (!test_bit(idx, dlm->recovery_map))
2462 dlm_do_local_recovery_cleanup(dlm, idx);
2463
2464 /* notify anything attached to the heartbeat events */
2465 dlm_hb_event_notify_attached(dlm, idx, 0);
2466
2467 mlog(0, "node %u being removed from domain map!\n", idx);
2468 clear_bit(idx, dlm->domain_map);
2469 clear_bit(idx, dlm->exit_domain_map);
2470 /* wake up migration waiters if a node goes down.
2471 * perhaps later we can genericize this for other waiters. */
2472 wake_up(&dlm->migration_wq);
2473
2474 set_bit(idx, dlm->recovery_map);
2475 }
2476
2477 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2478 {
2479 struct dlm_ctxt *dlm = data;
2480
2481 if (!dlm_grab(dlm))
2482 return;
2483
2484 /*
2485 * This will notify any dlm users that a node in our domain
2486 * went away without notifying us first.
2487 */
2488 if (test_bit(idx, dlm->domain_map))
2489 dlm_fire_domain_eviction_callbacks(dlm, idx);
2490
2491 spin_lock(&dlm->spinlock);
2492 __dlm_hb_node_down(dlm, idx);
2493 spin_unlock(&dlm->spinlock);
2494
2495 dlm_put(dlm);
2496 }
2497
2498 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2499 {
2500 struct dlm_ctxt *dlm = data;
2501
2502 if (!dlm_grab(dlm))
2503 return;
2504
2505 spin_lock(&dlm->spinlock);
2506 set_bit(idx, dlm->live_nodes_map);
2507 /* do NOT notify mle attached to the heartbeat events.
2508 * new nodes are not interested in mastery until joined. */
2509 spin_unlock(&dlm->spinlock);
2510
2511 dlm_put(dlm);
2512 }
2513
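/* These callbacks exist only because the dlmlock()/dlmunlock() API
 * requires them; the $RECOVERY lock is used purely for its grant/deny
 * result, so they have nothing to do beyond logging. */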
2514 static void dlm_reco_ast(void *astdata)
2515 {
2516 struct dlm_ctxt *dlm = astdata;
2517 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2518 dlm->node_num, dlm->name);
2519 }
2520 static void dlm_reco_bast(void *astdata, int blocked_type)
2521 {
2522 struct dlm_ctxt *dlm = astdata;
2523 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2524 dlm->node_num, dlm->name);
2525 }
2526 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2527 {
2528 mlog(0, "unlockast for recovery lock fired!\n");
2529 }
2530
2531 /*
2532 * dlm_pick_recovery_master will continually attempt to use
2533 * dlmlock() on the special "$RECOVERY" lockres with the
2534 * LKM_NOQUEUE flag to get an EX. every thread that enters
2535 * this function on each node racing to become the recovery
2536 * master will not stop attempting this until either:
2537 * a) this node gets the EX (and becomes the recovery master),
2538 * or b) dlm->reco.new_master gets set to some nodenum
2539 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2540 * so each time a recovery master is needed, the entire cluster
2541 * will sync at this point. if the new master dies, that will
2542 * be detected in dlm_do_recovery */
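/* Returns 0 if this node became the recovery master, -EEXIST if another
 * node is (or just became) the recovery master, or -EINVAL if the dead
 * node turned out to be recovered already while racing for the lock. */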
2543 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2544 {
2545 enum dlm_status ret;
2546 struct dlm_lockstatus lksb;
2547 int status = -EINVAL;
2548
2549 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2550 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2551 again:
2552 memset(&lksb, 0, sizeof(lksb));
2553
2554 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2555 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2556 dlm_reco_ast, dlm, dlm_reco_bast);
2557
2558 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2559 dlm->name, ret, lksb.status);
2560
2561 if (ret == DLM_NORMAL) {
2562 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2563 dlm->name, dlm->node_num);
2564
2565 /* got the EX lock. check to see if another node
2566 * just became the reco master */
2567 if (dlm_reco_master_ready(dlm)) {
2568 mlog(0, "%s: got reco EX lock, but %u will "
2569 "do the recovery\n", dlm->name,
2570 dlm->reco.new_master);
2571 status = -EEXIST;
2572 } else {
2573 status = 0;
2574
2575 /* see if recovery was already finished elsewhere */
2576 spin_lock(&dlm->spinlock);
2577 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2578 status = -EINVAL;
2579 mlog(0, "%s: got reco EX lock, but "
2580 "node got recovered already\n", dlm->name);
2581 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2582 mlog(ML_ERROR, "%s: new master is %u "
2583 "but no dead node!\n",
2584 dlm->name, dlm->reco.new_master);
2585 BUG();
2586 }
2587 }
2588 spin_unlock(&dlm->spinlock);
2589 }
2590
2591 /* if this node has actually become the recovery master,
2592 * set the master and send the messages to begin recovery */
2593 if (!status) {
2594 mlog(0, "%s: dead=%u, this=%u, sending "
2595 "begin_reco now\n", dlm->name,
2596 dlm->reco.dead_node, dlm->node_num);
2597 status = dlm_send_begin_reco_message(dlm,
2598 dlm->reco.dead_node);
2599 /* this always succeeds */
2600 BUG_ON(status);
2601
2602 /* set the new_master to this node */
2603 spin_lock(&dlm->spinlock);
2604 dlm_set_reco_master(dlm, dlm->node_num);
2605 spin_unlock(&dlm->spinlock);
2606 }
2607
2608 /* recovery lock is a special case. ast will not get fired,
2609 * so just go ahead and unlock it. */
2610 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2611 if (ret == DLM_DENIED) {
2612 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2613 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2614 }
2615 if (ret != DLM_NORMAL) {
2616 /* this would really suck. this could only happen
2617 * if there was a network error during the unlock
2618 * because of node death. this means the unlock
2619 * is actually "done" and the lock structure is
2620 * even freed. we can continue, but only
2621 * because this specific lock name is special. */
2622 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2623 }
2624 } else if (ret == DLM_NOTQUEUED) {
2625 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2626 dlm->name, dlm->node_num);
2627 /* another node is master. wait on
2628 * reco.new_master != O2NM_INVALID_NODE_NUM
2629 * for at most one second */
2630 wait_event_timeout(dlm->dlm_reco_thread_wq,
2631 dlm_reco_master_ready(dlm),
2632 msecs_to_jiffies(1000));
2633 if (!dlm_reco_master_ready(dlm)) {
2634 mlog(0, "%s: reco master taking a while\n",
2635 dlm->name);
2636 goto again;
2637 }
2638 /* another node has informed this one that it is reco master */
2639 mlog(0, "%s: reco master %u is ready to recover %u\n",
2640 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2641 status = -EEXIST;
2642 } else if (ret == DLM_RECOVERING) {
2643 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2644 dlm->name, dlm->node_num);
2645 goto again;
2646 } else {
2647 struct dlm_lock_resource *res;
2648
2649 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2650 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2651 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2652 dlm_errname(lksb.status));
2653 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2654 DLM_RECOVERY_LOCK_NAME_LEN);
2655 if (res) {
2656 dlm_print_one_lock_resource(res);
2657 dlm_lockres_put(res);
2658 } else {
2659 mlog(ML_ERROR, "recovery lock not found\n");
2660 }
2661 BUG();
2662 }
2663
2664 return status;
2665 }
2666
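/*
 * Rough message flow once a recovery master exists (a sketch; the dead
 * node's lock state itself travels via the migratable lockres messages
 * handled earlier in this file):
 *
 * new master                      other live nodes
 * ----------                      ----------------
 * DLM_BEGIN_RECO_MSG  --------->  mark dead node, kick reco thread
 *   <--------- dead node's lock state from each live node
 * DLM_FINALIZE_RECO_MSG (1) --->  adopt new owner, set FINALIZE
 * DLM_FINALIZE_RECO_MSG (2) --->  clear FINALIZE, reset recovery
 */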
2667 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2668 {
2669 struct dlm_begin_reco br;
2670 int ret = 0;
2671 struct dlm_node_iter iter;
2672 int nodenum;
2673 int status;
2674
2675 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2676
2677 spin_lock(&dlm->spinlock);
2678 dlm_node_iter_init(dlm->domain_map, &iter);
2679 spin_unlock(&dlm->spinlock);
2680
2681 clear_bit(dead_node, iter.node_map);
2682
2683 memset(&br, 0, sizeof(br));
2684 br.node_idx = dlm->node_num;
2685 br.dead_node = dead_node;
2686
2687 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2688 ret = 0;
2689 if (nodenum == dead_node) {
2690 mlog(0, "not sending begin reco to dead node "
2691 "%u\n", dead_node);
2692 continue;
2693 }
2694 if (nodenum == dlm->node_num) {
2695 mlog(0, "not sending begin reco to self\n");
2696 continue;
2697 }
2698 retry:
2699 mlog(0, "attempting to send begin reco msg to %d\n",
2700 nodenum);
2701 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2702 &br, sizeof(br), nodenum, &status);
2703 /* negative status is handled ok by caller here */
2704 if (ret >= 0)
2705 ret = status;
2706 if (dlm_is_host_down(ret)) {
2707 /* node is down. not involved in recovery
2708 * so just keep going */
2709 mlog(ML_NOTICE, "%s: node %u was down when sending "
2710 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2711 ret = 0;
2712 }
2713
2714 /*
2715 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2716 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2717 * We are handling both for compatibility reasons.
2718 */
2719 if (ret == -EAGAIN || ret == EAGAIN) {
2720 mlog(0, "%s: trying to start recovery of node "
2721 "%u, but node %u is waiting for last recovery "
2722 "to complete, backoff for a bit\n", dlm->name,
2723 dead_node, nodenum);
2724 msleep(100);
2725 goto retry;
2726 }
2727 if (ret < 0) {
2728 struct dlm_lock_resource *res;
2729
2730 /* this is now a serious problem, possibly ENOMEM
2731 * in the network stack. must retry */
2732 mlog_errno(ret);
2733 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2734 "returned %d\n", dlm->name, nodenum, ret);
2735 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2736 DLM_RECOVERY_LOCK_NAME_LEN);
2737 if (res) {
2738 dlm_print_one_lock_resource(res);
2739 dlm_lockres_put(res);
2740 } else {
2741 mlog(ML_ERROR, "recovery lock not found\n");
2742 }
2743 /* sleep for a bit in hopes that we can avoid
2744 * another ENOMEM */
2745 msleep(100);
2746 goto retry;
2747 }
2748 }
2749
2750 return ret;
2751 }
2752
2753 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2754 void **ret_data)
2755 {
2756 struct dlm_ctxt *dlm = data;
2757 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2758
2759 /* ok to return 0, domain has gone away */
2760 if (!dlm_grab(dlm))
2761 return 0;
2762
2763 spin_lock(&dlm->spinlock);
2764 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2765 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2766 "but this node is in finalize state, waiting on finalize2\n",
2767 dlm->name, br->node_idx, br->dead_node,
2768 dlm->reco.dead_node, dlm->reco.new_master);
2769 spin_unlock(&dlm->spinlock);
2770 dlm_put(dlm);
2771 return -EAGAIN;
2772 }
2773 spin_unlock(&dlm->spinlock);
2774
2775 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2776 dlm->name, br->node_idx, br->dead_node,
2777 dlm->reco.dead_node, dlm->reco.new_master);
2778
2779 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2780
2781 spin_lock(&dlm->spinlock);
2782 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2783 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2784 mlog(0, "%s: new_master %u died, changing "
2785 "to %u\n", dlm->name, dlm->reco.new_master,
2786 br->node_idx);
2787 } else {
2788 mlog(0, "%s: new_master %u NOT DEAD, changing "
2789 "to %u\n", dlm->name, dlm->reco.new_master,
2790 br->node_idx);
2791 /* may not have seen the new master as dead yet */
2792 }
2793 }
2794 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2795 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2796 "node %u changing it to %u\n", dlm->name,
2797 dlm->reco.dead_node, br->node_idx, br->dead_node);
2798 }
2799 dlm_set_reco_master(dlm, br->node_idx);
2800 dlm_set_reco_dead_node(dlm, br->dead_node);
2801 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2802 mlog(0, "recovery master %u sees %u as dead, but this "
2803 "node has not yet. marking %u as dead\n",
2804 br->node_idx, br->dead_node, br->dead_node);
2805 if (!test_bit(br->dead_node, dlm->domain_map) ||
2806 !test_bit(br->dead_node, dlm->live_nodes_map))
2807 mlog(0, "%u not in domain/live_nodes map "
2808 "so setting it in reco map manually\n",
2809 br->dead_node);
2810 /* force the recovery cleanup in __dlm_hb_node_down
2811 * both of these will be cleared in a moment */
2812 set_bit(br->dead_node, dlm->domain_map);
2813 set_bit(br->dead_node, dlm->live_nodes_map);
2814 __dlm_hb_node_down(dlm, br->dead_node);
2815 }
2816 spin_unlock(&dlm->spinlock);
2817
2818 dlm_kick_recovery_thread(dlm);
2819
2820 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2821 dlm->name, br->node_idx, br->dead_node,
2822 dlm->reco.dead_node, dlm->reco.new_master);
2823
2824 dlm_put(dlm);
2825 return 0;
2826 }
2827
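/*
 * Finalization is done in two passes so that every live node has seen
 * finalize1 (and therefore knows the new master owns the recovered
 * lockres) before any node is allowed to reset its recovery state on
 * finalize2.
 */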
2828 #define DLM_FINALIZE_STAGE2 0x01
2829 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2830 {
2831 int ret = 0;
2832 struct dlm_finalize_reco fr;
2833 struct dlm_node_iter iter;
2834 int nodenum;
2835 int status;
2836 int stage = 1;
2837
2838 mlog(0, "finishing recovery for node %s:%u, "
2839 "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2840
2841 spin_lock(&dlm->spinlock);
2842 dlm_node_iter_init(dlm->domain_map, &iter);
2843 spin_unlock(&dlm->spinlock);
2844
2845 stage2:
2846 memset(&fr, 0, sizeof(fr));
2847 fr.node_idx = dlm->node_num;
2848 fr.dead_node = dlm->reco.dead_node;
2849 if (stage == 2)
2850 fr.flags |= DLM_FINALIZE_STAGE2;
2851
2852 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2853 if (nodenum == dlm->node_num)
2854 continue;
2855 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2856 &fr, sizeof(fr), nodenum, &status);
2857 if (ret >= 0)
2858 ret = status;
2859 if (ret < 0) {
2860 mlog(ML_ERROR, "Error %d when sending message %u (key "
2861 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2862 dlm->key, nodenum);
2863 if (dlm_is_host_down(ret)) {
2864 /* this has no effect on this recovery
2865 * session, so set the status to zero to
2866 * finish out the last recovery */
2867 mlog(ML_ERROR, "node %u went down after this "
2868 "node finished recovery.\n", nodenum);
2869 ret = 0;
2870 continue;
2871 }
2872 break;
2873 }
2874 }
2875 if (stage == 1) {
2876 /* reset the node_iter back to the top and send finalize2 */
2877 iter.curnode = -1;
2878 stage = 2;
2879 goto stage2;
2880 }
2881
2882 return ret;
2883 }
2884
2885 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2886 void **ret_data)
2887 {
2888 struct dlm_ctxt *dlm = data;
2889 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2890 int stage = 1;
2891
2892 /* ok to return 0, domain has gone away */
2893 if (!dlm_grab(dlm))
2894 return 0;
2895
2896 if (fr->flags & DLM_FINALIZE_STAGE2)
2897 stage = 2;
2898
2899 mlog(0, "%s: node %u finalizing recovery stage%d of "
2900 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2901 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2902
2903 spin_lock(&dlm->spinlock);
2904
2905 if (dlm->reco.new_master != fr->node_idx) {
2906 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2907 "%u is supposed to be the new master, dead=%u\n",
2908 fr->node_idx, dlm->reco.new_master, fr->dead_node);
2909 BUG();
2910 }
2911 if (dlm->reco.dead_node != fr->dead_node) {
2912 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2913 "node %u, but node %u is supposed to be dead\n",
2914 fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2915 BUG();
2916 }
2917
2918 switch (stage) {
2919 case 1:
2920 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2921 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2922 mlog(ML_ERROR, "%s: received finalize1 from "
2923 "new master %u for dead node %u, but "
2924 "this node has already received it!\n",
2925 dlm->name, fr->node_idx, fr->dead_node);
2926 dlm_print_reco_node_status(dlm);
2927 BUG();
2928 }
2929 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2930 spin_unlock(&dlm->spinlock);
2931 break;
2932 case 2:
2933 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2934 mlog(ML_ERROR, "%s: received finalize2 from "
2935 "new master %u for dead node %u, but "
2936 "this node did not have finalize1!\n",
2937 dlm->name, fr->node_idx, fr->dead_node);
2938 dlm_print_reco_node_status(dlm);
2939 BUG();
2940 }
2941 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2942 __dlm_reset_recovery(dlm);
2943 spin_unlock(&dlm->spinlock);
2944 dlm_kick_recovery_thread(dlm);
2945 break;
2946 }
2947
2948 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2949 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2950
2951 dlm_put(dlm);
2952 return 0;
2953 }
2954