// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dlmrecovery.c
 *
 * recovery stuff: remastering lock resources after a node death
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/string_choices.h>

#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "../cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
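
/*
 * A mig_cookie of 0 appears to be reserved to mean "this lockres fits
 * in a single message": dlm_send_one_lockres() below only asks for a
 * cookie when the lock count exceeds DLM_MAX_MIGRATABLE_LOCKS, which
 * is why the counter above starts at 1 and wraps from ~0ULL back to 1,
 * never handing out 0.
 */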

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot = 0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}
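
/*
 * For reference, the producer side of the queue drained above.  A
 * minimal sketch of how the handlers in this file feed it (see
 * dlm_request_all_locks_handler() and dlm_mig_lockres_handler() for
 * the real thing; "my_worker" and "data" are placeholders):
 *
 *	dlm_grab(dlm);		/. extra ref held by the work item ./
 *	dlm_init_work_item(dlm, item, my_worker, data);
 *	spin_lock(&dlm->work_lock);
 *	list_add_tail(&item->list, &dlm->work_list);
 *	spin_unlock(&dlm->work_lock);
 *	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
 */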

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
			"dlm_reco-%s", dlm->name);
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}


/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead node
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects all of the secondary lock queue info,
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 *
 * (the note just below maps these steps onto the functions in this file)
 */
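
/*
 * Roughly how those steps map onto this file (a reading aid, not a
 * strict call graph): step 2 is dlm_pick_recovery_master(), steps 3-4
 * are dlm_move_reco_locks_to_list() plus the DLM_LOCK_RES_RECOVERING
 * state, steps 5-6 are dlm_request_all_locks() feeding
 * dlm_mig_lockres_handler() and ending in dlm_send_all_done_msg(),
 * steps 7-9 are dlm_send_finalize_reco_message() and
 * dlm_finish_local_lockres_recovery(), and step 10 is the -EAGAIN
 * return from dlm_do_recovery().
 */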

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_domain_fully_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if node has been cleared from the recovery map,
 * i.e. recovery of that node's locks has completed */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_recovered(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
}
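
/*
 * Note that the two waits above watch different bitmaps:
 * dlm_wait_for_node_death() returns once the node has left
 * dlm->domain_map (it is gone from the domain), while
 * dlm_wait_for_node_recovery() returns only after the node has also
 * been cleared from dlm->recovery_map, i.e. its locks have been
 * remastered.
 */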

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	if (dlm->migrate_done) {
		mlog(0, "%s: no need to do recovery after migrating all "
		     "lock resources\n", dlm->name);
		spin_unlock(&dlm->spinlock);
		return 0;
	}

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	spin_unlock(&dlm->spinlock);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     str_yes_no(dlm_is_node_dead(dlm, ndata->node_num)));
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     str_yes_no(all_nodes_done));
		if (all_nodes_done) {
			int ret;

			/* Set this flag on the recovery master to keep
			 * a new recovery (for another dead node) from
			 * starting before this one is done; otherwise
			 * recovery can hang. */
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm);

	return status;
}
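
/*
 * Summarizing the switch statements above and in
 * dlm_reco_data_done_handler(), the recovery master walks each live
 * node through roughly:
 *
 *	INIT -> REQUESTING -> REQUESTED -> (RECEIVING) -> DONE
 *	            \-> DEAD (node died; its info is not needed)
 *
 * FINALIZE_SENT is only expected once every node has reached DONE and
 * the finalize message has gone out.
 */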

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	bitmap_copy(dlm->reco.node_map, dlm->domain_map, O2NM_MAX_NODES);
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
		  "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	// return from here, then
	// sleep until all received or error
	return ret;

}
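
/*
 * On the wire this is just the two fields filled in above: node_idx
 * names the requesting recovery master, dead_node the node whose lock
 * state is wanted.  The interesting part of the reply rides in the
 * o2net status word, so a negative 'status' here is an error code
 * chosen by dlm_request_all_locks_handler() on the remote node.
 */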

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					/* Can't schedule DLM_UNLOCK_FREE_LOCK
					 * - do manually */
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
				  "doing recovery for node %u. sending it.\n",
				  dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
				  "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

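/*
 * This count relies on ->granted, ->converting and ->blocked being
 * consecutive list_heads in struct dlm_lock_resource, so that queue++
 * steps from one queue to the next; dlm_list_num_to_pointer() near the
 * bottom of this file leans on the same layout.
 */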
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i = 0; i < 3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 struct_size(mres, ml, mres->num_locks),
				 send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}
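
/*
 * A dummy lock (zero cookie, all-IVMODE, on the blocked list) is how a
 * node holding no actual locks still tells the new master "I have a
 * reference on this lockres"; dlm_is_dummy_lock() below is the
 * receive-side test, used by dlm_process_recovery_data() to record
 * just the mastery reference.
 */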

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
			  "require more than one network packet to "
			  "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}
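
/*
 * A worked example with made-up numbers: if a lockres carried 500
 * locks and DLM_MAX_MIGRATABLE_LOCKS were 240, the loop above would
 * send two full messages of 240 locks each and then flush the final
 * 20, all three sharing the same nonzero mig_cookie; only the last
 * message carries DLM_MRES_ALL_DONE, set by dlm_send_mig_lockres_msg()
 * when the running total_locks reaches mres->total_locks.
 */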



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary.
 * We really cannot afford to fail an alloc in recovery:
 * do we spin?  Returning an error only delays the problem.
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (!dlm_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not joined! "
			  "lockres %.*s, master %u\n",
			  dlm->name, mres->lockname_len,
			  mres->lockname, mres->master);
		dlm_put(dlm);
		return -EINVAL;
	}

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
		  (mres->flags & DLM_MRES_RECOVERY) ?
		  "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
			hash);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(0, "%s: node is attempting to migrate "
				"lockres %.*s, but marked as dropping "
				"ref!\n", dlm->name,
				mres->lockname_len, mres->lockname);
			ret = -EINVAL;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			dlm_lockres_put(res);
			goto leave;
		}

		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
					  mres->lockname_len,
					  mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->spinlock);
				dlm_lockres_put(res);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	} else {
		spin_unlock(&dlm->spinlock);
		/* need to allocate, just like if it was
		 * mastered here normally  */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
			  "unknown owner.. will need to requery: "
			  "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);

	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
				  ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
				   "this node will take it.\n",
				   res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
				  "that node %u still owns %.*s\n",
				  real_master, res->lockname.len,
				  res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1578 	else
1579 		mlog(0, "dlm_process_recovery_data succeeded\n");
1580 
1581 	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1582 	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1583 		ret = dlm_finish_migration(dlm, res, mres->master);
1584 		if (ret < 0)
1585 			mlog_errno(ret);
1586 	}
1587 
1588 leave:
1589 	/* See comment in dlm_mig_lockres_handler() */
1590 	if (res) {
1591 		if (extra_ref)
1592 			dlm_lockres_put(res);
1593 		dlm_lockres_put(res);
1594 	}
1595 	kfree(data);
1596 }
1597 
1598 
1599 
1600 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1601 				      struct dlm_lock_resource *res,
1602 				      u8 *real_master)
1603 {
1604 	struct dlm_node_iter iter;
1605 	int nodenum;
1606 	int ret = 0;
1607 
1608 	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1609 
1610 	/* we only reach here if one of the two nodes in a
1611 	 * migration died while the migration was in progress.
1612 	 * at this point we need to requery the master.  we
1613 	 * know that the new_master got as far as creating
1614 	 * an mle on at least one node, but we do not know
1615 	 * if any nodes had actually cleared the mle and set
1616 	 * the master to the new_master.  the old master
1617 	 * is supposed to set the owner to UNKNOWN in the
1618 	 * event of a new_master death, so the only possible
1619 	 * responses that we can get from nodes here are
1620 	 * that the master is new_master, or that the master
1621 	 * is UNKNOWN.
1622 	 * if all nodes come back with UNKNOWN then we know
1623 	 * the lock needs remastering here.
1624 	 * if any node comes back with a valid master, check
1625 	 * to see if that master is the one that we are
1626 	 * recovering.  if so, then the new_master died and
1627 	 * we need to remaster this lock.  if not, then the
1628 	 * new_master survived and that node will respond to
1629 	 * other nodes about the owner.
1630 	 * if there is an owner, this node needs to dump this
1631 	 * lockres and alert the sender that this lockres
1632 	 * was rejected. */
1633 	spin_lock(&dlm->spinlock);
1634 	dlm_node_iter_init(dlm->domain_map, &iter);
1635 	spin_unlock(&dlm->spinlock);
1636 
1637 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1638 		/* do not send to self */
1639 		if (nodenum == dlm->node_num)
1640 			continue;
1641 		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1642 		if (ret < 0) {
1643 			mlog_errno(ret);
1644 			if (!dlm_is_host_down(ret))
1645 				BUG();
1646 			/* host is down, so answer for that node would be
1647 			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
1648 		}
1649 		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1650 			mlog(0, "lock master is %u\n", *real_master);
1651 			break;
1652 		}
1653 	}
1654 	return ret;
1655 }
1656 
1657 
1658 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1659 			  u8 nodenum, u8 *real_master)
1660 {
1661 	int ret;
1662 	struct dlm_master_requery req;
1663 	int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1664 
1665 	memset(&req, 0, sizeof(req));
1666 	req.node_idx = dlm->node_num;
1667 	req.namelen = res->lockname.len;
1668 	memcpy(req.name, res->lockname.name, res->lockname.len);
1669 
1670 resend:
1671 	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1672 				 &req, sizeof(req), nodenum, &status);
1673 	if (ret < 0)
1674 		mlog(ML_ERROR, "Error %d when sending message %u (key "
1675 		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1676 		     dlm->key, nodenum);
1677 	else if (status == -ENOMEM) {
1678 		mlog_errno(status);
1679 		msleep(50);
1680 		goto resend;
1681 	} else {
1682 		BUG_ON(status < 0);
1683 		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1684 		*real_master = (u8) (status & 0xff);
1685 		mlog(0, "node %u responded to master requery with %u\n",
1686 			  nodenum, *real_master);
1687 		ret = 0;
1688 	}
1689 	return ret;
1690 }
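
/*
 * Editor's note (informational): the owner travels back in o2net's
 * status channel, so on success "status" above is either a node number
 * or DLM_LOCK_RES_OWNER_UNKNOWN; a remote -ENOMEM is the only negative
 * status expected and is simply retried after a 50ms sleep.
 */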
1691 
1692 
1693 /* this function cannot error, so unless the sending
1694  * or receiving of the message failed, the owner can
1695  * be trusted */
1696 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1697 			       void **ret_data)
1698 {
1699 	struct dlm_ctxt *dlm = data;
1700 	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1701 	struct dlm_lock_resource *res = NULL;
1702 	unsigned int hash;
1703 	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1704 	u32 flags = DLM_ASSERT_MASTER_REQUERY;
1705 	int dispatched = 0;
1706 
1707 	if (!dlm_grab(dlm)) {
1708 		/* since the domain has gone away on this
1709 		 * node, the proper response is UNKNOWN */
1710 		return master;
1711 	}
1712 
1713 	hash = dlm_lockid_hash(req->name, req->namelen);
1714 
1715 	spin_lock(&dlm->spinlock);
1716 	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1717 	if (res) {
1718 		spin_lock(&res->spinlock);
1719 		master = res->owner;
1720 		if (master == dlm->node_num) {
1721 			int ret = dlm_dispatch_assert_master(dlm, res,
1722 							     0, 0, flags);
1723 			if (ret < 0) {
1724 				mlog_errno(ret);
1725 				spin_unlock(&res->spinlock);
1726 				dlm_lockres_put(res);
1727 				spin_unlock(&dlm->spinlock);
1728 				dlm_put(dlm);
1729 				/* sender will take care of this and retry */
1730 				return ret;
1731 			} else {
1732 				dispatched = 1;
1733 				__dlm_lockres_grab_inflight_worker(dlm, res);
1734 				spin_unlock(&res->spinlock);
1735 			}
1736 		} else {
1737 			/* put, since we are not the master */
1738 			spin_unlock(&res->spinlock);
1739 			dlm_lockres_put(res);
1740 		}
1741 	}
1742 	spin_unlock(&dlm->spinlock);
1743 
1744 	if (!dispatched)
1745 		dlm_put(dlm);
1746 	return master;
1747 }
1748 
1749 static inline struct list_head *
1750 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1751 {
1752 	struct list_head *ret;
1753 	BUG_ON(list_num < 0);
1754 	BUG_ON(list_num > 2);
1755 	ret = &(res->granted);
1756 	ret += list_num;
1757 	return ret;
1758 }
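
/*
 * Editor's sketch: the pointer arithmetic above relies on the three
 * queues being consecutive struct list_heads in dlm_lock_resource:
 *
 *	granted     (list 0, DLM_GRANTED_LIST)
 *	converting  (list 1, DLM_CONVERTING_LIST)
 *	blocked     (list 2, DLM_BLOCKED_LIST)
 *
 * A build-time assertion along these (hypothetical) lines would
 * document the layout dependency:
 *
 *	BUILD_BUG_ON(offsetof(struct dlm_lock_resource, converting) !=
 *		     offsetof(struct dlm_lock_resource, granted) +
 *		     sizeof(struct list_head));
 */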
1759 /* TODO: do ast flush business
1760  * TODO: do MIGRATING and RECOVERING spinning
1761  */
1762 
1763 /*
1764  * NOTE about in-flight requests during migration:
1765  *
1766  * Before attempting the migrate, the master has marked the lockres as
1767  * MIGRATING and then flushed all of its pending ASTs.  So any in-flight
1768  * requests either got queued before the MIGRATING flag got set, in which
1769  * case the lock data will reflect the change and a return message is on
1770  * the way, or the request failed to get in before MIGRATING got set.  In
1771  * this case, the caller will be told to spin and wait for the MIGRATING
1772  * flag to be dropped, then recheck the master.
1773  * This holds true for the convert, cancel and unlock cases, and since lvb
1774  * updates are tied to these same messages, it applies to lvb updates as
1775  * well.  For the lock case, there is no way a lock can be on the master
1776  * queue and not be on the secondary queue since the lock is always added
1777  * locally first.  This means that the new target node will never be sent
1778  * a lock that he doesn't already have on the list.
1779  * In total, this means that the local lock is correct and should not be
1780  * updated to match the one sent by the master.  Any messages sent back
1781  * from the master before the MIGRATING flag will bring the lock properly
1782  * up-to-date, and the change will be ordered properly for the waiter.
1783  * We will *not* attempt to modify the lock underneath the waiter.
1784  */
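
/*
 * Editor's sketch of the "spin and wait" behaviour the note describes
 * (illustrative only; the real waits live in the request paths):
 *
 *	spin_lock(&res->spinlock);
 *	while (res->state & DLM_LOCK_RES_MIGRATING) {
 *		spin_unlock(&res->spinlock);
 *		wait_event(res->wq,
 *			   !(res->state & DLM_LOCK_RES_MIGRATING));
 *		spin_lock(&res->spinlock);
 *	}
 *	... recheck res->owner, then resend the request ...
 */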
1785 
1786 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1787 				     struct dlm_lock_resource *res,
1788 				     struct dlm_migratable_lockres *mres)
1789 {
1790 	struct dlm_migratable_lock *ml;
1791 	struct list_head *queue, *iter;
1792 	struct list_head *tmpq = NULL;
1793 	struct dlm_lock *newlock = NULL;
1794 	struct dlm_lockstatus *lksb = NULL;
1795 	int ret = 0;
1796 	int i, j, bad;
1797 	struct dlm_lock *lock;
1798 	u8 from = O2NM_MAX_NODES;
1799 	__be64 c;
1800 
1801 	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1802 	for (i=0; i<mres->num_locks; i++) {
1803 		ml = &(mres->ml[i]);
1804 
1805 		if (dlm_is_dummy_lock(dlm, ml, &from)) {
1806 			/* placeholder, just need to set the refmap bit */
1807 			BUG_ON(mres->num_locks != 1);
1808 			mlog(0, "%s:%.*s: dummy lock for %u\n",
1809 			     dlm->name, mres->lockname_len, mres->lockname,
1810 			     from);
1811 			spin_lock(&res->spinlock);
1812 			dlm_lockres_set_refmap_bit(dlm, res, from);
1813 			spin_unlock(&res->spinlock);
1814 			break;
1815 		}
1816 		BUG_ON(ml->highest_blocked != LKM_IVMODE);
1817 		newlock = NULL;
1818 		lksb = NULL;
1819 
1820 		queue = dlm_list_num_to_pointer(res, ml->list);
1821 		tmpq = NULL;
1822 
1823 		/* if the lock is for the local node it needs to
1824 		 * be moved to the proper location within the queue.
1825 		 * do not allocate a new lock structure. */
1826 		if (ml->node == dlm->node_num) {
1827 			/* MIGRATION ONLY! */
1828 			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1829 
1830 			lock = NULL;
1831 			spin_lock(&res->spinlock);
1832 			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1833 				tmpq = dlm_list_idx_to_ptr(res, j);
1834 				list_for_each(iter, tmpq) {
1835 					lock = list_entry(iter,
1836 						  struct dlm_lock, list);
1837 					if (lock->ml.cookie == ml->cookie)
1838 						break;
1839 					lock = NULL;
1840 				}
1841 				if (lock)
1842 					break;
1843 			}
1844 
1845 			/* lock is always created locally first, and
1846 			 * destroyed locally last.  it must be on the list */
1847 			if (!lock) {
1848 				c = ml->cookie;
1849 				mlog(ML_ERROR, "Could not find local lock "
1850 					       "with cookie %u:%llu, node %u, "
1851 					       "list %u, flags 0x%x, type %d, "
1852 					       "conv %d, highest blocked %d\n",
1853 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1854 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1855 				     ml->node, ml->list, ml->flags, ml->type,
1856 				     ml->convert_type, ml->highest_blocked);
1857 				__dlm_print_one_lock_resource(res);
1858 				BUG();
1859 			}
1860 
1861 			if (lock->ml.node != ml->node) {
1862 				c = lock->ml.cookie;
1863 				mlog(ML_ERROR, "Mismatched node# in lock "
1864 				     "cookie %u:%llu, name %.*s, node %u\n",
1865 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1866 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1867 				     res->lockname.len, res->lockname.name,
1868 				     lock->ml.node);
1869 				c = ml->cookie;
1870 				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1871 				     "node %u, list %u, flags 0x%x, type %d, "
1872 				     "conv %d, highest blocked %d\n",
1873 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1874 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1875 				     ml->node, ml->list, ml->flags, ml->type,
1876 				     ml->convert_type, ml->highest_blocked);
1877 				__dlm_print_one_lock_resource(res);
1878 				BUG();
1879 			}
1880 
1881 			if (tmpq != queue) {
1882 				c = ml->cookie;
1883 				mlog(0, "Lock cookie %u:%llu was on list %u "
1884 				     "instead of list %u for %.*s\n",
1885 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1886 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1887 				     j, ml->list, res->lockname.len,
1888 				     res->lockname.name);
1889 				__dlm_print_one_lock_resource(res);
1890 				spin_unlock(&res->spinlock);
1891 				continue;
1892 			}
1893 
1894 			/* see NOTE above about why we do not update
1895 			 * to match the master here */
1896 
1897 			/* move the lock to its proper place */
1898 			/* do not alter lock refcount.  switching lists. */
1899 			list_move_tail(&lock->list, queue);
1900 			spin_unlock(&res->spinlock);
1901 
1902 			mlog(0, "just reordered a local lock!\n");
1903 			continue;
1904 		}
1905 
1906 		/* lock is for another node. */
1907 		newlock = dlm_new_lock(ml->type, ml->node,
1908 				       be64_to_cpu(ml->cookie), NULL);
1909 		if (!newlock) {
1910 			ret = -ENOMEM;
1911 			goto leave;
1912 		}
1913 		lksb = newlock->lksb;
1914 		dlm_lock_attach_lockres(newlock, res);
1915 
1916 		if (ml->convert_type != LKM_IVMODE) {
1917 			BUG_ON(queue != &res->converting);
1918 			newlock->ml.convert_type = ml->convert_type;
1919 		}
1920 		lksb->flags |= (ml->flags &
1921 				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1922 
1923 		if (ml->type == LKM_NLMODE)
1924 			goto skip_lvb;
1925 
1926 		/*
1927 		 * If the lock is in the blocked list it can't have a valid lvb,
1928 		 * so skip it
1929 		 */
1930 		if (ml->list == DLM_BLOCKED_LIST)
1931 			goto skip_lvb;
1932 
1933 		if (!dlm_lvb_is_empty(mres->lvb)) {
1934 			if (lksb->flags & DLM_LKSB_PUT_LVB) {
1935 				/* other node was trying to update
1936 				 * lvb when node died.  recreate the
1937 				 * lksb with the updated lvb. */
1938 				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1939 				/* the lock resource lvb update must happen
1940 				 * NOW, before the spinlock is dropped.
1941 				 * we no longer wait for the AST to update
1942 				 * the lvb. */
1943 				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1944 			} else {
1945 				/* otherwise, the node is sending its
1946 				 * most recent valid lvb info */
1947 				BUG_ON(ml->type != LKM_EXMODE &&
1948 				       ml->type != LKM_PRMODE);
1949 				if (!dlm_lvb_is_empty(res->lvb) &&
1950 				    (ml->type == LKM_EXMODE ||
1951 				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1952 					int i;
1953 					mlog(ML_ERROR, "%s:%.*s: received bad "
1954 					     "lvb! type=%d\n", dlm->name,
1955 					     res->lockname.len,
1956 					     res->lockname.name, ml->type);
1957 					printk("lockres lvb=[");
1958 					for (i=0; i<DLM_LVB_LEN; i++)
1959 						printk("%02x", res->lvb[i]);
1960 					printk("]\nmigrated lvb=[");
1961 					for (i=0; i<DLM_LVB_LEN; i++)
1962 						printk("%02x", mres->lvb[i]);
1963 					printk("]\n");
1964 					dlm_print_one_lock_resource(res);
1965 					BUG();
1966 				}
1967 				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1968 			}
1969 		}
1970 skip_lvb:
1971 
1972 		/* NOTE:
1973 		 * wrt lock queue ordering and recovery:
1974 		 *    1. order of locks on granted queue is
1975 		 *       meaningless.
1976 		 *    2. order of locks on converting queue is
1977 		 *       LOST with the node death.  sorry charlie.
1978 		 *    3. order of locks on the blocked queue is
1979 		 *       also LOST.
1980 		 * order of locks does not affect integrity, it
1981 		 * just means that a lock request may get pushed
1982 		 * back in line as a result of the node death.
1983 		 * also note that for a given node the lock order
1984 		 * for its secondary queue locks is preserved
1985 		 * relative to each other, but clearly *not*
1986 		 * preserved relative to locks from other nodes.
1987 		 */
1988 		bad = 0;
1989 		spin_lock(&res->spinlock);
1990 		list_for_each_entry(lock, queue, list) {
1991 			if (lock->ml.cookie == ml->cookie) {
1992 				c = lock->ml.cookie;
1993 				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1994 				     "exists on this lockres!\n", dlm->name,
1995 				     res->lockname.len, res->lockname.name,
1996 				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
1997 				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1998 
1999 				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
2000 				     "node=%u, cookie=%u:%llu, queue=%d\n",
2001 				     ml->type, ml->convert_type, ml->node,
2002 				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
2003 				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
2004 				     ml->list);
2005 
2006 				__dlm_print_one_lock_resource(res);
2007 				bad = 1;
2008 				break;
2009 			}
2010 		}
2011 		if (!bad) {
2012 			dlm_lock_get(newlock);
2013 			if (mres->flags & DLM_MRES_RECOVERY &&
2014 					ml->list == DLM_CONVERTING_LIST &&
2015 					newlock->ml.type >
2016 					newlock->ml.convert_type) {
2017 				/* newlock is doing a downconvert, add it to
2018 				 * the head of the converting list */
2019 				list_add(&newlock->list, queue);
2020 			} else
2021 				list_add_tail(&newlock->list, queue);
2022 			mlog(0, "%s:%.*s: added lock for node %u, "
2023 			     "setting refmap bit\n", dlm->name,
2024 			     res->lockname.len, res->lockname.name, ml->node);
2025 			dlm_lockres_set_refmap_bit(dlm, res, ml->node);
2026 		}
2027 		spin_unlock(&res->spinlock);
2028 	}
2029 	mlog(0, "done running all the locks\n");
2030 
2031 leave:
2032 	/* balance the ref taken when the work was queued */
2033 	spin_lock(&res->spinlock);
2034 	dlm_lockres_drop_inflight_ref(dlm, res);
2035 	spin_unlock(&res->spinlock);
2036 
2037 	if (ret < 0)
2038 		mlog_errno(ret);
2039 
2040 	return ret;
2041 }
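
/*
 * Editor's note (informational): the %u:%llu cookies logged above are
 * decoded by dlm_get_lock_cookie_node()/dlm_get_lock_cookie_seq(); a
 * cookie packs the owning node number into the top byte and a per-node
 * sequence number into the low 56 bits (see dlmcommon.h), which is why
 * matching on ml->cookie uniquely identifies a lock cluster-wide.
 */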
2042 
2043 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
2044 				       struct dlm_lock_resource *res)
2045 {
2046 	int i;
2047 	struct list_head *queue;
2048 	struct dlm_lock *lock, *next;
2049 
2050 	assert_spin_locked(&dlm->spinlock);
2051 	assert_spin_locked(&res->spinlock);
2052 	res->state |= DLM_LOCK_RES_RECOVERING;
2053 	if (!list_empty(&res->recovering)) {
2054 		mlog(0,
2055 		     "Recovering res %s:%.*s, already on the recovery list!\n",
2056 		     dlm->name, res->lockname.len, res->lockname.name);
2057 		list_del_init(&res->recovering);
2058 		dlm_lockres_put(res);
2059 	}
2060 	/* We need to hold a reference while on the recovery list */
2061 	dlm_lockres_get(res);
2062 	list_add_tail(&res->recovering, &dlm->reco.resources);
2063 
2064 	/* find any pending locks and put them back on proper list */
2065 	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2066 		queue = dlm_list_idx_to_ptr(res, i);
2067 		list_for_each_entry_safe(lock, next, queue, list) {
2068 			dlm_lock_get(lock);
2069 			if (lock->convert_pending) {
2070 				/* move converting lock back to granted */
2071 				mlog(0, "node died with convert pending "
2072 				     "on %.*s. move back to granted list.\n",
2073 				     res->lockname.len, res->lockname.name);
2074 				dlm_revert_pending_convert(res, lock);
2075 				lock->convert_pending = 0;
2076 			} else if (lock->lock_pending) {
2077 				/* remove pending lock requests completely */
2078 				BUG_ON(i != DLM_BLOCKED_LIST);
2079 				mlog(0, "node died with lock pending "
2080 				     "on %.*s. remove from blocked list and skip.\n",
2081 				     res->lockname.len, res->lockname.name);
2082 				/* lock will be floating until ref in
2083 				 * dlmlock_remote is freed after the network
2084 				 * call returns.  ok for it to not be on any
2085 				 * list since no ast can be called
2086 				 * (the master is dead). */
2087 				dlm_revert_pending_lock(res, lock);
2088 				lock->lock_pending = 0;
2089 			} else if (lock->unlock_pending) {
2090 				/* if an unlock was in progress, treat as
2091 				 * if this had completed successfully
2092 				 * before sending this lock state to the
2093 				 * new master.  note that the dlm_unlock
2094 				 * call is still responsible for calling
2095 				 * the unlockast.  that will happen after
2096 				 * the network call times out.  for now,
2097 				 * just move lists to prepare the new
2098 				 * recovery master.  */
2099 				BUG_ON(i != DLM_GRANTED_LIST);
2100 				mlog(0, "node died with unlock pending "
2101 				     "on %.*s. remove from granted list and skip.\n",
2102 				     res->lockname.len, res->lockname.name);
2103 				dlm_commit_pending_unlock(res, lock);
2104 				lock->unlock_pending = 0;
2105 			} else if (lock->cancel_pending) {
2106 				/* if a cancel was in progress, treat as
2107 				 * if this had completed successfully
2108 				 * before sending this lock state to the
2109 				 * new master */
2110 				BUG_ON(i != DLM_CONVERTING_LIST);
2111 				mlog(0, "node died with cancel pending "
2112 				     "on %.*s. move back to granted list.\n",
2113 				     res->lockname.len, res->lockname.name);
2114 				dlm_commit_pending_cancel(res, lock);
2115 				lock->cancel_pending = 0;
2116 			}
2117 			dlm_lock_put(lock);
2118 		}
2119 	}
2120 }
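
/*
 * Editor's summary of the pending-state handling above (informational):
 *
 *	convert_pending  ->  revert the convert, lock stays granted
 *	lock_pending     ->  revert the request, lock leaves the
 *	                     blocked list entirely
 *	unlock_pending   ->  commit the unlock, lock leaves the
 *	                     granted list
 *	cancel_pending   ->  commit the cancel, lock moves from the
 *	                     converting list back to granted
 */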
2121 
2122 
2123 
2124 /* removes all recovered locks from the recovery list.
2125  * sets the res->owner to the new master.
2126  * unsets the RECOVERING flag and wakes waiters. */
2127 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2128 					      u8 dead_node, u8 new_master)
2129 {
2130 	int i;
2131 	struct hlist_head *bucket;
2132 	struct dlm_lock_resource *res, *next;
2133 
2134 	assert_spin_locked(&dlm->spinlock);
2135 
2136 	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2137 		if (res->owner == dead_node) {
2138 			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2139 			     dlm->name, res->lockname.len, res->lockname.name,
2140 			     res->owner, new_master);
2141 			list_del_init(&res->recovering);
2142 			spin_lock(&res->spinlock);
2143 			/* new_master has our reference from
2144 			 * the lock state sent during recovery */
2145 			dlm_change_lockres_owner(dlm, res, new_master);
2146 			res->state &= ~DLM_LOCK_RES_RECOVERING;
2147 			if (__dlm_lockres_has_locks(res))
2148 				__dlm_dirty_lockres(dlm, res);
2149 			spin_unlock(&res->spinlock);
2150 			wake_up(&res->wq);
2151 			dlm_lockres_put(res);
2152 		}
2153 	}
2154 
2155 	/* this will become unnecessary eventually, but
2156 	 * for now we need to run the whole hash, clear
2157 	 * the RECOVERING state and set the owner
2158 	 * if necessary */
2159 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2160 		bucket = dlm_lockres_hash(dlm, i);
2161 		hlist_for_each_entry(res, bucket, hash_node) {
2162 			if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
2163 				spin_lock(&res->spinlock);
2164 				res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
2165 				spin_unlock(&res->spinlock);
2166 				wake_up(&res->wq);
2167 			}
2168 
2169 			if (!(res->state & DLM_LOCK_RES_RECOVERING))
2170 				continue;
2171 
2172 			if (res->owner != dead_node &&
2173 			    res->owner != dlm->node_num)
2174 				continue;
2175 
2176 			if (!list_empty(&res->recovering)) {
2177 				list_del_init(&res->recovering);
2178 				dlm_lockres_put(res);
2179 			}
2180 
2181 			/* new_master has our reference from
2182 			 * the lock state sent during recovery */
2183 			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2184 			     dlm->name, res->lockname.len, res->lockname.name,
2185 			     res->owner, new_master);
2186 			spin_lock(&res->spinlock);
2187 			dlm_change_lockres_owner(dlm, res, new_master);
2188 			res->state &= ~DLM_LOCK_RES_RECOVERING;
2189 			if (__dlm_lockres_has_locks(res))
2190 				__dlm_dirty_lockres(dlm, res);
2191 			spin_unlock(&res->spinlock);
2192 			wake_up(&res->wq);
2193 		}
2194 	}
2195 }
2196 
2197 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2198 {
2199 	if (local) {
2200 		if (lock->ml.type != LKM_EXMODE &&
2201 		    lock->ml.type != LKM_PRMODE)
2202 			return 1;
2203 	} else if (lock->ml.type == LKM_EXMODE)
2204 		return 1;
2205 	return 0;
2206 }
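
/*
 * Editor's note: the predicate above, as a table (informational):
 *
 *	local=0 (we are master, lock belonged to the dead node):
 *		invalidate iff the dead node held EX
 *	local=1 (we are not master, lock is our own):
 *		invalidate iff we hold neither EX nor PR, i.e. we have
 *		no granted mode that guarantees a valid lvb
 */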
2207 
2208 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2209 			       struct dlm_lock_resource *res, u8 dead_node)
2210 {
2211 	struct list_head *queue;
2212 	struct dlm_lock *lock;
2213 	int blank_lvb = 0, local = 0;
2214 	int i;
2215 	u8 search_node;
2216 
2217 	assert_spin_locked(&dlm->spinlock);
2218 	assert_spin_locked(&res->spinlock);
2219 
2220 	if (res->owner == dlm->node_num)
2221 		/* if this node owned the lockres, and if the dead node
2222 		 * had an EX when he died, blank out the lvb */
2223 		search_node = dead_node;
2224 	else {
2225 		/* if this is a secondary lockres, and we had no EX or PR
2226 		 * locks granted, we can no longer trust the lvb */
2227 		search_node = dlm->node_num;
2228 		local = 1;  /* check local state for valid lvb */
2229 	}
2230 
2231 	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2232 		queue = dlm_list_idx_to_ptr(res, i);
2233 		list_for_each_entry(lock, queue, list) {
2234 			if (lock->ml.node == search_node) {
2235 				if (dlm_lvb_needs_invalidation(lock, local)) {
2236 					/* zero the lksb lvb and lockres lvb */
2237 					blank_lvb = 1;
2238 					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2239 				}
2240 			}
2241 		}
2242 	}
2243 
2244 	if (blank_lvb) {
2245 		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2246 		     res->lockname.len, res->lockname.name, dead_node);
2247 		memset(res->lvb, 0, DLM_LVB_LEN);
2248 	}
2249 }
2250 
2251 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2252 				struct dlm_lock_resource *res, u8 dead_node)
2253 {
2254 	struct dlm_lock *lock, *next;
2255 	unsigned int freed = 0;
2256 
2257 	/* this node is the lockres master:
2258 	 * 1) remove any stale locks for the dead node
2259 	 * 2) if the dead node had an EX when he died, blank out the lvb
2260 	 */
2261 	assert_spin_locked(&dlm->spinlock);
2262 	assert_spin_locked(&res->spinlock);
2263 
2264 	/* We do two dlm_lock_put(): one for removing the lock from its list,
2265 	 * and the other to force the DLM_UNLOCK_FREE_LOCK action and free it */
2266 
2267 	/* TODO: check pending_asts, pending_basts here */
2268 	list_for_each_entry_safe(lock, next, &res->granted, list) {
2269 		if (lock->ml.node == dead_node) {
2270 			list_del_init(&lock->list);
2271 			dlm_lock_put(lock);
2272 			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2273 			dlm_lock_put(lock);
2274 			freed++;
2275 		}
2276 	}
2277 	list_for_each_entry_safe(lock, next, &res->converting, list) {
2278 		if (lock->ml.node == dead_node) {
2279 			list_del_init(&lock->list);
2280 			dlm_lock_put(lock);
2281 			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2282 			dlm_lock_put(lock);
2283 			freed++;
2284 		}
2285 	}
2286 	list_for_each_entry_safe(lock, next, &res->blocked, list) {
2287 		if (lock->ml.node == dead_node) {
2288 			list_del_init(&lock->list);
2289 			dlm_lock_put(lock);
2290 			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2291 			dlm_lock_put(lock);
2292 			freed++;
2293 		}
2294 	}
2295 
2296 	if (freed) {
2297 		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2298 		     "dropping ref from lockres\n", dlm->name,
2299 		     res->lockname.len, res->lockname.name, freed, dead_node);
2300 		if (!test_bit(dead_node, res->refmap)) {
2301 			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2302 			     "but ref was not set\n", dlm->name,
2303 			     res->lockname.len, res->lockname.name, freed, dead_node);
2304 			__dlm_print_one_lock_resource(res);
2305 		}
2306 		res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
2307 		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2308 	} else if (test_bit(dead_node, res->refmap)) {
2309 		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2310 		     "no locks and had not purged before dying\n", dlm->name,
2311 		     res->lockname.len, res->lockname.name, dead_node);
2312 		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2313 	}
2314 
2315 	/* do not kick thread yet */
2316 	__dlm_dirty_lockres(dlm, res);
2317 }
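
/*
 * Editor's sketch of the refcount balance (under the reading that a
 * queued lock carries a list reference plus the base reference a normal
 * unlock would drop via DLM_UNLOCK_FREE_LOCK):
 *
 *	list_del_init(&lock->list);
 *	dlm_lock_put(lock);	releases the list reference
 *	dlm_lock_put(lock);	stands in for DLM_UNLOCK_FREE_LOCK
 */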
2318 
2319 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2320 {
2321 	struct dlm_lock_resource *res;
2322 	int i;
2323 	struct hlist_head *bucket;
2324 	struct hlist_node *tmp;
2325 	struct dlm_lock *lock;
2326 
2327 
2328 	/* purge any stale mles */
2329 	dlm_clean_master_list(dlm, dead_node);
2330 
2331 	/*
2332 	 * now clean up all lock resources.  there are two rules:
2333 	 *
2334 	 * 1) if the dead node was the master, move the lockres
2335 	 *    to the recovering list.  set the RECOVERING flag.
2336 	 *    this lockres needs to be cleaned up before it can
2337 	 *    be used further.
2338 	 *
2339 	 * 2) if this node was the master, remove all locks from
2340 	 *    each of the lockres queues that were owned by the
2341 	 *    dead node.  once recovery finishes, the dlm thread
2342 	 *    can be kicked again to see if any ASTs or BASTs
2343 	 *    need to be fired as a result.
2344 	 */
2345 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2346 		bucket = dlm_lockres_hash(dlm, i);
2347 		hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
2348 			/* always prune any $RECOVERY entries for dead nodes,
2349 			 * otherwise hangs can occur during later recovery */
2350 			if (dlm_is_recovery_lock(res->lockname.name,
2351 						 res->lockname.len)) {
2352 				spin_lock(&res->spinlock);
2353 				list_for_each_entry(lock, &res->granted, list) {
2354 					if (lock->ml.node == dead_node) {
2355 						mlog(0, "AHA! there was "
2356 						     "a $RECOVERY lock for dead "
2357 						     "node %u (%s)!\n",
2358 						     dead_node, dlm->name);
2359 						list_del_init(&lock->list);
2360 						dlm_lock_put(lock);
2361 						/* Can't schedule
2362 						 * DLM_UNLOCK_FREE_LOCK
2363 						 * - do manually */
2364 						dlm_lock_put(lock);
2365 						break;
2366 					}
2367 				}
2368 
2369 				if ((res->owner == dead_node) &&
2370 							(res->state & DLM_LOCK_RES_DROPPING_REF)) {
2371 					dlm_lockres_get(res);
2372 					__dlm_do_purge_lockres(dlm, res);
2373 					spin_unlock(&res->spinlock);
2374 					wake_up(&res->wq);
2375 					dlm_lockres_put(res);
2376 					continue;
2377 				} else if (res->owner == dlm->node_num)
2378 					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2379 				spin_unlock(&res->spinlock);
2380 				continue;
2381 			}
2382 			spin_lock(&res->spinlock);
2383 			/* zero the lvb if necessary */
2384 			dlm_revalidate_lvb(dlm, res, dead_node);
2385 			if (res->owner == dead_node) {
2386 				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2387 					mlog(0, "%s:%.*s: owned by "
2388 						"dead node %u, this node was "
2389 						"dropping its ref when master died. "
2390 						"continue, purging the lockres.\n",
2391 						dlm->name, res->lockname.len,
2392 						res->lockname.name, dead_node);
2393 					dlm_lockres_get(res);
2394 					__dlm_do_purge_lockres(dlm, res);
2395 					spin_unlock(&res->spinlock);
2396 					wake_up(&res->wq);
2397 					dlm_lockres_put(res);
2398 					continue;
2399 				}
2400 				dlm_move_lockres_to_recovery_list(dlm, res);
2401 			} else if (res->owner == dlm->node_num) {
2402 				dlm_free_dead_locks(dlm, res, dead_node);
2403 				__dlm_lockres_calc_usage(dlm, res);
2404 			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2405 				if (test_bit(dead_node, res->refmap)) {
2406 					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2407 						"no locks and had not purged before dying\n",
2408 						dlm->name, res->lockname.len,
2409 						res->lockname.name, dead_node);
2410 					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2411 				}
2412 			}
2413 			spin_unlock(&res->spinlock);
2414 		}
2415 	}
2416 
2417 }
2418 
2419 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2420 {
2421 	assert_spin_locked(&dlm->spinlock);
2422 
2423 	if (dlm->reco.new_master == idx) {
2424 		mlog(0, "%s: recovery master %d just died\n",
2425 		     dlm->name, idx);
2426 		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2427 			/* finalize1 was reached, so it is safe to clear
2428 			 * the new_master and dead_node.  that recovery
2429 			 * is complete. */
2430 			mlog(0, "%s: dead master %d had reached "
2431 			     "finalize1 state, clearing\n", dlm->name, idx);
2432 			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2433 			__dlm_reset_recovery(dlm);
2434 		}
2435 	}
2436 
2437 	/* Clean up join state on node death. */
2438 	if (dlm->joining_node == idx) {
2439 		mlog(0, "Clearing join state for node %u\n", idx);
2440 		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2441 	}
2442 
2443 	/* check to see if the node is already considered dead */
2444 	if (!test_bit(idx, dlm->live_nodes_map)) {
2445 		mlog(0, "for domain %s, node %d is already dead. "
2446 		     "another node likely did recovery already.\n",
2447 		     dlm->name, idx);
2448 		return;
2449 	}
2450 
2451 	/* check to see if we do not care about this node */
2452 	if (!test_bit(idx, dlm->domain_map)) {
2453 		/* This also catches the case that we get a node down
2454 		 * but haven't joined the domain yet. */
2455 		mlog(0, "node %u already removed from domain!\n", idx);
2456 		return;
2457 	}
2458 
2459 	clear_bit(idx, dlm->live_nodes_map);
2460 
2461 	/* make sure local cleanup occurs before the heartbeat events */
2462 	if (!test_bit(idx, dlm->recovery_map))
2463 		dlm_do_local_recovery_cleanup(dlm, idx);
2464 
2465 	/* notify anything attached to the heartbeat events */
2466 	dlm_hb_event_notify_attached(dlm, idx, 0);
2467 
2468 	mlog(0, "node %u being removed from domain map!\n", idx);
2469 	clear_bit(idx, dlm->domain_map);
2470 	clear_bit(idx, dlm->exit_domain_map);
2471 	/* wake up migration waiters if a node goes down.
2472 	 * perhaps later we can genericize this for other waiters. */
2473 	wake_up(&dlm->migration_wq);
2474 
2475 	set_bit(idx, dlm->recovery_map);
2476 }
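
/*
 * Editor's summary of the bitmap transitions above for dead node idx
 * (informational):
 *
 *	live_nodes_map     cleared   no longer heartbeating
 *	domain_map         cleared   no longer a domain member
 *	exit_domain_map    cleared
 *	recovery_map       set       node now needs lock recovery
 */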
2477 
2478 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2479 {
2480 	struct dlm_ctxt *dlm = data;
2481 
2482 	if (!dlm_grab(dlm))
2483 		return;
2484 
2485 	/*
2486 	 * This will notify any dlm users that a node in our domain
2487 	 * went away without notifying us first.
2488 	 */
2489 	if (test_bit(idx, dlm->domain_map))
2490 		dlm_fire_domain_eviction_callbacks(dlm, idx);
2491 
2492 	spin_lock(&dlm->spinlock);
2493 	__dlm_hb_node_down(dlm, idx);
2494 	spin_unlock(&dlm->spinlock);
2495 
2496 	dlm_put(dlm);
2497 }
2498 
2499 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2500 {
2501 	struct dlm_ctxt *dlm = data;
2502 
2503 	if (!dlm_grab(dlm))
2504 		return;
2505 
2506 	spin_lock(&dlm->spinlock);
2507 	set_bit(idx, dlm->live_nodes_map);
2508 	/* do NOT notify mle attached to the heartbeat events.
2509 	/* do NOT notify mles attached to the heartbeat events.
2510 	 * new nodes are of no interest to mastery until they have joined. */
2511 
2512 	dlm_put(dlm);
2513 }
2514 
2515 static void dlm_reco_ast(void *astdata)
2516 {
2517 	struct dlm_ctxt *dlm = astdata;
2518 	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2519 	     dlm->node_num, dlm->name);
2520 }
2521 static void dlm_reco_bast(void *astdata, int blocked_type)
2522 {
2523 	struct dlm_ctxt *dlm = astdata;
2524 	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2525 	     dlm->node_num, dlm->name);
2526 }
2527 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2528 {
2529 	mlog(0, "unlockast for recovery lock fired!\n");
2530 }
2531 
2532 /*
2533  * dlm_pick_recovery_master will continually attempt to use
2534  * dlmlock() on the special "$RECOVERY" lockres with the
2535  * LKM_NOQUEUE flag to get an EX.  every thread that enters
2536  * this function on each node racing to become the recovery
2537  * master will not stop attempting this until either:
2538  * a) this node gets the EX (and becomes the recovery master),
2539  * or b) dlm->reco.new_master gets set to some nodenum
2540  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2541  * so each time a recovery master is needed, the entire cluster
2542  * will sync at this point.  if the new master dies, that will
2543  * be detected in dlm_do_recovery */
2544 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2545 {
2546 	enum dlm_status ret;
2547 	struct dlm_lockstatus lksb;
2548 	int status = -EINVAL;
2549 
2550 	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2551 	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2552 again:
2553 	memset(&lksb, 0, sizeof(lksb));
2554 
2555 	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2556 		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2557 		      dlm_reco_ast, dlm, dlm_reco_bast);
2558 
2559 	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2560 	     dlm->name, ret, lksb.status);
2561 
2562 	if (ret == DLM_NORMAL) {
2563 		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2564 		     dlm->name, dlm->node_num);
2565 
2566 		/* got the EX lock.  check to see if another node
2567 		 * just became the reco master */
2568 		if (dlm_reco_master_ready(dlm)) {
2569 			mlog(0, "%s: got reco EX lock, but %u will "
2570 			     "do the recovery\n", dlm->name,
2571 			     dlm->reco.new_master);
2572 			status = -EEXIST;
2573 		} else {
2574 			status = 0;
2575 
2576 			/* see if recovery was already finished elsewhere */
2577 			spin_lock(&dlm->spinlock);
2578 			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2579 				status = -EINVAL;
2580 				mlog(0, "%s: got reco EX lock, but "
2581 				     "node got recovered already\n", dlm->name);
2582 				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2583 					mlog(ML_ERROR, "%s: new master is %u "
2584 					     "but no dead node!\n",
2585 					     dlm->name, dlm->reco.new_master);
2586 					BUG();
2587 				}
2588 			}
2589 			spin_unlock(&dlm->spinlock);
2590 		}
2591 
2592 		/* if this node has actually become the recovery master,
2593 		 * set the master and send the messages to begin recovery */
2594 		if (!status) {
2595 			mlog(0, "%s: dead=%u, this=%u, sending "
2596 			     "begin_reco now\n", dlm->name,
2597 			     dlm->reco.dead_node, dlm->node_num);
2598 			status = dlm_send_begin_reco_message(dlm,
2599 				      dlm->reco.dead_node);
2600 			/* this always succeeds */
2601 			BUG_ON(status);
2602 
2603 			/* set the new_master to this node */
2604 			spin_lock(&dlm->spinlock);
2605 			dlm_set_reco_master(dlm, dlm->node_num);
2606 			spin_unlock(&dlm->spinlock);
2607 		}
2608 
2609 		/* recovery lock is a special case.  ast will not get fired,
2610 		 * so just go ahead and unlock it. */
2611 		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2612 		if (ret == DLM_DENIED) {
2613 			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2614 			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2615 		}
2616 		if (ret != DLM_NORMAL) {
2617 			/* this would really suck. this could only happen
2618 			 * if there was a network error during the unlock
2619 			 * because of node death.  this means the unlock
2620 			 * is actually "done" and the lock structure is
2621 			 * even freed.  we can continue, but only
2622 			 * because this specific lock name is special. */
2623 			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2624 		}
2625 	} else if (ret == DLM_NOTQUEUED) {
2626 		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2627 		     dlm->name, dlm->node_num);
2628 		/* another node is master. wait on
2629 		 * reco.new_master != O2NM_INVALID_NODE_NUM
2630 		 * for at most one second */
2631 		wait_event_timeout(dlm->dlm_reco_thread_wq,
2632 					 dlm_reco_master_ready(dlm),
2633 					 msecs_to_jiffies(1000));
2634 		if (!dlm_reco_master_ready(dlm)) {
2635 			mlog(0, "%s: reco master taking a while\n",
2636 			     dlm->name);
2637 			goto again;
2638 		}
2639 		/* another node has informed this one that it is reco master */
2640 		mlog(0, "%s: reco master %u is ready to recover %u\n",
2641 		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2642 		status = -EEXIST;
2643 	} else if (ret == DLM_RECOVERING) {
2644 		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2645 		     dlm->name, dlm->node_num);
2646 		goto again;
2647 	} else {
2648 		struct dlm_lock_resource *res;
2649 
2650 		/* dlmlock returned something other than NOTQUEUED or NORMAL */
2651 		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2652 		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2653 		     dlm_errname(lksb.status));
2654 		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2655 					 DLM_RECOVERY_LOCK_NAME_LEN);
2656 		if (res) {
2657 			dlm_print_one_lock_resource(res);
2658 			dlm_lockres_put(res);
2659 		} else {
2660 			mlog(ML_ERROR, "recovery lock not found\n");
2661 		}
2662 		BUG();
2663 	}
2664 
2665 	return status;
2666 }
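
/*
 * Editor's summary of the $RECOVERY race outcomes above (informational):
 *
 *	DLM_NORMAL      got the EX: this node masters recovery (0),
 *	                unless another node already did (-EEXIST) or
 *	                recovery already finished (-EINVAL)
 *	DLM_NOTQUEUED   another node holds the EX: wait up to 1s for it
 *	                to announce itself as new_master, else retry
 *	DLM_RECOVERING  the $RECOVERY master died mid-call: retry
 *	anything else   unexpected: log the lockres and BUG()
 */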
2667 
2668 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2669 {
2670 	struct dlm_begin_reco br;
2671 	int ret = 0;
2672 	struct dlm_node_iter iter;
2673 	int nodenum;
2674 	int status;
2675 
2676 	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2677 
2678 	spin_lock(&dlm->spinlock);
2679 	dlm_node_iter_init(dlm->domain_map, &iter);
2680 	spin_unlock(&dlm->spinlock);
2681 
2682 	clear_bit(dead_node, iter.node_map);
2683 
2684 	memset(&br, 0, sizeof(br));
2685 	br.node_idx = dlm->node_num;
2686 	br.dead_node = dead_node;
2687 
2688 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2689 		ret = 0;
2690 		if (nodenum == dead_node) {
2691 			mlog(0, "not sending begin reco to dead node "
2692 				  "%u\n", dead_node);
2693 			continue;
2694 		}
2695 		if (nodenum == dlm->node_num) {
2696 			mlog(0, "not sending begin reco to self\n");
2697 			continue;
2698 		}
2699 retry:
2700 		mlog(0, "attempting to send begin reco msg to %d\n",
2701 			  nodenum);
2702 		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2703 					 &br, sizeof(br), nodenum, &status);
2704 		/* negative status is handled ok by caller here */
2705 		if (ret >= 0)
2706 			ret = status;
2707 		if (dlm_is_host_down(ret)) {
2708 			/* node is down.  not involved in recovery
2709 			 * so just keep going */
2710 			mlog(ML_NOTICE, "%s: node %u was down when sending "
2711 			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2712 			ret = 0;
2713 		}
2714 
2715 		/*
2716 		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2717 		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2718 		 * We are handling both for compatibility reasons.
2719 		 */
2720 		if (ret == -EAGAIN || ret == EAGAIN) {
2721 			mlog(0, "%s: trying to start recovery of node "
2722 			     "%u, but node %u is waiting for last recovery "
2723 			     "to complete, backoff for a bit\n", dlm->name,
2724 			     dead_node, nodenum);
2725 			msleep(100);
2726 			goto retry;
2727 		}
2728 		if (ret < 0) {
2729 			struct dlm_lock_resource *res;
2730 
2731 			/* this is now a serious problem, possibly ENOMEM
2732 			 * in the network stack.  must retry */
2733 			mlog_errno(ret);
2734 			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2735 			     "returned %d\n", dlm->name, nodenum, ret);
2736 			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2737 						 DLM_RECOVERY_LOCK_NAME_LEN);
2738 			if (res) {
2739 				dlm_print_one_lock_resource(res);
2740 				dlm_lockres_put(res);
2741 			} else {
2742 				mlog(ML_ERROR, "recovery lock not found\n");
2743 			}
2744 			/* sleep for a bit in hopes that we can avoid
2745 			 * another ENOMEM */
2746 			msleep(100);
2747 			goto retry;
2748 		}
2749 	}
2750 
2751 	return ret;
2752 }
2753 
2754 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2755 			   void **ret_data)
2756 {
2757 	struct dlm_ctxt *dlm = data;
2758 	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2759 
2760 	/* ok to return 0, domain has gone away */
2761 	if (!dlm_grab(dlm))
2762 		return 0;
2763 
2764 	spin_lock(&dlm->spinlock);
2765 	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2766 		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2767 		     "but this node is in finalize state, waiting on finalize2\n",
2768 		     dlm->name, br->node_idx, br->dead_node,
2769 		     dlm->reco.dead_node, dlm->reco.new_master);
2770 		spin_unlock(&dlm->spinlock);
2771 		dlm_put(dlm);
2772 		return -EAGAIN;
2773 	}
2774 	spin_unlock(&dlm->spinlock);
2775 
2776 	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2777 	     dlm->name, br->node_idx, br->dead_node,
2778 	     dlm->reco.dead_node, dlm->reco.new_master);
2779 
2780 	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2781 
2782 	spin_lock(&dlm->spinlock);
2783 	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2784 		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2785 			mlog(0, "%s: new_master %u died, changing "
2786 			     "to %u\n", dlm->name, dlm->reco.new_master,
2787 			     br->node_idx);
2788 		} else {
2789 			mlog(0, "%s: new_master %u NOT DEAD, changing "
2790 			     "to %u\n", dlm->name, dlm->reco.new_master,
2791 			     br->node_idx);
2792 			/* may not have seen the new master as dead yet */
2793 		}
2794 	}
2795 	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2796 		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2797 		     "node %u changing it to %u\n", dlm->name,
2798 		     dlm->reco.dead_node, br->node_idx, br->dead_node);
2799 	}
2800 	dlm_set_reco_master(dlm, br->node_idx);
2801 	dlm_set_reco_dead_node(dlm, br->dead_node);
2802 	if (!test_bit(br->dead_node, dlm->recovery_map)) {
2803 		mlog(0, "recovery master %u sees %u as dead, but this "
2804 		     "node has not yet.  marking %u as dead\n",
2805 		     br->node_idx, br->dead_node, br->dead_node);
2806 		if (!test_bit(br->dead_node, dlm->domain_map) ||
2807 		    !test_bit(br->dead_node, dlm->live_nodes_map))
2808 			mlog(0, "%u not in domain/live_nodes map "
2809 			     "so setting it in reco map manually\n",
2810 			     br->dead_node);
2811 		/* force the recovery cleanup in __dlm_hb_node_down
2812 		 * both of these will be cleared in a moment */
2813 		set_bit(br->dead_node, dlm->domain_map);
2814 		set_bit(br->dead_node, dlm->live_nodes_map);
2815 		__dlm_hb_node_down(dlm, br->dead_node);
2816 	}
2817 	spin_unlock(&dlm->spinlock);
2818 
2819 	dlm_kick_recovery_thread(dlm);
2820 
2821 	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2822 	     dlm->name, br->node_idx, br->dead_node,
2823 	     dlm->reco.dead_node, dlm->reco.new_master);
2824 
2825 	dlm_put(dlm);
2826 	return 0;
2827 }
2828 
2829 #define DLM_FINALIZE_STAGE2  0x01
2830 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2831 {
2832 	int ret = 0;
2833 	struct dlm_finalize_reco fr;
2834 	struct dlm_node_iter iter;
2835 	int nodenum;
2836 	int status;
2837 	int stage = 1;
2838 
2839 	mlog(0, "finishing recovery for node %s:%u, "
2840 	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2841 
2842 	spin_lock(&dlm->spinlock);
2843 	dlm_node_iter_init(dlm->domain_map, &iter);
2844 	spin_unlock(&dlm->spinlock);
2845 
2846 stage2:
2847 	memset(&fr, 0, sizeof(fr));
2848 	fr.node_idx = dlm->node_num;
2849 	fr.dead_node = dlm->reco.dead_node;
2850 	if (stage == 2)
2851 		fr.flags |= DLM_FINALIZE_STAGE2;
2852 
2853 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2854 		if (nodenum == dlm->node_num)
2855 			continue;
2856 		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2857 					 &fr, sizeof(fr), nodenum, &status);
2858 		if (ret >= 0)
2859 			ret = status;
2860 		if (ret < 0) {
2861 			mlog(ML_ERROR, "Error %d when sending message %u (key "
2862 			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2863 			     dlm->key, nodenum);
2864 			if (dlm_is_host_down(ret)) {
2865 				/* this has no effect on this recovery
2866 				 * session, so set the status to zero to
2867 				 * finish out the last recovery */
2868 				mlog(ML_ERROR, "node %u went down after this "
2869 				     "node finished recovery.\n", nodenum);
2870 				ret = 0;
2871 				continue;
2872 			}
2873 			break;
2874 		}
2875 	}
2876 	if (stage == 1) {
2877 		/* reset the node_iter back to the top and send finalize2 */
2878 		iter.curnode = -1;
2879 		stage = 2;
2880 		goto stage2;
2881 	}
2882 
2883 	return ret;
2884 }
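
/*
 * Editor's sketch of the two-stage finalize handshake (informational):
 *
 *	reco master                    every other node
 *	-----------                    ----------------
 *	finalize1 (flags = 0)     ->   reassign lockres owners, set
 *	                               DLM_RECO_STATE_FINALIZE
 *	finalize2 (STAGE2 flag)   ->   clear FINALIZE, reset the
 *	                               recovery state
 *
 * A master that dies between the two stages is caught in
 * __dlm_hb_node_down(), which clears FINALIZE and resets recovery.
 */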
2885 
2886 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2887 			      void **ret_data)
2888 {
2889 	struct dlm_ctxt *dlm = data;
2890 	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2891 	int stage = 1;
2892 
2893 	/* ok to return 0, domain has gone away */
2894 	if (!dlm_grab(dlm))
2895 		return 0;
2896 
2897 	if (fr->flags & DLM_FINALIZE_STAGE2)
2898 		stage = 2;
2899 
2900 	mlog(0, "%s: node %u finalizing recovery stage%d of "
2901 	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2902 	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2903 
2904 	spin_lock(&dlm->spinlock);
2905 
2906 	if (dlm->reco.new_master != fr->node_idx) {
2907 		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2908 		     "%u is supposed to be the new master, dead=%u\n",
2909 		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
2910 		BUG();
2911 	}
2912 	if (dlm->reco.dead_node != fr->dead_node) {
2913 		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2914 		     "node %u, but node %u is supposed to be dead\n",
2915 		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2916 		BUG();
2917 	}
2918 
2919 	switch (stage) {
2920 	case 1:
2921 		dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2922 		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2923 			mlog(ML_ERROR, "%s: received finalize1 from "
2924 			     "new master %u for dead node %u, but "
2925 			     "this node has already received it!\n",
2926 			     dlm->name, fr->node_idx, fr->dead_node);
2927 			dlm_print_reco_node_status(dlm);
2928 			BUG();
2929 		}
2930 		dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2931 		spin_unlock(&dlm->spinlock);
2932 		break;
2933 	case 2:
2934 		if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2935 			mlog(ML_ERROR, "%s: received finalize2 from "
2936 			     "new master %u for dead node %u, but "
2937 			     "this node did not have finalize1!\n",
2938 			     dlm->name, fr->node_idx, fr->dead_node);
2939 			dlm_print_reco_node_status(dlm);
2940 			BUG();
2941 		}
2942 		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2943 		__dlm_reset_recovery(dlm);
2944 		spin_unlock(&dlm->spinlock);
2945 		dlm_kick_recovery_thread(dlm);
2946 		break;
2947 	}
2948 
2949 	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2950 	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2951 
2952 	dlm_put(dlm);
2953 	return 0;
2954 }
2955