xref: /linux/fs/ocfs2/dlm/dlmrecovery.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmrecovery.c
5  *
6  * recovery stuff
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26 
27 
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/timer.h>
41 #include <linux/kthread.h>
42 #include <linux/delay.h>
43 
44 
45 #include "cluster/heartbeat.h"
46 #include "cluster/nodemanager.h"
47 #include "cluster/tcp.h"
48 
49 #include "dlmapi.h"
50 #include "dlmcommon.h"
51 #include "dlmdomain.h"
52 
53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
54 #include "cluster/masklog.h"
55 
56 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
57 
58 static int dlm_recovery_thread(void *data);
59 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
60 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
61 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
62 static int dlm_do_recovery(struct dlm_ctxt *dlm);
63 
64 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
65 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
66 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
67 static int dlm_request_all_locks(struct dlm_ctxt *dlm,
68 				 u8 request_from, u8 dead_node);
69 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
70 
71 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
72 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
73 					const char *lockname, int namelen,
74 					int total_locks, u64 cookie,
75 					u8 flags, u8 master);
76 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
77 				    struct dlm_migratable_lockres *mres,
78 				    u8 send_to,
79 				    struct dlm_lock_resource *res,
80 				    int total_locks);
81 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
82 				     struct dlm_lock_resource *res,
83 				     struct dlm_migratable_lockres *mres);
84 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
85 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
86 				 u8 dead_node, u8 send_to);
87 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
88 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
89 					struct list_head *list, u8 dead_node);
90 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
91 					      u8 dead_node, u8 new_master);
92 static void dlm_reco_ast(void *astdata);
93 static void dlm_reco_bast(void *astdata, int blocked_type);
94 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
95 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
96 					 void *data);
97 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
98 
99 static u64 dlm_get_next_mig_cookie(void);
100 
101 static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
102 static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
103 static u64 dlm_mig_cookie = 1;
104 
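/* hands out a per-domain migration cookie; wraps from ~0ULL back to 1 and
 * never returns zero.  dlm_send_one_lockres() below passes a mig_cookie of 0
 * when a lockres fits in a single message, so (presumably) a nonzero cookie
 * is what ties the packets of a multi-message migration together. */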
105 static u64 dlm_get_next_mig_cookie(void)
106 {
107 	u64 c;
108 	spin_lock(&dlm_mig_cookie_lock);
109 	c = dlm_mig_cookie;
110 	if (dlm_mig_cookie == (~0ULL))
111 		dlm_mig_cookie = 1;
112 	else
113 		dlm_mig_cookie++;
114 	spin_unlock(&dlm_mig_cookie_lock);
115 	return c;
116 }
117 
118 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
119 {
120 	spin_lock(&dlm->spinlock);
121 	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
122 	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
123 	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
124 	spin_unlock(&dlm->spinlock);
125 }
126 
127 /* Worker function used during recovery. */
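/* Message handlers (e.g. dlm_request_all_locks_handler and
 * dlm_mig_lockres_handler below) queue a dlm_work_item on dlm->work_list and
 * call schedule_work(&dlm->dispatched_work); the queued items then run here,
 * where sleeping and network calls are allowed (see the note in the loop). */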
128 void dlm_dispatch_work(void *data)
129 {
130 	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
131 	LIST_HEAD(tmp_list);
132 	struct list_head *iter, *iter2;
133 	struct dlm_work_item *item;
134 	dlm_workfunc_t *workfunc;
135 
136 	spin_lock(&dlm->work_lock);
137 	list_splice_init(&dlm->work_list, &tmp_list);
138 	spin_unlock(&dlm->work_lock);
139 
140 	list_for_each_safe(iter, iter2, &tmp_list) {
141 		item = list_entry(iter, struct dlm_work_item, list);
142 		workfunc = item->func;
143 		list_del_init(&item->list);
144 
145 		/* already have ref on dlm to avoid having
146 		 * it disappear.  just double-check. */
147 		BUG_ON(item->dlm != dlm);
148 
149 		/* this is allowed to sleep and
150 		 * call network stuff */
151 		workfunc(item, item->data);
152 
153 		dlm_put(dlm);
154 		kfree(item);
155 	}
156 }
157 
158 /*
159  * RECOVERY THREAD
160  */
161 
162 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
163 {
164 	/* wake the recovery thread
165 	 * this will wake the reco thread in one of three places
166 	 * 1) sleeping with no recovery happening
167 	 * 2) sleeping with recovery mastered elsewhere
168 	 * 3) recovery mastered here, waiting on reco data */
169 
170 	wake_up(&dlm->dlm_reco_thread_wq);
171 }
172 
173 /* Launch the recovery thread */
174 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
175 {
176 	mlog(0, "starting dlm recovery thread...\n");
177 
178 	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
179 						"dlm_reco_thread");
180 	if (IS_ERR(dlm->dlm_reco_thread_task)) {
181 		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
182 		dlm->dlm_reco_thread_task = NULL;
183 		return -EINVAL;
184 	}
185 
186 	return 0;
187 }
188 
189 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
190 {
191 	if (dlm->dlm_reco_thread_task) {
192 		mlog(0, "waiting for dlm recovery thread to exit\n");
193 		kthread_stop(dlm->dlm_reco_thread_task);
194 		dlm->dlm_reco_thread_task = NULL;
195 	}
196 }
197 
198 
199 
200 /*
201  * this is lame, but here's how recovery works...
202  * 1) all recovery threads cluster wide will work on recovering
203  *    ONE node at a time
204  * 2) negotiate who will take over all the locks for the dead node.
205  *    that's right... ALL the locks.
206  * 3) once a new master is chosen, everyone scans all locks
207  *    and moves aside those mastered by the dead guy
208  * 4) each of these locks should be locked until recovery is done
209  * 5) the new master collects up all of the secondary lock queue info
210  *    one lock at a time, forcing each node to communicate back
211  *    before continuing
212  * 6) each secondary lock queue responds with the full known lock info
213  * 7) once the new master has run all its locks, it sends an ALLDONE!
214  *    message to everyone
215  * 8) upon receiving this message, the secondary queue node unlocks
216  *    and responds to the ALLDONE
217  * 9) once the new master gets responses from everyone, he unlocks
218  *    everything and recovery for this dead node is done
219  *10) go back to 2) while there are still dead nodes
220  *
221  */
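
/*
 * a hedged mapping of the steps above onto the code in this file (the
 * exact begin/finalize wiring lives outside this excerpt):
 *   2) dlm_pick_recovery_master(), with dlm_send_begin_reco_message()
 *      announcing the dead node / new master pair
 *   5) the new master sends DLM_LOCK_REQUEST_MSG to every live node
 *      (dlm_request_all_locks / dlm_request_all_locks_handler)
 *   6) each node streams its lock state back in DLM_MIG_LOCKRES_MSG
 *      packets (dlm_send_one_lockres) and finishes with
 *      DLM_RECO_DATA_DONE_MSG (dlm_send_all_done_msg)
 *   7) the ALLDONE! message is the finalize message sent by
 *      dlm_send_finalize_reco_message()
 */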
222 
223 
224 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
225 
226 static int dlm_recovery_thread(void *data)
227 {
228 	int status;
229 	struct dlm_ctxt *dlm = data;
230 	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
231 
232 	mlog(0, "dlm thread running for %s...\n", dlm->name);
233 
234 	while (!kthread_should_stop()) {
235 		if (dlm_joined(dlm)) {
236 			status = dlm_do_recovery(dlm);
237 			if (status == -EAGAIN) {
238 				/* do not sleep, recheck immediately. */
239 				continue;
240 			}
241 			if (status < 0)
242 				mlog_errno(status);
243 		}
244 
245 		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
246 						 kthread_should_stop(),
247 						 timeout);
248 	}
249 
250 	mlog(0, "quitting DLM recovery thread\n");
251 	return 0;
252 }
253 
254 /* returns true when the recovery master has contacted us */
255 static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
256 {
257 	int ready;
258 	spin_lock(&dlm->spinlock);
259 	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
260 	spin_unlock(&dlm->spinlock);
261 	return ready;
262 }
263 
264 /* returns true if node is no longer in the domain
265  * could be dead or just not joined */
266 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
267 {
268 	int dead;
269 	spin_lock(&dlm->spinlock);
270 	dead = !test_bit(node, dlm->domain_map);
271 	spin_unlock(&dlm->spinlock);
272 	return dead;
273 }
274 
275 int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
276 {
277 	if (timeout) {
278 		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
279 		     "death of node %u\n", dlm->name, timeout, node);
280 		wait_event_timeout(dlm->dlm_reco_thread_wq,
281 			   dlm_is_node_dead(dlm, node),
282 			   msecs_to_jiffies(timeout));
283 	} else {
284 		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
285 		     "of death of node %u\n", dlm->name, node);
286 		wait_event(dlm->dlm_reco_thread_wq,
287 			   dlm_is_node_dead(dlm, node));
288 	}
289 	/* for now, return 0 */
290 	return 0;
291 }
292 
293 /* callers of the top-level api calls (dlmlock/dlmunlock) should
294  * block on the dlm->reco.event when recovery is in progress.
295  * the dlm recovery thread will set this state when it begins
296  * recovering a dead node (as the new master or not) and clear
297  * the state and wake as soon as all affected lock resources have
298  * been marked with the RECOVERY flag */
299 static int dlm_in_recovery(struct dlm_ctxt *dlm)
300 {
301 	int in_recovery;
302 	spin_lock(&dlm->spinlock);
303 	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
304 	spin_unlock(&dlm->spinlock);
305 	return in_recovery;
306 }
307 
308 
309 void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
310 {
311 	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
312 }
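
/* a minimal usage sketch (hedged; the real callers are the top-level
 * dlmlock/dlmunlock paths mentioned above):
 *
 *	dlm_wait_for_recovery(dlm);
 *	... now safe to look up the lockres and proceed with the request ...
 *
 * so that new lock traffic does not race with lockreses being marked
 * RECOVERING. */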
313 
314 static void dlm_begin_recovery(struct dlm_ctxt *dlm)
315 {
316 	spin_lock(&dlm->spinlock);
317 	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
318 	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
319 	spin_unlock(&dlm->spinlock);
320 }
321 
322 static void dlm_end_recovery(struct dlm_ctxt *dlm)
323 {
324 	spin_lock(&dlm->spinlock);
325 	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
326 	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
327 	spin_unlock(&dlm->spinlock);
328 	wake_up(&dlm->reco.event);
329 }
330 
331 static int dlm_do_recovery(struct dlm_ctxt *dlm)
332 {
333 	int status = 0;
334 	int ret;
335 
336 	spin_lock(&dlm->spinlock);
337 
338 	/* check to see if the new master has died */
339 	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
340 	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
341 		mlog(0, "new master %u died while recovering %u!\n",
342 		     dlm->reco.new_master, dlm->reco.dead_node);
343 		/* unset the new_master, leave dead_node */
344 		dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
345 	}
346 
347 	/* select a target to recover */
348 	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
349 		int bit;
350 
351 		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
352 		if (bit >= O2NM_MAX_NODES || bit < 0)
353 			dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
354 		else
355 			dlm->reco.dead_node = bit;
356 	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
357 		/* BUG? */
358 		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
359 		     dlm->reco.dead_node);
360 		dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
361 	}
362 
363 	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
364 		// mlog(0, "nothing to recover!  sleeping now!\n");
365 		spin_unlock(&dlm->spinlock);
366 		/* return to main thread loop and sleep. */
367 		return 0;
368 	}
369 	mlog(0, "recovery thread found node %u in the recovery map!\n",
370 	     dlm->reco.dead_node);
371 	spin_unlock(&dlm->spinlock);
372 
373 	/* take write barrier */
374 	/* (stops the list reshuffling thread, proxy ast handling) */
375 	dlm_begin_recovery(dlm);
376 
377 	if (dlm->reco.new_master == dlm->node_num)
378 		goto master_here;
379 
380 	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
381 		/* choose a new master, returns 0 if this node
382 		 * is the master, -EEXIST if it's another node.
383 		 * this does not return until a new master is chosen
384 		 * or recovery completes entirely. */
385 		ret = dlm_pick_recovery_master(dlm);
386 		if (!ret) {
387 			/* already notified everyone.  go. */
388 			goto master_here;
389 		}
390 		mlog(0, "another node will master this recovery session.\n");
391 	}
392 	mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
393 	     dlm->name, dlm->reco.new_master,
394 	     dlm->node_num, dlm->reco.dead_node);
395 
396 	/* it is safe to start everything back up here
397 	 * because all of the dead node's lock resources
398 	 * have been marked as in-recovery */
399 	dlm_end_recovery(dlm);
400 
401 	/* sleep out in main dlm_recovery_thread loop. */
402 	return 0;
403 
404 master_here:
405 	mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
406 	     dlm->name, dlm->reco.dead_node, dlm->node_num);
407 
408 	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
409 	if (status < 0) {
410 		mlog(ML_ERROR, "error %d remastering locks for node %u, "
411 		     "retrying.\n", status, dlm->reco.dead_node);
412 		/* yield a bit to allow any final network messages
413 		 * to get handled on remaining nodes */
414 		msleep(100);
415 	} else {
416 		/* success!  see if any other nodes need recovery */
417 		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
418 		     dlm->name, dlm->reco.dead_node, dlm->node_num);
419 		dlm_reset_recovery(dlm);
420 	}
421 	dlm_end_recovery(dlm);
422 
423 	/* continue and look for another dead node */
424 	return -EAGAIN;
425 }
426 
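/* a rough sketch of the per-node state flow driven by dlm_remaster_locks()
 * (not authoritative; FINALIZE_SENT is set outside this excerpt):
 *   INIT -> REQUESTING            about to send DLM_LOCK_REQUEST_MSG
 *   REQUESTING -> REQUESTED       request acked, node will stream lock data
 *   REQUESTED/RECEIVING -> DONE   node sent DLM_RECO_DATA_DONE_MSG
 *   DONE -> FINALIZE_SENT         finalize message has gone out
 *   anything -> DEAD              node died mid-recovery, restart (-EAGAIN)
 */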
427 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
428 {
429 	int status = 0;
430 	struct dlm_reco_node_data *ndata;
431 	struct list_head *iter;
432 	int all_nodes_done;
433 	int destroy = 0;
434 	int pass = 0;
435 
436 	status = dlm_init_recovery_area(dlm, dead_node);
437 	if (status < 0)
438 		goto leave;
439 
440 	/* safe to access the node data list without a lock, since this
441 	 * process is the only one to change the list */
442 	list_for_each(iter, &dlm->reco.node_data) {
443 		ndata = list_entry (iter, struct dlm_reco_node_data, list);
444 		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
445 		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
446 
447 		mlog(0, "requesting lock info from node %u\n",
448 		     ndata->node_num);
449 
450 		if (ndata->node_num == dlm->node_num) {
451 			ndata->state = DLM_RECO_NODE_DATA_DONE;
452 			continue;
453 		}
454 
455 		status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
456 		if (status < 0) {
457 			mlog_errno(status);
458 			if (dlm_is_host_down(status))
459 				ndata->state = DLM_RECO_NODE_DATA_DEAD;
460 			else {
461 				destroy = 1;
462 				goto leave;
463 			}
464 		}
465 
466 		switch (ndata->state) {
467 			case DLM_RECO_NODE_DATA_INIT:
468 			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
469 			case DLM_RECO_NODE_DATA_REQUESTED:
470 				BUG();
471 				break;
472 			case DLM_RECO_NODE_DATA_DEAD:
473 				mlog(0, "node %u died after requesting "
474 				     "recovery info for node %u\n",
475 				     ndata->node_num, dead_node);
476 				// start all over
477 				destroy = 1;
478 				status = -EAGAIN;
479 				goto leave;
480 			case DLM_RECO_NODE_DATA_REQUESTING:
481 				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
482 				mlog(0, "now receiving recovery data from "
483 				     "node %u for dead node %u\n",
484 				     ndata->node_num, dead_node);
485 				break;
486 			case DLM_RECO_NODE_DATA_RECEIVING:
487 				mlog(0, "already receiving recovery data from "
488 				     "node %u for dead node %u\n",
489 				     ndata->node_num, dead_node);
490 				break;
491 			case DLM_RECO_NODE_DATA_DONE:
492 				mlog(0, "already DONE receiving recovery data "
493 				     "from node %u for dead node %u\n",
494 				     ndata->node_num, dead_node);
495 				break;
496 		}
497 	}
498 
499 	mlog(0, "done requesting all lock info\n");
500 
501 	/* nodes should be sending reco data now
502 	 * just need to wait */
503 
504 	while (1) {
505 		/* check all the nodes now to see if we are
506 		 * done, or if anyone died */
507 		all_nodes_done = 1;
508 		spin_lock(&dlm_reco_state_lock);
509 		list_for_each(iter, &dlm->reco.node_data) {
510 			ndata = list_entry (iter, struct dlm_reco_node_data, list);
511 
512 			mlog(0, "checking recovery state of node %u\n",
513 			     ndata->node_num);
514 			switch (ndata->state) {
515 				case DLM_RECO_NODE_DATA_INIT:
516 				case DLM_RECO_NODE_DATA_REQUESTING:
517 					mlog(ML_ERROR, "bad ndata state for "
518 					     "node %u: state=%d\n",
519 					     ndata->node_num, ndata->state);
520 					BUG();
521 					break;
522 				case DLM_RECO_NODE_DATA_DEAD:
523 					mlog(ML_NOTICE, "node %u died after "
524 					     "requesting recovery info for "
525 					     "node %u\n", ndata->node_num,
526 					     dead_node);
527 					spin_unlock(&dlm_reco_state_lock);
528 					// start all over
529 					destroy = 1;
530 					status = -EAGAIN;
531 					/* instead of spinning like crazy here,
532 					 * wait for the domain map to catch up
533 					 * with the network state.  otherwise this
534 					 * can be hit hundreds of times before
535 					 * the node is really seen as dead. */
536 					wait_event_timeout(dlm->dlm_reco_thread_wq,
537 							   dlm_is_node_dead(dlm,
538 								ndata->node_num),
539 							   msecs_to_jiffies(1000));
540 					mlog(0, "waited 1 sec for %u, "
541 					     "dead? %s\n", ndata->node_num,
542 					     dlm_is_node_dead(dlm, ndata->node_num) ?
543 					     "yes" : "no");
544 					goto leave;
545 				case DLM_RECO_NODE_DATA_RECEIVING:
546 				case DLM_RECO_NODE_DATA_REQUESTED:
547 					all_nodes_done = 0;
548 					break;
549 				case DLM_RECO_NODE_DATA_DONE:
550 					break;
551 				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
552 					break;
553 			}
554 		}
555 		spin_unlock(&dlm_reco_state_lock);
556 
557 		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
558 		     all_nodes_done?"yes":"no");
559 		if (all_nodes_done) {
560 			int ret;
561 
562 			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
563 	 		 * just send a finalize message to everyone and
564 	 		 * clean up */
565 			mlog(0, "all nodes are done! send finalize\n");
566 			ret = dlm_send_finalize_reco_message(dlm);
567 			if (ret < 0)
568 				mlog_errno(ret);
569 
570 			spin_lock(&dlm->spinlock);
571 			dlm_finish_local_lockres_recovery(dlm, dead_node,
572 							  dlm->node_num);
573 			spin_unlock(&dlm->spinlock);
574 			mlog(0, "should be done with recovery!\n");
575 
576 			mlog(0, "finishing recovery of %s at %lu, "
577 			     "dead=%u, this=%u, new=%u\n", dlm->name,
578 			     jiffies, dlm->reco.dead_node,
579 			     dlm->node_num, dlm->reco.new_master);
580 			destroy = 1;
581 			status = ret;
582 			/* rescan everything marked dirty along the way */
583 			dlm_kick_thread(dlm, NULL);
584 			break;
585 		}
586 		/* wait to be signalled, with periodic timeout
587 		 * to check for node death */
588 		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
589 					 kthread_should_stop(),
590 					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
591 
592 	}
593 
594 leave:
595 	if (destroy)
596 		dlm_destroy_recovery_area(dlm, dead_node);
597 
598 	mlog_exit(status);
599 	return status;
600 }
601 
602 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
603 {
604 	int num=0;
605 	struct dlm_reco_node_data *ndata;
606 
607 	spin_lock(&dlm->spinlock);
608 	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
609 	/* nodes can only be removed (by dying) after dropping
610 	 * this lock, and death will be trapped later, so this should do */
611 	spin_unlock(&dlm->spinlock);
612 
613 	while (1) {
614 		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
615 		if (num >= O2NM_MAX_NODES) {
616 			break;
617 		}
618 		BUG_ON(num == dead_node);
619 
620 		ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
621 		if (!ndata) {
622 			dlm_destroy_recovery_area(dlm, dead_node);
623 			return -ENOMEM;
624 		}
625 		ndata->node_num = num;
626 		ndata->state = DLM_RECO_NODE_DATA_INIT;
627 		spin_lock(&dlm_reco_state_lock);
628 		list_add_tail(&ndata->list, &dlm->reco.node_data);
629 		spin_unlock(&dlm_reco_state_lock);
630 		num++;
631 	}
632 
633 	return 0;
634 }
635 
636 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
637 {
638 	struct list_head *iter, *iter2;
639 	struct dlm_reco_node_data *ndata;
640 	LIST_HEAD(tmplist);
641 
642 	spin_lock(&dlm_reco_state_lock);
643 	list_splice_init(&dlm->reco.node_data, &tmplist);
644 	spin_unlock(&dlm_reco_state_lock);
645 
646 	list_for_each_safe(iter, iter2, &tmplist) {
647 		ndata = list_entry (iter, struct dlm_reco_node_data, list);
648 		list_del_init(&ndata->list);
649 		kfree(ndata);
650 	}
651 }
652 
653 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
654 				 u8 dead_node)
655 {
656 	struct dlm_lock_request lr;
657 	enum dlm_status ret;
658 
659 	mlog(0, "\n");
660 
661 
662 	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
663 		  "to %u\n", dead_node, request_from);
664 
665 	memset(&lr, 0, sizeof(lr));
666 	lr.node_idx = dlm->node_num;
667 	lr.dead_node = dead_node;
668 
669 	// send message
670 	ret = DLM_NOLOCKMGR;
671 	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
672 				 &lr, sizeof(lr), request_from, NULL);
673 
674 	/* negative status is handled by caller */
675 	if (ret < 0)
676 		mlog_errno(ret);
677 
678 	// return from here, then
679 	// sleep until all received or error
680 	return ret;
681 
682 }
683 
684 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
685 {
686 	struct dlm_ctxt *dlm = data;
687 	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
688 	char *buf = NULL;
689 	struct dlm_work_item *item = NULL;
690 
691 	if (!dlm_grab(dlm))
692 		return -EINVAL;
693 
694 	BUG_ON(lr->dead_node != dlm->reco.dead_node);
695 
696 	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
697 	if (!item) {
698 		dlm_put(dlm);
699 		return -ENOMEM;
700 	}
701 
702 	/* this will get freed by dlm_request_all_locks_worker */
703 	buf = (char *) __get_free_page(GFP_KERNEL);
704 	if (!buf) {
705 		kfree(item);
706 		dlm_put(dlm);
707 		return -ENOMEM;
708 	}
709 
710 	/* queue up work for dlm_request_all_locks_worker */
711 	dlm_grab(dlm);  /* get an extra ref for the work item */
712 	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
713 	item->u.ral.reco_master = lr->node_idx;
714 	item->u.ral.dead_node = lr->dead_node;
715 	spin_lock(&dlm->work_lock);
716 	list_add_tail(&item->list, &dlm->work_list);
717 	spin_unlock(&dlm->work_lock);
718 	schedule_work(&dlm->dispatched_work);
719 
720 	dlm_put(dlm);
721 	return 0;
722 }
723 
724 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
725 {
726 	struct dlm_migratable_lockres *mres;
727 	struct dlm_lock_resource *res;
728 	struct dlm_ctxt *dlm;
729 	LIST_HEAD(resources);
730 	struct list_head *iter;
731 	int ret;
732 	u8 dead_node, reco_master;
733 
734 	dlm = item->dlm;
735 	dead_node = item->u.ral.dead_node;
736 	reco_master = item->u.ral.reco_master;
737 	mres = (struct dlm_migratable_lockres *)data;
738 
739 	if (dead_node != dlm->reco.dead_node ||
740 	    reco_master != dlm->reco.new_master) {
741 		/* show extra debug info if the recovery state is messed */
742 		mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), "
743 		     "request(dead=%u, master=%u)\n",
744 		     dlm->name, dlm->reco.dead_node, dlm->reco.new_master,
745 		     dead_node, reco_master);
746 		mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u "
747 		     "entry[0]={c=%u:%llu,l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n",
748 		     dlm->name, mres->lockname_len, mres->lockname, mres->master,
749 		     mres->num_locks, mres->total_locks, mres->flags,
750 		     dlm_get_lock_cookie_node(mres->ml[0].cookie),
751 		     dlm_get_lock_cookie_seq(mres->ml[0].cookie),
752 		     mres->ml[0].list, mres->ml[0].flags,
753 		     mres->ml[0].type, mres->ml[0].convert_type,
754 		     mres->ml[0].highest_blocked, mres->ml[0].node);
755 		BUG();
756 	}
757 	BUG_ON(dead_node != dlm->reco.dead_node);
758 	BUG_ON(reco_master != dlm->reco.new_master);
759 
760 	/* lock resources should have already been moved to the
761  	 * dlm->reco.resources list.  now move items from that list
762  	 * to a temp list if the dead owner matches.  note that the
763 	 * whole cluster recovers only one node at a time, so we
764 	 * can safely move UNKNOWN lock resources for each recovery
765 	 * session. */
766 	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
767 
768 	/* now we can begin blasting lockreses without the dlm lock */
769 	list_for_each(iter, &resources) {
770 		res = list_entry (iter, struct dlm_lock_resource, recovering);
771 		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
772 				   	DLM_MRES_RECOVERY);
773 		if (ret < 0)
774 			mlog_errno(ret);
775 	}
776 
777 	/* move the resources back to the list */
778 	spin_lock(&dlm->spinlock);
779 	list_splice_init(&resources, &dlm->reco.resources);
780 	spin_unlock(&dlm->spinlock);
781 
782 	ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
783 	if (ret < 0)
784 		mlog_errno(ret);
785 
786 	free_page((unsigned long)data);
787 }
788 
789 
790 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
791 {
792 	int ret, tmpret;
793 	struct dlm_reco_data_done done_msg;
794 
795 	memset(&done_msg, 0, sizeof(done_msg));
796 	done_msg.node_idx = dlm->node_num;
797 	done_msg.dead_node = dead_node;
798 	mlog(0, "sending DATA DONE message to %u, "
799 	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
800 	     done_msg.dead_node);
801 
802 	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
803 				 sizeof(done_msg), send_to, &tmpret);
804 	/* negative status is ignored by the caller */
805 	if (ret >= 0)
806 		ret = tmpret;
807 	return ret;
808 }
809 
810 
811 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
812 {
813 	struct dlm_ctxt *dlm = data;
814 	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
815 	struct list_head *iter;
816 	struct dlm_reco_node_data *ndata = NULL;
817 	int ret = -EINVAL;
818 
819 	if (!dlm_grab(dlm))
820 		return -EINVAL;
821 
822 	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
823 	     "node_idx=%u, this node=%u\n", done->dead_node,
824 	     dlm->reco.dead_node, done->node_idx, dlm->node_num);
825 	BUG_ON(done->dead_node != dlm->reco.dead_node);
826 
827 	spin_lock(&dlm_reco_state_lock);
828 	list_for_each(iter, &dlm->reco.node_data) {
829 		ndata = list_entry (iter, struct dlm_reco_node_data, list);
830 		if (ndata->node_num != done->node_idx)
831 			continue;
832 
833 		switch (ndata->state) {
834 			/* should have moved beyond INIT but not to FINALIZE yet */
835 			case DLM_RECO_NODE_DATA_INIT:
836 			case DLM_RECO_NODE_DATA_DEAD:
837 			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
838 				mlog(ML_ERROR, "bad ndata state for node %u:"
839 				     " state=%d\n", ndata->node_num,
840 				     ndata->state);
841 				BUG();
842 				break;
843 			/* these states are possible at this point, anywhere along
844 			 * the line of recovery */
845 			case DLM_RECO_NODE_DATA_DONE:
846 			case DLM_RECO_NODE_DATA_RECEIVING:
847 			case DLM_RECO_NODE_DATA_REQUESTED:
848 			case DLM_RECO_NODE_DATA_REQUESTING:
849 				mlog(0, "node %u is DONE sending "
850 					  "recovery data!\n",
851 					  ndata->node_num);
852 
853 				ndata->state = DLM_RECO_NODE_DATA_DONE;
854 				ret = 0;
855 				break;
856 		}
857 	}
858 	spin_unlock(&dlm_reco_state_lock);
859 
860 	/* wake the recovery thread, some node is done */
861 	if (!ret)
862 		dlm_kick_recovery_thread(dlm);
863 
864 	if (ret < 0)
865 		mlog(ML_ERROR, "failed to find recovery node data for node "
866 		     "%u\n", done->node_idx);
867 	dlm_put(dlm);
868 
869 	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
870 	return ret;
871 }
872 
873 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
874 					struct list_head *list,
875 				       	u8 dead_node)
876 {
877 	struct dlm_lock_resource *res;
878 	struct list_head *iter, *iter2;
879 	struct dlm_lock *lock;
880 
881 	spin_lock(&dlm->spinlock);
882 	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
883 		res = list_entry (iter, struct dlm_lock_resource, recovering);
884 		/* always prune any $RECOVERY entries for dead nodes,
885 		 * otherwise hangs can occur during later recovery */
886 		if (dlm_is_recovery_lock(res->lockname.name,
887 					 res->lockname.len)) {
888 			spin_lock(&res->spinlock);
889 			list_for_each_entry(lock, &res->granted, list) {
890 				if (lock->ml.node == dead_node) {
891 					mlog(0, "AHA! there was "
892 					     "a $RECOVERY lock for dead "
893 					     "node %u (%s)!\n",
894 					     dead_node, dlm->name);
895 					list_del_init(&lock->list);
896 					dlm_lock_put(lock);
897 					break;
898 				}
899 			}
900 			spin_unlock(&res->spinlock);
901 			continue;
902 		}
903 
904 		if (res->owner == dead_node) {
905 			mlog(0, "found lockres owned by dead node while "
906 				  "doing recovery for node %u. sending it.\n",
907 				  dead_node);
908 			list_del_init(&res->recovering);
909 			list_add_tail(&res->recovering, list);
910 		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
911 			mlog(0, "found UNKNOWN owner while doing recovery "
912 				  "for node %u. sending it.\n", dead_node);
913 			list_del_init(&res->recovering);
914 			list_add_tail(&res->recovering, list);
915 		}
916 	}
917 	spin_unlock(&dlm->spinlock);
918 }
919 
920 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
921 {
922 	int total_locks = 0;
923 	struct list_head *iter, *queue = &res->granted;
924 	int i;
925 
926 	for (i=0; i<3; i++) {
927 		list_for_each(iter, queue)
928 			total_locks++;
929 		queue++;
930 	}
931 	return total_locks;
932 }
933 
934 
935 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
936 				      struct dlm_migratable_lockres *mres,
937 				      u8 send_to,
938 				      struct dlm_lock_resource *res,
939 				      int total_locks)
940 {
941 	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
942 	int mres_total_locks = be32_to_cpu(mres->total_locks);
943 	int sz, ret = 0, status = 0;
944 	u8 orig_flags = mres->flags,
945 	   orig_master = mres->master;
946 
947 	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
948 	if (!mres->num_locks)
949 		return 0;
950 
951 	sz = sizeof(struct dlm_migratable_lockres) +
952 		(mres->num_locks * sizeof(struct dlm_migratable_lock));
953 
954 	/* add an all-done flag if we reached the last lock */
955 	orig_flags = mres->flags;
956 	BUG_ON(total_locks > mres_total_locks);
957 	if (total_locks == mres_total_locks)
958 		mres->flags |= DLM_MRES_ALL_DONE;
959 
960 	/* send it */
961 	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
962 				 sz, send_to, &status);
963 	if (ret < 0) {
964 		/* XXX: negative status is not handled.
965 		 * this will end up killing this node. */
966 		mlog_errno(ret);
967 	} else {
968 		/* might get an -ENOMEM back here */
969 		ret = status;
970 		if (ret < 0) {
971 			mlog_errno(ret);
972 
973 			if (ret == -EFAULT) {
974 				mlog(ML_ERROR, "node %u told me to kill "
975 				     "myself!\n", send_to);
976 				BUG();
977 			}
978 		}
979 	}
980 
981 	/* zero and reinit the message buffer */
982 	dlm_init_migratable_lockres(mres, res->lockname.name,
983 				    res->lockname.len, mres_total_locks,
984 				    mig_cookie, orig_flags, orig_master);
985 	return ret;
986 }
987 
988 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
989 					const char *lockname, int namelen,
990 					int total_locks, u64 cookie,
991 					u8 flags, u8 master)
992 {
993 	/* mres here is one full page */
994 	memset(mres, 0, PAGE_SIZE);
995 	mres->lockname_len = namelen;
996 	memcpy(mres->lockname, lockname, namelen);
997 	mres->num_locks = 0;
998 	mres->total_locks = cpu_to_be32(total_locks);
999 	mres->mig_cookie = cpu_to_be64(cookie);
1000 	mres->flags = flags;
1001 	mres->master = master;
1002 }
1003 
1004 
1005 /* returns 1 if this lock fills the network structure,
1006  * 0 otherwise */
1007 static int dlm_add_lock_to_array(struct dlm_lock *lock,
1008 				 struct dlm_migratable_lockres *mres, int queue)
1009 {
1010 	struct dlm_migratable_lock *ml;
1011 	int lock_num = mres->num_locks;
1012 
1013 	ml = &(mres->ml[lock_num]);
1014 	ml->cookie = lock->ml.cookie;
1015 	ml->type = lock->ml.type;
1016 	ml->convert_type = lock->ml.convert_type;
1017 	ml->highest_blocked = lock->ml.highest_blocked;
1018 	ml->list = queue;
1019 	if (lock->lksb) {
1020 		ml->flags = lock->lksb->flags;
1021 		/* send our current lvb */
1022 		if (ml->type == LKM_EXMODE ||
1023 		    ml->type == LKM_PRMODE) {
1024 			/* if it is already set, this had better be a PR
1025 			 * and it has to match */
1026 			if (mres->lvb[0] && (ml->type == LKM_EXMODE ||
1027 			    memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1028 				mlog(ML_ERROR, "mismatched lvbs!\n");
1029 				__dlm_print_one_lock_resource(lock->lockres);
1030 				BUG();
1031 			}
1032 			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1033 		}
1034 	}
1035 	ml->node = lock->ml.node;
1036 	mres->num_locks++;
1037 	/* we reached the max, send this network message */
1038 	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1039 		return 1;
1040 	return 0;
1041 }
1042 
1043 
1044 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1045 			 struct dlm_migratable_lockres *mres,
1046 			 u8 send_to, u8 flags)
1047 {
1048 	struct list_head *queue, *iter;
1049 	int total_locks, i;
1050 	u64 mig_cookie = 0;
1051 	struct dlm_lock *lock;
1052 	int ret = 0;
1053 
1054 	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1055 
1056 	mlog(0, "sending to %u\n", send_to);
1057 
1058 	total_locks = dlm_num_locks_in_lockres(res);
1059 	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1060 		/* rare, but possible */
1061 		mlog(0, "argh.  lockres has %d locks.  this will "
1062 			  "require more than one network packet to "
1063 			  "migrate\n", total_locks);
1064 		mig_cookie = dlm_get_next_mig_cookie();
1065 	}
1066 
1067 	dlm_init_migratable_lockres(mres, res->lockname.name,
1068 				    res->lockname.len, total_locks,
1069 				    mig_cookie, flags, res->owner);
1070 
1071 	total_locks = 0;
1072 	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1073 		queue = dlm_list_idx_to_ptr(res, i);
1074 		list_for_each(iter, queue) {
1075 			lock = list_entry (iter, struct dlm_lock, list);
1076 
1077 			/* add another lock. */
1078 			total_locks++;
1079 			if (!dlm_add_lock_to_array(lock, mres, i))
1080 				continue;
1081 
1082 			/* this filled the lock message,
1083 			 * we must send it immediately. */
1084 			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1085 						       res, total_locks);
1086 			if (ret < 0) {
1087 				// TODO
1088 				mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
1089 				     "returned %d, TODO\n", ret);
1090 				BUG();
1091 			}
1092 		}
1093 	}
1094 	/* flush any remaining locks */
1095 	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1096 	if (ret < 0) {
1097 		// TODO
1098 		mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, "
1099 		     "TODO\n", ret);
1100 		BUG();
1101 	}
1102 	return ret;
1103 }
1104 
1105 
1106 
1107 /*
1108  * this message will contain no more than one page worth of
1109  * recovery data, and it will work on only one lockres.
1110  * there may be many locks in this page, and we may need to wait
1111  * for additional packets to complete all the locks (rare, but
1112  * possible).
1113  */
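/*
 * rough sizing sketch (not authoritative): mres occupies a single page, so
 * the number of dlm_migratable_lock entries that fit is roughly
 *	(PAGE_SIZE - sizeof(struct dlm_migratable_lockres)) /
 *		sizeof(struct dlm_migratable_lock)
 * which is what DLM_MAX_MIGRATABLE_LOCKS caps; dlm_add_lock_to_array()
 * returns 1 at that limit and dlm_send_one_lockres() flushes the message.
 */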
1114 /*
1115  * NOTE: the allocation error cases here are scary
1116  * we really cannot afford to fail an alloc in recovery
1117  * do we spin?  returning an error only delays the problem really
1118  */
1119 
1120 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1121 {
1122 	struct dlm_ctxt *dlm = data;
1123 	struct dlm_migratable_lockres *mres =
1124 		(struct dlm_migratable_lockres *)msg->buf;
1125 	int ret = 0;
1126 	u8 real_master;
1127 	char *buf = NULL;
1128 	struct dlm_work_item *item = NULL;
1129 	struct dlm_lock_resource *res = NULL;
1130 
1131 	if (!dlm_grab(dlm))
1132 		return -EINVAL;
1133 
1134 	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1135 
1136 	real_master = mres->master;
1137 	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1138 		/* cannot migrate a lockres with no master */
1139 		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1140 	}
1141 
1142 	mlog(0, "%s message received from node %u\n",
1143 		  (mres->flags & DLM_MRES_RECOVERY) ?
1144 		  "recovery" : "migration", mres->master);
1145 	if (mres->flags & DLM_MRES_ALL_DONE)
1146 		mlog(0, "all done flag.  all lockres data received!\n");
1147 
1148 	ret = -ENOMEM;
1149 	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
1150 	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
1151 	if (!buf || !item)
1152 		goto leave;
1153 
1154 	/* lookup the lock to see if we have a secondary queue for this
1155 	 * already...  just add the locks in and this will have its owner
1156 	 * and RECOVERY flag changed when it completes. */
1157 	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1158 	if (res) {
1159 	 	/* this will get a ref on res */
1160 		/* mark it as recovering/migrating and hash it */
1161 		spin_lock(&res->spinlock);
1162 		if (mres->flags & DLM_MRES_RECOVERY) {
1163 			res->state |= DLM_LOCK_RES_RECOVERING;
1164 		} else {
1165 			if (res->state & DLM_LOCK_RES_MIGRATING) {
1166 				/* this is at least the second
1167 				 * lockres message */
1168 				mlog(0, "lock %.*s is already migrating\n",
1169 					  mres->lockname_len,
1170 					  mres->lockname);
1171 			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
1172 				/* caller should BUG */
1173 				mlog(ML_ERROR, "node is attempting to migrate "
1174 				     "lock %.*s, but marked as recovering!\n",
1175 				     mres->lockname_len, mres->lockname);
1176 				ret = -EFAULT;
1177 				spin_unlock(&res->spinlock);
1178 				goto leave;
1179 			}
1180 			res->state |= DLM_LOCK_RES_MIGRATING;
1181 		}
1182 		spin_unlock(&res->spinlock);
1183 	} else {
1184 		/* need to allocate, just like if it was
1185 		 * mastered here normally  */
1186 		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1187 		if (!res)
1188 			goto leave;
1189 
1190 		/* to match the ref that we would have gotten if
1191 		 * dlm_lookup_lockres had succeeded */
1192 		dlm_lockres_get(res);
1193 
1194 		/* mark it as recovering/migrating and hash it */
1195 		if (mres->flags & DLM_MRES_RECOVERY)
1196 			res->state |= DLM_LOCK_RES_RECOVERING;
1197 		else
1198 			res->state |= DLM_LOCK_RES_MIGRATING;
1199 
1200 		spin_lock(&dlm->spinlock);
1201 		__dlm_insert_lockres(dlm, res);
1202 		spin_unlock(&dlm->spinlock);
1203 
1204 		/* now that the new lockres is inserted,
1205 		 * make it usable by other processes */
1206 		spin_lock(&res->spinlock);
1207 		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1208 		spin_unlock(&res->spinlock);
1209 
1210 		/* add an extra ref for just-allocated lockres
1211 		 * otherwise the lockres will be purged immediately */
1212 		dlm_lockres_get(res);
1213 
1214 	}
1215 
1216 	/* at this point we have allocated everything we need,
1217 	 * and we have a hashed lockres with an extra ref and
1218 	 * the proper res->state flags. */
1219 	ret = 0;
1220 	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1221 		/* migration cannot have an unknown master */
1222 		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1223 		mlog(0, "recovery has passed me a lockres with an "
1224 			  "unknown owner.. will need to requery: "
1225 			  "%.*s\n", mres->lockname_len, mres->lockname);
1226 	} else {
1227 		spin_lock(&res->spinlock);
1228 		dlm_change_lockres_owner(dlm, res, dlm->node_num);
1229 		spin_unlock(&res->spinlock);
1230 	}
1231 
1232 	/* queue up work for dlm_mig_lockres_worker */
1233 	dlm_grab(dlm);  /* get an extra ref for the work item */
1234 	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
1235 	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1236 	item->u.ml.lockres = res; /* already have a ref */
1237 	item->u.ml.real_master = real_master;
1238 	spin_lock(&dlm->work_lock);
1239 	list_add_tail(&item->list, &dlm->work_list);
1240 	spin_unlock(&dlm->work_lock);
1241 	schedule_work(&dlm->dispatched_work);
1242 
1243 leave:
1244 	dlm_put(dlm);
1245 	if (ret < 0) {
1246 		if (buf)
1247 			kfree(buf);
1248 		if (item)
1249 			kfree(item);
1250 	}
1251 
1252 	mlog_exit(ret);
1253 	return ret;
1254 }
1255 
1256 
1257 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1258 {
1259 	struct dlm_ctxt *dlm = data;
1260 	struct dlm_migratable_lockres *mres;
1261 	int ret = 0;
1262 	struct dlm_lock_resource *res;
1263 	u8 real_master;
1264 
1265 	dlm = item->dlm;
1266 	mres = (struct dlm_migratable_lockres *)data;
1267 
1268 	res = item->u.ml.lockres;
1269 	real_master = item->u.ml.real_master;
1270 
1271 	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1272 		/* this case is super-rare. only occurs if
1273 		 * node death happens during migration. */
1274 again:
1275 		ret = dlm_lockres_master_requery(dlm, res, &real_master);
1276 		if (ret < 0) {
1277 			mlog(0, "dlm_lockres_master_requery ret=%d\n",
1278 				  ret);
1279 			goto again;
1280 		}
1281 		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1282 			mlog(0, "lockres %.*s not claimed.  "
1283 				   "this node will take it.\n",
1284 				   res->lockname.len, res->lockname.name);
1285 		} else {
1286 			mlog(0, "master needs to respond to sender "
1287 				  "that node %u still owns %.*s\n",
1288 				  real_master, res->lockname.len,
1289 				  res->lockname.name);
1290 			/* cannot touch this lockres */
1291 			goto leave;
1292 		}
1293 	}
1294 
1295 	ret = dlm_process_recovery_data(dlm, res, mres);
1296 	if (ret < 0)
1297 		mlog(0, "dlm_process_recovery_data returned  %d\n", ret);
1298 	else
1299 		mlog(0, "dlm_process_recovery_data succeeded\n");
1300 
1301 	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1302 	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1303 		ret = dlm_finish_migration(dlm, res, mres->master);
1304 		if (ret < 0)
1305 			mlog_errno(ret);
1306 	}
1307 
1308 leave:
1309 	kfree(data);
1310 	mlog_exit(ret);
1311 }
1312 
1313 
1314 
1315 int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1316 			       struct dlm_lock_resource *res, u8 *real_master)
1317 {
1318 	struct dlm_node_iter iter;
1319 	int nodenum;
1320 	int ret = 0;
1321 
1322 	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1323 
1324 	/* we only reach here if one of the two nodes in a
1325 	 * migration died while the migration was in progress.
1326 	 * at this point we need to requery the master.  we
1327 	 * know that the new_master got as far as creating
1328 	 * an mle on at least one node, but we do not know
1329 	 * if any nodes had actually cleared the mle and set
1330 	 * the master to the new_master.  the old master
1331 	 * is supposed to set the owner to UNKNOWN in the
1332 	 * event of a new_master death, so the only possible
1333 	 * responses that we can get from nodes here are
1334 	 * that the master is new_master, or that the master
1335 	 * is UNKNOWN.
1336 	 * if all nodes come back with UNKNOWN then we know
1337 	 * the lock needs remastering here.
1338 	 * if any node comes back with a valid master, check
1339 	 * to see if that master is the one that we are
1340 	 * recovering.  if so, then the new_master died and
1341 	 * we need to remaster this lock.  if not, then the
1342 	 * new_master survived and that node will respond to
1343 	 * other nodes about the owner.
1344 	 * if there is an owner, this node needs to dump this
1345 	 * lockres and alert the sender that this lockres
1346 	 * was rejected. */
1347 	spin_lock(&dlm->spinlock);
1348 	dlm_node_iter_init(dlm->domain_map, &iter);
1349 	spin_unlock(&dlm->spinlock);
1350 
1351 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1352 		/* do not send to self */
1353 		if (nodenum == dlm->node_num)
1354 			continue;
1355 		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1356 		if (ret < 0) {
1357 			mlog_errno(ret);
1358 			if (!dlm_is_host_down(ret))
1359 				BUG();
1360 			/* host is down, so answer for that node would be
1361 			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
1362 		}
1363 		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1364 			mlog(0, "lock master is %u\n", *real_master);
1365 			break;
1366 		}
1367 	}
1368 	return ret;
1369 }
1370 
1371 
1372 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1373 			  u8 nodenum, u8 *real_master)
1374 {
1375 	int ret = -EINVAL;
1376 	struct dlm_master_requery req;
1377 	int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1378 
1379 	memset(&req, 0, sizeof(req));
1380 	req.node_idx = dlm->node_num;
1381 	req.namelen = res->lockname.len;
1382 	memcpy(req.name, res->lockname.name, res->lockname.len);
1383 
1384 	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1385 				 &req, sizeof(req), nodenum, &status);
1386 	/* XXX: negative status not handled properly here. */
1387 	if (ret < 0)
1388 		mlog_errno(ret);
1389 	else {
1390 		BUG_ON(status < 0);
1391 		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1392 		*real_master = (u8) (status & 0xff);
1393 		mlog(0, "node %u responded to master requery with %u\n",
1394 			  nodenum, *real_master);
1395 		ret = 0;
1396 	}
1397 	return ret;
1398 }
1399 
1400 
1401 /* this function cannot error, so unless the sending
1402  * or receiving of the message failed, the owner can
1403  * be trusted */
1404 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
1405 {
1406 	struct dlm_ctxt *dlm = data;
1407 	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1408 	struct dlm_lock_resource *res = NULL;
1409 	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1410 	u32 flags = DLM_ASSERT_MASTER_REQUERY;
1411 
1412 	if (!dlm_grab(dlm)) {
1413 		/* since the domain has gone away on this
1414 		 * node, the proper response is UNKNOWN */
1415 		return master;
1416 	}
1417 
1418 	spin_lock(&dlm->spinlock);
1419 	res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
1420 	if (res) {
1421 		spin_lock(&res->spinlock);
1422 		master = res->owner;
1423 		if (master == dlm->node_num) {
1424 			int ret = dlm_dispatch_assert_master(dlm, res,
1425 							     0, 0, flags);
1426 			if (ret < 0) {
1427 				mlog_errno(-ENOMEM);
1428 				/* retry!? */
1429 				BUG();
1430 			}
1431 		}
1432 		spin_unlock(&res->spinlock);
1433 	}
1434 	spin_unlock(&dlm->spinlock);
1435 
1436 	dlm_put(dlm);
1437 	return master;
1438 }
1439 
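/* maps a wire list index (0/1/2) back onto the granted/converting/blocked
 * queues.  like dlm_num_locks_in_lockres() above, this relies on those three
 * list_heads being laid out consecutively in struct dlm_lock_resource. */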
1440 static inline struct list_head *
1441 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1442 {
1443 	struct list_head *ret;
1444 	BUG_ON(list_num < 0);
1445 	BUG_ON(list_num > 2);
1446 	ret = &(res->granted);
1447 	ret += list_num;
1448 	return ret;
1449 }
1450 /* TODO: do ast flush business
1451  * TODO: do MIGRATING and RECOVERING spinning
1452  */
1453 
1454 /*
1455 * NOTE about in-flight requests during migration:
1456 *
1457 * Before attempting the migrate, the master has marked the lockres as
1458 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
1459 * requests either got queued before the MIGRATING flag got set, in which
1460 * case the lock data will reflect the change and a return message is on
1461 * the way, or the request failed to get in before MIGRATING got set.  In
1462 * this case, the caller will be told to spin and wait for the MIGRATING
1463 * flag to be dropped, then recheck the master.
1464 * This holds true for the convert, cancel and unlock cases, and since lvb
1465 * updates are tied to these same messages, it applies to lvb updates as
1466 * well.  For the lock case, there is no way a lock can be on the master
1467 * queue and not be on the secondary queue since the lock is always added
1468 * locally first.  This means that the new target node will never be sent
1469 * a lock that he doesn't already have on the list.
1470 * In total, this means that the local lock is correct and should not be
1471 * updated to match the one sent by the master.  Any messages sent back
1472 * from the master before the MIGRATING flag will bring the lock properly
1473 * up-to-date, and the change will be ordered properly for the waiter.
1474 * We will *not* attempt to modify the lock underneath the waiter.
1475 */
1476 
1477 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1478 				     struct dlm_lock_resource *res,
1479 				     struct dlm_migratable_lockres *mres)
1480 {
1481 	struct dlm_migratable_lock *ml;
1482 	struct list_head *queue;
1483 	struct dlm_lock *newlock = NULL;
1484 	struct dlm_lockstatus *lksb = NULL;
1485 	int ret = 0;
1486 	int i;
1487 	struct list_head *iter;
1488 	struct dlm_lock *lock = NULL;
1489 
1490 	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1491 	for (i=0; i<mres->num_locks; i++) {
1492 		ml = &(mres->ml[i]);
1493 		BUG_ON(ml->highest_blocked != LKM_IVMODE);
1494 		newlock = NULL;
1495 		lksb = NULL;
1496 
1497 		queue = dlm_list_num_to_pointer(res, ml->list);
1498 
1499 		/* if the lock is for the local node it needs to
1500 		 * be moved to the proper location within the queue.
1501 		 * do not allocate a new lock structure. */
1502 		if (ml->node == dlm->node_num) {
1503 			/* MIGRATION ONLY! */
1504 			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1505 
1506 			spin_lock(&res->spinlock);
1507 			list_for_each(iter, queue) {
1508 				lock = list_entry (iter, struct dlm_lock, list);
1509 				if (lock->ml.cookie != ml->cookie)
1510 					lock = NULL;
1511 				else
1512 					break;
1513 			}
1514 
1515 			/* lock is always created locally first, and
1516 			 * destroyed locally last.  it must be on the list */
1517 			if (!lock) {
1518 				u64 c = ml->cookie;
1519 				mlog(ML_ERROR, "could not find local lock "
1520 					       "with cookie %u:%llu!\n",
1521 					       dlm_get_lock_cookie_node(c),
1522 					       dlm_get_lock_cookie_seq(c));
1523 				BUG();
1524 			}
1525 			BUG_ON(lock->ml.node != ml->node);
1526 
1527 			/* see NOTE above about why we do not update
1528 			 * to match the master here */
1529 
1530 			/* move the lock to its proper place */
1531 			/* do not alter lock refcount.  switching lists. */
1532 			list_del_init(&lock->list);
1533 			list_add_tail(&lock->list, queue);
1534 			spin_unlock(&res->spinlock);
1535 
1536 			mlog(0, "just reordered a local lock!\n");
1537 			continue;
1538 		}
1539 
1540 		/* lock is for another node. */
1541 		newlock = dlm_new_lock(ml->type, ml->node,
1542 				       be64_to_cpu(ml->cookie), NULL);
1543 		if (!newlock) {
1544 			ret = -ENOMEM;
1545 			goto leave;
1546 		}
1547 		lksb = newlock->lksb;
1548 		dlm_lock_attach_lockres(newlock, res);
1549 
1550 		if (ml->convert_type != LKM_IVMODE) {
1551 			BUG_ON(queue != &res->converting);
1552 			newlock->ml.convert_type = ml->convert_type;
1553 		}
1554 		lksb->flags |= (ml->flags &
1555 				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1556 
1557 		if (mres->lvb[0]) {
1558 			if (lksb->flags & DLM_LKSB_PUT_LVB) {
1559 				/* other node was trying to update
1560 				 * lvb when node died.  recreate the
1561 				 * lksb with the updated lvb. */
1562 				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1563 			} else {
1564 				/* otherwise, the node is sending its
1565 				 * most recent valid lvb info */
1566 				BUG_ON(ml->type != LKM_EXMODE &&
1567 				       ml->type != LKM_PRMODE);
1568 				if (res->lvb[0] && (ml->type == LKM_EXMODE ||
1569 				    memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1570 					mlog(ML_ERROR, "received bad lvb!\n");
1571 					__dlm_print_one_lock_resource(res);
1572 					BUG();
1573 				}
1574 				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1575 			}
1576 		}
1577 
1578 
1579 		/* NOTE:
1580 		 * wrt lock queue ordering and recovery:
1581 		 *    1. order of locks on granted queue is
1582 		 *       meaningless.
1583 		 *    2. order of locks on converting queue is
1584 		 *       LOST with the node death.  sorry charlie.
1585 		 *    3. order of locks on the blocked queue is
1586 		 *       also LOST.
1587 		 * order of locks does not affect integrity, it
1588 		 * just means that a lock request may get pushed
1589 		 * back in line as a result of the node death.
1590 		 * also note that for a given node the lock order
1591 		 * for its secondary queue locks is preserved
1592 		 * relative to each other, but clearly *not*
1593 		 * preserved relative to locks from other nodes.
1594 		 */
1595 		spin_lock(&res->spinlock);
1596 		dlm_lock_get(newlock);
1597 		list_add_tail(&newlock->list, queue);
1598 		spin_unlock(&res->spinlock);
1599 	}
1600 	mlog(0, "done running all the locks\n");
1601 
1602 leave:
1603 	if (ret < 0) {
1604 		mlog_errno(ret);
1605 		if (newlock)
1606 			dlm_lock_put(newlock);
1607 	}
1608 
1609 	mlog_exit(ret);
1610 	return ret;
1611 }
1612 
1613 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1614 				       struct dlm_lock_resource *res)
1615 {
1616 	int i;
1617 	struct list_head *queue, *iter, *iter2;
1618 	struct dlm_lock *lock;
1619 
1620 	res->state |= DLM_LOCK_RES_RECOVERING;
1621 	if (!list_empty(&res->recovering))
1622 		list_del_init(&res->recovering);
1623 	list_add_tail(&res->recovering, &dlm->reco.resources);
1624 
1625 	/* find any pending locks and put them back on proper list */
1626 	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
1627 		queue = dlm_list_idx_to_ptr(res, i);
1628 		list_for_each_safe(iter, iter2, queue) {
1629 			lock = list_entry (iter, struct dlm_lock, list);
1630 			dlm_lock_get(lock);
1631 			if (lock->convert_pending) {
1632 				/* move converting lock back to granted */
1633 				BUG_ON(i != DLM_CONVERTING_LIST);
1634 				mlog(0, "node died with convert pending "
1635 				     "on %.*s. move back to granted list.\n",
1636 				     res->lockname.len, res->lockname.name);
1637 				dlm_revert_pending_convert(res, lock);
1638 				lock->convert_pending = 0;
1639 			} else if (lock->lock_pending) {
1640 				/* remove pending lock requests completely */
1641 				BUG_ON(i != DLM_BLOCKED_LIST);
1642 				mlog(0, "node died with lock pending "
1643 				     "on %.*s. remove from blocked list and skip.\n",
1644 				     res->lockname.len, res->lockname.name);
1645 				/* lock will be floating until ref in
1646 				 * dlmlock_remote is freed after the network
1647 				 * call returns.  ok for it to not be on any
1648 				 * list since no ast can be called
1649 				 * (the master is dead). */
1650 				dlm_revert_pending_lock(res, lock);
1651 				lock->lock_pending = 0;
1652 			} else if (lock->unlock_pending) {
1653 				/* if an unlock was in progress, treat as
1654 				 * if this had completed successfully
1655 				 * before sending this lock state to the
1656 				 * new master.  note that the dlm_unlock
1657 				 * call is still responsible for calling
1658 				 * the unlockast.  that will happen after
1659 				 * the network call times out.  for now,
1660 				 * just move lists to prepare the new
1661 				 * recovery master.  */
1662 				BUG_ON(i != DLM_GRANTED_LIST);
1663 				mlog(0, "node died with unlock pending "
1664 				     "on %.*s. remove from granted list and skip.\n",
1665 				     res->lockname.len, res->lockname.name);
1666 				dlm_commit_pending_unlock(res, lock);
1667 				lock->unlock_pending = 0;
1668 			} else if (lock->cancel_pending) {
1669 				/* if a cancel was in progress, treat as
1670 				 * if this had completed successfully
1671 				 * before sending this lock state to the
1672 				 * new master */
1673 				BUG_ON(i != DLM_CONVERTING_LIST);
1674 				mlog(0, "node died with cancel pending "
1675 				     "on %.*s. move back to granted list.\n",
1676 				     res->lockname.len, res->lockname.name);
1677 				dlm_commit_pending_cancel(res, lock);
1678 				lock->cancel_pending = 0;
1679 			}
1680 			dlm_lock_put(lock);
1681 		}
1682 	}
1683 }
1684 
1685 
1686 
1687 /* removes all recovered lock resources from the recovery list.
1688  * sets res->owner to the new master.
1689  * unsets the RECOVERING flag and wakes waiters. */
1690 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1691 					      u8 dead_node, u8 new_master)
1692 {
1693 	int i;
1694 	struct list_head *iter, *iter2;
1695 	struct hlist_node *hash_iter;
1696 	struct hlist_head *bucket;
1697 
1698 	struct dlm_lock_resource *res;
1699 
1700 	mlog_entry_void();
1701 
1702 	assert_spin_locked(&dlm->spinlock);
1703 
1704 	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
1705 		res = list_entry(iter, struct dlm_lock_resource, recovering);
1706 		if (res->owner == dead_node) {
1707 			list_del_init(&res->recovering);
1708 			spin_lock(&res->spinlock);
1709 			dlm_change_lockres_owner(dlm, res, new_master);
1710 			res->state &= ~DLM_LOCK_RES_RECOVERING;
1711 			__dlm_dirty_lockres(dlm, res);
1712 			spin_unlock(&res->spinlock);
1713 			wake_up(&res->wq);
1714 		}
1715 	}
1716 
1717 	/* this will become unnecessary eventually, but
1718 	 * for now we need to run the whole hash, clear
1719 	 * the RECOVERING state and set the owner
1720 	 * if necessary */
1721 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
1722 		bucket = &(dlm->lockres_hash[i]);
1723 		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
1724 			if (res->state & DLM_LOCK_RES_RECOVERING) {
1725 				if (res->owner == dead_node) {
1726 					mlog(0, "(this=%u) res %.*s owner=%u "
1727 					     "was not on recovering list, but "
1728 					     "clearing state anyway\n",
1729 					     dlm->node_num, res->lockname.len,
1730 					     res->lockname.name, new_master);
1731 				} else if (res->owner == dlm->node_num) {
1732 					mlog(0, "(this=%u) res %.*s owner=%u "
1733 					     "was not on recovering list, "
1734 					     "owner is THIS node, clearing\n",
1735 					     dlm->node_num, res->lockname.len,
1736 					     res->lockname.name, new_master);
1737 				} else
1738 					continue;
1739 
1740 				if (!list_empty(&res->recovering)) {
1741 					mlog(0, "%s:%.*s: lockres was "
1742 					     "marked RECOVERING, owner=%u\n",
1743 					     dlm->name, res->lockname.len,
1744 					     res->lockname.name, res->owner);
1745 					list_del_init(&res->recovering);
1746 				}
1747 				spin_lock(&res->spinlock);
1748 				dlm_change_lockres_owner(dlm, res, new_master);
1749 				res->state &= ~DLM_LOCK_RES_RECOVERING;
1750 				__dlm_dirty_lockres(dlm, res);
1751 				spin_unlock(&res->spinlock);
1752 				wake_up(&res->wq);
1753 			}
1754 		}
1755 	}
1756 }
1757 
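/* decide whether a given lock makes the cached lvb untrustworthy once the
 * dead node is gone:
 *   local != 0: checking this node's own locks on a lockres it does not
 *               master; a lock that is neither EX nor PR means the cached
 *               lvb is not backed by a valid grant and must be blanked.
 *   local == 0: this node is the master checking the dead node's locks; an
 *               EX grant means the dead node may have been updating the lvb
 *               when it died, so the lvb must be blanked. */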
1758 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
1759 {
1760 	if (local) {
1761 		if (lock->ml.type != LKM_EXMODE &&
1762 		    lock->ml.type != LKM_PRMODE)
1763 			return 1;
1764 	} else if (lock->ml.type == LKM_EXMODE)
1765 		return 1;
1766 	return 0;
1767 }
1768 
1769 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
1770 			       struct dlm_lock_resource *res, u8 dead_node)
1771 {
1772 	struct list_head *iter, *queue;
1773 	struct dlm_lock *lock;
1774 	int blank_lvb = 0, local = 0;
1775 	int i;
1776 	u8 search_node;
1777 
1778 	assert_spin_locked(&dlm->spinlock);
1779 	assert_spin_locked(&res->spinlock);
1780 
1781 	if (res->owner == dlm->node_num)
1782 		/* if this node owned the lockres, and if the dead node
1783 		 * had an EX when he died, blank out the lvb */
1784 		search_node = dead_node;
1785 	else {
1786 		/* if this is a secondary lockres, and we had no EX or PR
1787 		 * locks granted, we can no longer trust the lvb */
1788 		search_node = dlm->node_num;
1789 		local = 1;  /* check local state for valid lvb */
1790 	}
1791 
1792 	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
1793 		queue = dlm_list_idx_to_ptr(res, i);
1794 		list_for_each(iter, queue) {
1795 			lock = list_entry(iter, struct dlm_lock, list);
1796 			if (lock->ml.node == search_node) {
1797 				if (dlm_lvb_needs_invalidation(lock, local)) {
1798 					/* zero the lksb lvb and lockres lvb */
1799 					blank_lvb = 1;
1800 					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
1801 				}
1802 			}
1803 		}
1804 	}
1805 
1806 	if (blank_lvb) {
1807 		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
1808 		     res->lockname.len, res->lockname.name, dead_node);
1809 		memset(res->lvb, 0, DLM_LVB_LEN);
1810 	}
1811 }
1812 
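/* strip the dead node's locks from all three queues of a lockres mastered
 * by this node.  list_del_init() plus dlm_lock_put() drops the reference
 * each queue held on its lock.  the lockres is marked dirty (but the thread
 * is not kicked yet) so any asts/basts can be re-evaluated once recovery
 * completes. */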
1813 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
1814 				struct dlm_lock_resource *res, u8 dead_node)
1815 {
1816 	struct list_head *iter, *tmpiter;
1817 	struct dlm_lock *lock;
1818 
1819 	/* this node is the lockres master:
1820 	 * 1) remove any stale locks for the dead node
1821 	 * 2) if the dead node had an EX when he died, blank out the lvb
1822 	 */
1823 	assert_spin_locked(&dlm->spinlock);
1824 	assert_spin_locked(&res->spinlock);
1825 
1826 	/* TODO: check pending_asts, pending_basts here */
1827 	list_for_each_safe(iter, tmpiter, &res->granted) {
1828 		lock = list_entry(iter, struct dlm_lock, list);
1829 		if (lock->ml.node == dead_node) {
1830 			list_del_init(&lock->list);
1831 			dlm_lock_put(lock);
1832 		}
1833 	}
1834 	list_for_each_safe(iter, tmpiter, &res->converting) {
1835 		lock = list_entry(iter, struct dlm_lock, list);
1836 		if (lock->ml.node == dead_node) {
1837 			list_del_init(&lock->list);
1838 			dlm_lock_put(lock);
1839 		}
1840 	}
1841 	list_for_each_safe(iter, tmpiter, &res->blocked) {
1842 		lock = list_entry(iter, struct dlm_lock, list);
1843 		if (lock->ml.node == dead_node) {
1844 			list_del_init(&lock->list);
1845 			dlm_lock_put(lock);
1846 		}
1847 	}
1848 
1849 	/* do not kick thread yet */
1850 	__dlm_dirty_lockres(dlm, res);
1851 }
1852 
1853 /* if this node is the recovery master, and there are no
1854  * locks for a given lockres owned by this node that are in
1855  * either PR or EX mode, zero out the lvb before requesting.
1856  *
1857  */
1858 
1859 
1860 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
1861 {
1862 	struct hlist_node *iter;
1863 	struct dlm_lock_resource *res;
1864 	int i;
1865 	struct hlist_head *bucket;
1866 	struct dlm_lock *lock;
1867 
1868 
1869 	/* purge any stale mles */
1870 	dlm_clean_master_list(dlm, dead_node);
1871 
1872 	/*
1873 	 * now clean up all lock resources.  there are two rules:
1874 	 *
1875 	 * 1) if the dead node was the master, move the lockres
1876 	 *    to the recovering list.  set the RECOVERING flag.
1877 	 *    this lockres needs to be cleaned up before it can
1878 	 *    be used further.
1879 	 *
1880 	 * 2) if this node was the master, remove all locks from
1881 	 *    each of the lockres queues that were owned by the
1882 	 *    dead node.  once recovery finishes, the dlm thread
1883 	 *    can be kicked again to see if any ASTs or BASTs
1884 	 *    need to be fired as a result.
1885 	 */
1886 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
1887 		bucket = &(dlm->lockres_hash[i]);
1888 		hlist_for_each_entry(res, iter, bucket, hash_node) {
1889 			/* always prune any $RECOVERY entries for dead nodes,
1890 			 * otherwise hangs can occur during later recovery */
1891 			if (dlm_is_recovery_lock(res->lockname.name,
1892 						 res->lockname.len)) {
1893 				spin_lock(&res->spinlock);
1894 				list_for_each_entry(lock, &res->granted, list) {
1895 					if (lock->ml.node == dead_node) {
1896 						mlog(0, "AHA! there was "
1897 						     "a $RECOVERY lock for dead "
1898 						     "node %u (%s)!\n",
1899 						     dead_node, dlm->name);
1900 						list_del_init(&lock->list);
1901 						dlm_lock_put(lock);
1902 						break;
1903 					}
1904 				}
1905 				spin_unlock(&res->spinlock);
1906 				continue;
1907 			}
1908 			spin_lock(&res->spinlock);
1909 			/* zero the lvb if necessary */
1910 			dlm_revalidate_lvb(dlm, res, dead_node);
1911 			if (res->owner == dead_node)
1912 				dlm_move_lockres_to_recovery_list(dlm, res);
1913 			else if (res->owner == dlm->node_num) {
1914 				dlm_free_dead_locks(dlm, res, dead_node);
1915 				__dlm_lockres_calc_usage(dlm, res);
1916 			}
1917 			spin_unlock(&res->spinlock);
1918 		}
1919 	}
1921 }
1922 
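/* core node-down bookkeeping, called with dlm->spinlock held: drop the node
 * from live_nodes_map and domain_map, clear any stale join state, run the
 * local lockres cleanup (unless the node is already marked for recovery),
 * notify anything attached to the heartbeat events, wake migration waiters,
 * and finally flag the node in recovery_map for the recovery thread. */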
1923 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
1924 {
1925 	assert_spin_locked(&dlm->spinlock);
1926 
1927 	/* check to see if the node is already considered dead */
1928 	if (!test_bit(idx, dlm->live_nodes_map)) {
1929 		mlog(0, "for domain %s, node %d is already dead. "
1930 		     "another node likely did recovery already.\n",
1931 		     dlm->name, idx);
1932 		return;
1933 	}
1934 
1935 	/* check to see if we do not care about this node */
1936 	if (!test_bit(idx, dlm->domain_map)) {
1937 		/* This also catches the case that we get a node down
1938 		 * but haven't joined the domain yet. */
1939 		mlog(0, "node %u already removed from domain!\n", idx);
1940 		return;
1941 	}
1942 
1943 	clear_bit(idx, dlm->live_nodes_map);
1944 
1945 	/* Clean up join state on node death. */
1946 	if (dlm->joining_node == idx) {
1947 		mlog(0, "Clearing join state for node %u\n", idx);
1948 		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1949 	}
1950 
1951 	/* make sure local cleanup occurs before the heartbeat events */
1952 	if (!test_bit(idx, dlm->recovery_map))
1953 		dlm_do_local_recovery_cleanup(dlm, idx);
1954 
1955 	/* notify anything attached to the heartbeat events */
1956 	dlm_hb_event_notify_attached(dlm, idx, 0);
1957 
1958 	mlog(0, "node %u being removed from domain map!\n", idx);
1959 	clear_bit(idx, dlm->domain_map);
1960 	/* wake up migration waiters if a node goes down.
1961 	 * perhaps later we can genericize this for other waiters. */
1962 	wake_up(&dlm->migration_wq);
1963 
1964 	if (test_bit(idx, dlm->recovery_map))
1965 		mlog(0, "domain %s, node %u already added "
1966 		     "to recovery map!\n", dlm->name, idx);
1967 	else
1968 		set_bit(idx, dlm->recovery_map);
1969 }
1970 
1971 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
1972 {
1973 	struct dlm_ctxt *dlm = data;
1974 
1975 	if (!dlm_grab(dlm))
1976 		return;
1977 
1978 	spin_lock(&dlm->spinlock);
1979 	__dlm_hb_node_down(dlm, idx);
1980 	spin_unlock(&dlm->spinlock);
1981 
1982 	dlm_put(dlm);
1983 }
1984 
1985 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
1986 {
1987 	struct dlm_ctxt *dlm = data;
1988 
1989 	if (!dlm_grab(dlm))
1990 		return;
1991 
1992 	spin_lock(&dlm->spinlock);
1993 	set_bit(idx, dlm->live_nodes_map);
1994 	/* do NOT notify mles attached to the heartbeat events.
1995 	 * new nodes are of no interest to mastery until they have joined. */
1996 	spin_unlock(&dlm->spinlock);
1997 
1998 	dlm_put(dlm);
1999 }
2000 
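/* the ast/bast/unlock-ast callbacks for the $RECOVERY lock only log; the
 * outcome of the recovery-master race is decided by the dlmlock() return
 * code and dlm->reco.new_master in dlm_pick_recovery_master() below, not by
 * anything these callbacks do. */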
2001 static void dlm_reco_ast(void *astdata)
2002 {
2003 	struct dlm_ctxt *dlm = astdata;
2004 	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2005 	     dlm->node_num, dlm->name);
2006 }
2007 static void dlm_reco_bast(void *astdata, int blocked_type)
2008 {
2009 	struct dlm_ctxt *dlm = astdata;
2010 	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2011 	     dlm->node_num, dlm->name);
2012 }
2013 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2014 {
2015 	mlog(0, "unlockast for recovery lock fired!\n");
2016 }
2017 
2018 /*
2019  * dlm_pick_recovery_master will continually attempt to use
2020  * dlmlock() on the special "$RECOVERY" lockres with the
2021  * LKM_NOQUEUE flag to get an EX.  every thread that enters
2022  * this function on each node racing to become the recovery
2023  * master will not stop attempting this until either:
2024  * a) this node gets the EX (and becomes the recovery master),
2025  * or b) dlm->reco.new_master gets set to some nodenum
2026  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2027  * so each time a recovery master is needed, the entire cluster
2028  * will sync at this point.  if the new master dies, that will
2029  * be detected in dlm_do_recovery */
2030 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2031 {
2032 	enum dlm_status ret;
2033 	struct dlm_lockstatus lksb;
2034 	int status = -EINVAL;
2035 
2036 	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2037 	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2038 again:
2039 	memset(&lksb, 0, sizeof(lksb));
2040 
2041 	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2042 		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
2043 
2044 	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2045 	     dlm->name, ret, lksb.status);
2046 
2047 	if (ret == DLM_NORMAL) {
2048 		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2049 		     dlm->name, dlm->node_num);
2050 
2051 		/* got the EX lock.  check to see if another node
2052 		 * just became the reco master */
2053 		if (dlm_reco_master_ready(dlm)) {
2054 			mlog(0, "%s: got reco EX lock, but %u will "
2055 			     "do the recovery\n", dlm->name,
2056 			     dlm->reco.new_master);
2057 			status = -EEXIST;
2058 		} else {
2059 			status = 0;
2060 
2061 			/* see if recovery was already finished elsewhere */
2062 			spin_lock(&dlm->spinlock);
2063 			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2064 				status = -EINVAL;
2065 				mlog(0, "%s: got reco EX lock, but "
2066 				     "node got recovered already\n", dlm->name);
2067 				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2068 					mlog(ML_ERROR, "%s: new master is %u "
2069 					     "but no dead node!\n",
2070 					     dlm->name, dlm->reco.new_master);
2071 					BUG();
2072 				}
2073 			}
2074 			spin_unlock(&dlm->spinlock);
2075 		}
2076 
2077 		/* if this node has actually become the recovery master,
2078 		 * set the master and send the messages to begin recovery */
2079 		if (!status) {
2080 			mlog(0, "%s: dead=%u, this=%u, sending "
2081 			     "begin_reco now\n", dlm->name,
2082 			     dlm->reco.dead_node, dlm->node_num);
2083 			status = dlm_send_begin_reco_message(dlm,
2084 				      dlm->reco.dead_node);
2085 			/* this always succeeds */
2086 			BUG_ON(status);
2087 
2088 			/* set the new_master to this node */
2089 			spin_lock(&dlm->spinlock);
2090 			dlm->reco.new_master = dlm->node_num;
2091 			spin_unlock(&dlm->spinlock);
2092 		}
2093 
2094 		/* recovery lock is a special case.  ast will not get fired,
2095 		 * so just go ahead and unlock it. */
2096 		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2097 		if (ret == DLM_DENIED) {
2098 			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2099 			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2100 		}
2101 		if (ret != DLM_NORMAL) {
2102 			/* this would really suck. this could only happen
2103 			 * if there was a network error during the unlock
2104 			 * because of node death.  this means the unlock
2105 			 * is actually "done" and the lock structure is
2106 			 * even freed.  we can continue, but only
2107 			 * because this specific lock name is special. */
2108 			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2109 		}
2110 	} else if (ret == DLM_NOTQUEUED) {
2111 		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2112 		     dlm->name, dlm->node_num);
2113 		/* another node is master. wait on
2114 		 * reco.new_master != O2NM_INVALID_NODE_NUM
2115 		 * for at most one second */
2116 		wait_event_timeout(dlm->dlm_reco_thread_wq,
2117 					 dlm_reco_master_ready(dlm),
2118 					 msecs_to_jiffies(1000));
2119 		if (!dlm_reco_master_ready(dlm)) {
2120 			mlog(0, "%s: reco master taking awhile\n",
2121 			     dlm->name);
2122 			goto again;
2123 		}
2124 		/* another node has informed this one that it is reco master */
2125 		mlog(0, "%s: reco master %u is ready to recover %u\n",
2126 		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2127 		status = -EEXIST;
2128 	} else {
2129 		struct dlm_lock_resource *res;
2130 
2131 		/* dlmlock returned something other than NOTQUEUED or NORMAL */
2132 		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2133 		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2134 		     dlm_errname(lksb.status));
2135 		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2136 					 DLM_RECOVERY_LOCK_NAME_LEN);
2137 		if (res) {
2138 			dlm_print_one_lock_resource(res);
2139 			dlm_lockres_put(res);
2140 		} else {
2141 			mlog(ML_ERROR, "recovery lock not found\n");
2142 		}
2143 		BUG();
2144 	}
2145 
2146 	return status;
2147 }
2148 
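/* broadcast DLM_BEGIN_RECO_MSG to every node in the domain map except this
 * node and the dead node.  a peer that dies mid-broadcast is skipped; any
 * other send failure (e.g. a transient -ENOMEM in the network stack) is
 * retried after a short sleep until it succeeds. */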
2149 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2150 {
2151 	struct dlm_begin_reco br;
2152 	int ret = 0;
2153 	struct dlm_node_iter iter;
2154 	int nodenum;
2155 	int status;
2156 
2157 	mlog_entry("%u\n", dead_node);
2158 
2159 	mlog(0, "dead node is %u\n", dead_node);
2160 
2161 	spin_lock(&dlm->spinlock);
2162 	dlm_node_iter_init(dlm->domain_map, &iter);
2163 	spin_unlock(&dlm->spinlock);
2164 
2165 	clear_bit(dead_node, iter.node_map);
2166 
2167 	memset(&br, 0, sizeof(br));
2168 	br.node_idx = dlm->node_num;
2169 	br.dead_node = dead_node;
2170 
2171 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2172 		ret = 0;
2173 		if (nodenum == dead_node) {
2174 			mlog(0, "not sending begin reco to dead node "
2175 				  "%u\n", dead_node);
2176 			continue;
2177 		}
2178 		if (nodenum == dlm->node_num) {
2179 			mlog(0, "not sending begin reco to self\n");
2180 			continue;
2181 		}
2182 retry:
2183 		ret = -EINVAL;
2184 		mlog(0, "attempting to send begin reco msg to %d\n",
2185 			  nodenum);
2186 		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2187 					 &br, sizeof(br), nodenum, &status);
2188 		/* negative status is handled ok by caller here */
2189 		if (ret >= 0)
2190 			ret = status;
2191 		if (dlm_is_host_down(ret)) {
2192 			/* node is down.  not involved in recovery
2193 			 * so just keep going */
2194 			mlog(0, "%s: node %u was down when sending "
2195 			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2196 			ret = 0;
2197 		}
2198 		if (ret < 0) {
2199 			struct dlm_lock_resource *res;
2200 			/* this is now a serious problem, possibly ENOMEM
2201 			 * in the network stack.  must retry */
2202 			mlog_errno(ret);
2203 			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2204 			    " returned %d\n", dlm->name, nodenum, ret);
2205 			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2206 						 DLM_RECOVERY_LOCK_NAME_LEN);
2207 			if (res) {
2208 				dlm_print_one_lock_resource(res);
2209 				dlm_lockres_put(res);
2210 			} else {
2211 				mlog(ML_ERROR, "recovery lock not found\n");
2212 			}
2213 			/* sleep for a bit in hopes that we can avoid
2214 			 * another ENOMEM */
2215 			msleep(100);
2216 			goto retry;
2217 		}
2218 	}
2219 
2220 	return ret;
2221 }
2222 
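/* handler for DLM_BEGIN_RECO_MSG: record the sender as the new recovery
 * master and the node it is recovering as dlm->reco.dead_node.  if this node
 * has not yet seen the death via heartbeat, the dead node is forced down
 * here so the local cleanup runs, then the recovery thread is kicked. */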
2223 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2224 {
2225 	struct dlm_ctxt *dlm = data;
2226 	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2227 
2228 	/* ok to return 0, domain has gone away */
2229 	if (!dlm_grab(dlm))
2230 		return 0;
2231 
2232 	mlog(0, "node %u wants to recover node %u\n",
2233 		  br->node_idx, br->dead_node);
2234 
2235 	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2236 
2237 	spin_lock(&dlm->spinlock);
2238 	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2239 		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2240 			mlog(0, "%s: new_master %u died, changing "
2241 			     "to %u\n", dlm->name, dlm->reco.new_master,
2242 			     br->node_idx);
2243 		} else {
2244 			mlog(0, "%s: new_master %u NOT DEAD, changing "
2245 			     "to %u\n", dlm->name, dlm->reco.new_master,
2246 			     br->node_idx);
2247 			/* may not have seen the new master as dead yet */
2248 		}
2249 	}
2250 	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2251 		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2252 		     "node %u changing it to %u\n", dlm->name,
2253 		     dlm->reco.dead_node, br->node_idx, br->dead_node);
2254 	}
2255 	dlm->reco.new_master = br->node_idx;
2256 	dlm->reco.dead_node = br->dead_node;
2257 	if (!test_bit(br->dead_node, dlm->recovery_map)) {
2258 		mlog(0, "recovery master %u sees %u as dead, but this "
2259 		     "node has not yet.  marking %u as dead\n",
2260 		     br->node_idx, br->dead_node, br->dead_node);
2261 		if (!test_bit(br->dead_node, dlm->domain_map) ||
2262 		    !test_bit(br->dead_node, dlm->live_nodes_map))
2263 			mlog(0, "%u not in domain/live_nodes map "
2264 			     "so setting it in reco map manually\n",
2265 			     br->dead_node);
2266 		/* force the recovery cleanup in __dlm_hb_node_down
2267 		 * both of these will be cleared in a moment */
2268 		set_bit(br->dead_node, dlm->domain_map);
2269 		set_bit(br->dead_node, dlm->live_nodes_map);
2270 		__dlm_hb_node_down(dlm, br->dead_node);
2271 	}
2272 	spin_unlock(&dlm->spinlock);
2273 
2274 	dlm_kick_recovery_thread(dlm);
2275 	dlm_put(dlm);
2276 	return 0;
2277 }
2278 
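/* broadcast DLM_FINALIZE_RECO_MSG once this node, as recovery master, has
 * finished remastering: every other domain member clears the RECOVERING
 * state for the dead node's lockreses and points them at the new owner.
 * a node that dies after recovery has finished does not fail the broadcast. */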
2279 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2280 {
2281 	int ret = 0;
2282 	struct dlm_finalize_reco fr;
2283 	struct dlm_node_iter iter;
2284 	int nodenum;
2285 	int status;
2286 
2287 	mlog(0, "finishing recovery for node %s:%u\n",
2288 	     dlm->name, dlm->reco.dead_node);
2289 
2290 	spin_lock(&dlm->spinlock);
2291 	dlm_node_iter_init(dlm->domain_map, &iter);
2292 	spin_unlock(&dlm->spinlock);
2293 
2294 	memset(&fr, 0, sizeof(fr));
2295 	fr.node_idx = dlm->node_num;
2296 	fr.dead_node = dlm->reco.dead_node;
2297 
2298 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2299 		if (nodenum == dlm->node_num)
2300 			continue;
2301 		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2302 					 &fr, sizeof(fr), nodenum, &status);
2303 		if (ret >= 0) {
2304 			ret = status;
2305 			if (dlm_is_host_down(ret)) {
2306 				/* this has no effect on this recovery
2307 				 * session, so set the status to zero to
2308 				 * finish out the last recovery */
2309 				mlog(ML_ERROR, "node %u went down after this "
2310 				     "node finished recovery.\n", nodenum);
2311 				ret = 0;
2312 			}
2313 		}
2314 		if (ret < 0) {
2315 			mlog_errno(ret);
2316 			break;
2317 		}
2318 	}
2319 
2320 	return ret;
2321 }
2322 
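/* handler for DLM_FINALIZE_RECO_MSG: verify the sender really is the agreed
 * recovery master and that the dead node matches, finish the local lockres
 * recovery, then reset the recovery state and kick the recovery thread. */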
2323 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2324 {
2325 	struct dlm_ctxt *dlm = data;
2326 	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2327 
2328 	/* ok to return 0, domain has gone away */
2329 	if (!dlm_grab(dlm))
2330 		return 0;
2331 
2332 	mlog(0, "node %u finalizing recovery of node %u\n",
2333 	     fr->node_idx, fr->dead_node);
2334 
2335 	spin_lock(&dlm->spinlock);
2336 
2337 	if (dlm->reco.new_master != fr->node_idx) {
2338 		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2339 		     "%u is supposed to be the new master, dead=%u\n",
2340 		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
2341 		BUG();
2342 	}
2343 	if (dlm->reco.dead_node != fr->dead_node) {
2344 		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2345 		     "node %u, but node %u is supposed to be dead\n",
2346 		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2347 		BUG();
2348 	}
2349 
2350 	dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2351 
2352 	spin_unlock(&dlm->spinlock);
2353 
2354 	dlm_reset_recovery(dlm);
2355 
2356 	dlm_kick_recovery_thread(dlm);
2357 	dlm_put(dlm);
2358 	return 0;
2359 }
2360