xref: /linux/fs/ocfs2/dlm/dlmmaster.c (revision 957e3facd147510f2cf8780e38606f1d707f0e33)
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmmod.c
5  *
6  * standalone DLM module
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 021110-1307, USA.
24  *
25  */
26 
27 
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/init.h>
34 #include <linux/sysctl.h>
35 #include <linux/random.h>
36 #include <linux/blkdev.h>
37 #include <linux/socket.h>
38 #include <linux/inet.h>
39 #include <linux/spinlock.h>
40 #include <linux/delay.h>
41 
42 
43 #include "cluster/heartbeat.h"
44 #include "cluster/nodemanager.h"
45 #include "cluster/tcp.h"
46 
47 #include "dlmapi.h"
48 #include "dlmcommon.h"
49 #include "dlmdomain.h"
50 #include "dlmdebug.h"
51 
52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
53 #include "cluster/masklog.h"
54 
55 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
56 			      struct dlm_master_list_entry *mle,
57 			      struct o2nm_node *node,
58 			      int idx);
59 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
60 			    struct dlm_master_list_entry *mle,
61 			    struct o2nm_node *node,
62 			    int idx);
63 
64 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
65 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
66 				struct dlm_lock_resource *res,
67 				void *nodemap, u32 flags);
68 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
69 
70 static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
71 				struct dlm_master_list_entry *mle,
72 				const char *name,
73 				unsigned int namelen)
74 {
75 	if (dlm != mle->dlm)
76 		return 0;
77 
78 	if (namelen != mle->mnamelen ||
79 	    memcmp(name, mle->mname, namelen) != 0)
80 		return 0;
81 
82 	return 1;
83 }
84 
85 static struct kmem_cache *dlm_lockres_cache;
86 static struct kmem_cache *dlm_lockname_cache;
87 static struct kmem_cache *dlm_mle_cache;
88 
89 static void dlm_mle_release(struct kref *kref);
90 static void dlm_init_mle(struct dlm_master_list_entry *mle,
91 			enum dlm_mle_type type,
92 			struct dlm_ctxt *dlm,
93 			struct dlm_lock_resource *res,
94 			const char *name,
95 			unsigned int namelen);
96 static void dlm_put_mle(struct dlm_master_list_entry *mle);
97 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
98 static int dlm_find_mle(struct dlm_ctxt *dlm,
99 			struct dlm_master_list_entry **mle,
100 			char *name, unsigned int namelen);
101 
102 static int dlm_do_master_request(struct dlm_lock_resource *res,
103 				 struct dlm_master_list_entry *mle, int to);
104 
105 
106 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
107 				     struct dlm_lock_resource *res,
108 				     struct dlm_master_list_entry *mle,
109 				     int *blocked);
110 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
111 				    struct dlm_lock_resource *res,
112 				    struct dlm_master_list_entry *mle,
113 				    int blocked);
114 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
115 				 struct dlm_lock_resource *res,
116 				 struct dlm_master_list_entry *mle,
117 				 struct dlm_master_list_entry **oldmle,
118 				 const char *name, unsigned int namelen,
119 				 u8 new_master, u8 master);
120 
121 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
122 				    struct dlm_lock_resource *res);
123 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
124 				      struct dlm_lock_resource *res);
125 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
126 				       struct dlm_lock_resource *res,
127 				       u8 target);
128 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
129 				       struct dlm_lock_resource *res);
130 
131 
132 int dlm_is_host_down(int errno)
133 {
134 	switch (errno) {
135 		case -EBADF:
136 		case -ECONNREFUSED:
137 		case -ENOTCONN:
138 		case -ECONNRESET:
139 		case -EPIPE:
140 		case -EHOSTDOWN:
141 		case -EHOSTUNREACH:
142 		case -ETIMEDOUT:
143 		case -ECONNABORTED:
144 		case -ENETDOWN:
145 		case -ENETUNREACH:
146 		case -ENETRESET:
147 		case -ESHUTDOWN:
148 		case -ENOPROTOOPT:
149 		case -EINVAL:   /* if returned from our tcp code,
150 				   this means there is no socket */
151 			return 1;
152 	}
153 	return 0;
154 }
155 
156 
157 /*
158  * MASTER LIST FUNCTIONS
159  */
160 
161 
162 /*
163  * regarding master list entries and heartbeat callbacks:
164  *
165  * in order to avoid sleeping and allocation that occurs in
166  * heartbeat, master list entries are simply attached to the
167  * dlm's established heartbeat callbacks.  the mle is attached
168  * when it is created, and since the dlm->spinlock is held at
169  * that time, any heartbeat event will be properly discovered
170  * by the mle.  the mle needs to be detached from the
171  * dlm->mle_hb_events list as soon as heartbeat events are no
172  * longer useful to the mle, and before the mle is freed.
173  *
174  * as a general rule, heartbeat events are no longer needed by
175  * the mle once an "answer" regarding the lock master has been
176  * received.
177  */
178 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
179 					      struct dlm_master_list_entry *mle)
180 {
181 	assert_spin_locked(&dlm->spinlock);
182 
183 	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
184 }
185 
186 
187 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
188 					      struct dlm_master_list_entry *mle)
189 {
190 	if (!list_empty(&mle->hb_events))
191 		list_del_init(&mle->hb_events);
192 }
193 
194 
195 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
196 					    struct dlm_master_list_entry *mle)
197 {
198 	spin_lock(&dlm->spinlock);
199 	__dlm_mle_detach_hb_events(dlm, mle);
200 	spin_unlock(&dlm->spinlock);
201 }
202 
203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
204 {
205 	struct dlm_ctxt *dlm;
206 	dlm = mle->dlm;
207 
208 	assert_spin_locked(&dlm->spinlock);
209 	assert_spin_locked(&dlm->master_lock);
210 	mle->inuse++;
211 	kref_get(&mle->mle_refs);
212 }
213 
214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
215 {
216 	struct dlm_ctxt *dlm;
217 	dlm = mle->dlm;
218 
219 	spin_lock(&dlm->spinlock);
220 	spin_lock(&dlm->master_lock);
221 	mle->inuse--;
222 	__dlm_put_mle(mle);
223 	spin_unlock(&dlm->master_lock);
224 	spin_unlock(&dlm->spinlock);
225 
226 }
227 
228 /* remove from list and free */
229 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
230 {
231 	struct dlm_ctxt *dlm;
232 	dlm = mle->dlm;
233 
234 	assert_spin_locked(&dlm->spinlock);
235 	assert_spin_locked(&dlm->master_lock);
236 	if (!atomic_read(&mle->mle_refs.refcount)) {
237 		/* this may or may not crash, but who cares.
238 		 * it's a BUG. */
239 		mlog(ML_ERROR, "bad mle: %p\n", mle);
240 		dlm_print_one_mle(mle);
241 		BUG();
242 	} else
243 		kref_put(&mle->mle_refs, dlm_mle_release);
244 }
245 
246 
247 /* must not have any spinlocks coming in */
248 static void dlm_put_mle(struct dlm_master_list_entry *mle)
249 {
250 	struct dlm_ctxt *dlm;
251 	dlm = mle->dlm;
252 
253 	spin_lock(&dlm->spinlock);
254 	spin_lock(&dlm->master_lock);
255 	__dlm_put_mle(mle);
256 	spin_unlock(&dlm->master_lock);
257 	spin_unlock(&dlm->spinlock);
258 }
259 
260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
261 {
262 	kref_get(&mle->mle_refs);
263 }
264 
265 static void dlm_init_mle(struct dlm_master_list_entry *mle,
266 			enum dlm_mle_type type,
267 			struct dlm_ctxt *dlm,
268 			struct dlm_lock_resource *res,
269 			const char *name,
270 			unsigned int namelen)
271 {
272 	assert_spin_locked(&dlm->spinlock);
273 
274 	mle->dlm = dlm;
275 	mle->type = type;
276 	INIT_HLIST_NODE(&mle->master_hash_node);
277 	INIT_LIST_HEAD(&mle->hb_events);
278 	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
279 	spin_lock_init(&mle->spinlock);
280 	init_waitqueue_head(&mle->wq);
281 	atomic_set(&mle->woken, 0);
282 	kref_init(&mle->mle_refs);
283 	memset(mle->response_map, 0, sizeof(mle->response_map));
284 	mle->master = O2NM_MAX_NODES;
285 	mle->new_master = O2NM_MAX_NODES;
286 	mle->inuse = 0;
287 
288 	BUG_ON(mle->type != DLM_MLE_BLOCK &&
289 	       mle->type != DLM_MLE_MASTER &&
290 	       mle->type != DLM_MLE_MIGRATION);
291 
292 	if (mle->type == DLM_MLE_MASTER) {
293 		BUG_ON(!res);
294 		mle->mleres = res;
295 		memcpy(mle->mname, res->lockname.name, res->lockname.len);
296 		mle->mnamelen = res->lockname.len;
297 		mle->mnamehash = res->lockname.hash;
298 	} else {
299 		BUG_ON(!name);
300 		mle->mleres = NULL;
301 		memcpy(mle->mname, name, namelen);
302 		mle->mnamelen = namelen;
303 		mle->mnamehash = dlm_lockid_hash(name, namelen);
304 	}
305 
306 	atomic_inc(&dlm->mle_tot_count[mle->type]);
307 	atomic_inc(&dlm->mle_cur_count[mle->type]);
308 
309 	/* copy off the node_map and register hb callbacks on our copy */
310 	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
311 	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
312 	clear_bit(dlm->node_num, mle->vote_map);
313 	clear_bit(dlm->node_num, mle->node_map);
314 
315 	/* attach the mle to the domain node up/down events */
316 	__dlm_mle_attach_hb_events(dlm, mle);
317 }
318 
319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
320 {
321 	assert_spin_locked(&dlm->spinlock);
322 	assert_spin_locked(&dlm->master_lock);
323 
324 	if (!hlist_unhashed(&mle->master_hash_node))
325 		hlist_del_init(&mle->master_hash_node);
326 }
327 
328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
329 {
330 	struct hlist_head *bucket;
331 
332 	assert_spin_locked(&dlm->master_lock);
333 
334 	bucket = dlm_master_hash(dlm, mle->mnamehash);
335 	hlist_add_head(&mle->master_hash_node, bucket);
336 }
337 
338 /* returns 1 if found, 0 if not */
339 static int dlm_find_mle(struct dlm_ctxt *dlm,
340 			struct dlm_master_list_entry **mle,
341 			char *name, unsigned int namelen)
342 {
343 	struct dlm_master_list_entry *tmpmle;
344 	struct hlist_head *bucket;
345 	unsigned int hash;
346 
347 	assert_spin_locked(&dlm->master_lock);
348 
349 	hash = dlm_lockid_hash(name, namelen);
350 	bucket = dlm_master_hash(dlm, hash);
351 	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
352 		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
353 			continue;
354 		dlm_get_mle(tmpmle);
355 		*mle = tmpmle;
356 		return 1;
357 	}
358 	return 0;
359 }
360 
361 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
362 {
363 	struct dlm_master_list_entry *mle;
364 
365 	assert_spin_locked(&dlm->spinlock);
366 
367 	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
368 		if (node_up)
369 			dlm_mle_node_up(dlm, mle, NULL, idx);
370 		else
371 			dlm_mle_node_down(dlm, mle, NULL, idx);
372 	}
373 }
374 
375 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
376 			      struct dlm_master_list_entry *mle,
377 			      struct o2nm_node *node, int idx)
378 {
379 	spin_lock(&mle->spinlock);
380 
381 	if (!test_bit(idx, mle->node_map))
382 		mlog(0, "node %u already removed from nodemap!\n", idx);
383 	else
384 		clear_bit(idx, mle->node_map);
385 
386 	spin_unlock(&mle->spinlock);
387 }
388 
389 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
390 			    struct dlm_master_list_entry *mle,
391 			    struct o2nm_node *node, int idx)
392 {
393 	spin_lock(&mle->spinlock);
394 
395 	if (test_bit(idx, mle->node_map))
396 		mlog(0, "node %u already in node map!\n", idx);
397 	else
398 		set_bit(idx, mle->node_map);
399 
400 	spin_unlock(&mle->spinlock);
401 }
402 
403 
404 int dlm_init_mle_cache(void)
405 {
406 	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
407 					  sizeof(struct dlm_master_list_entry),
408 					  0, SLAB_HWCACHE_ALIGN,
409 					  NULL);
410 	if (dlm_mle_cache == NULL)
411 		return -ENOMEM;
412 	return 0;
413 }
414 
415 void dlm_destroy_mle_cache(void)
416 {
417 	if (dlm_mle_cache)
418 		kmem_cache_destroy(dlm_mle_cache);
419 }
420 
421 static void dlm_mle_release(struct kref *kref)
422 {
423 	struct dlm_master_list_entry *mle;
424 	struct dlm_ctxt *dlm;
425 
426 	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
427 	dlm = mle->dlm;
428 
429 	assert_spin_locked(&dlm->spinlock);
430 	assert_spin_locked(&dlm->master_lock);
431 
432 	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
433 	     mle->type);
434 
435 	/* remove from list if not already */
436 	__dlm_unlink_mle(dlm, mle);
437 
438 	/* detach the mle from the domain node up/down events */
439 	__dlm_mle_detach_hb_events(dlm, mle);
440 
441 	atomic_dec(&dlm->mle_cur_count[mle->type]);
442 
443 	/* NOTE: kfree under spinlock here.
444 	 * if this is bad, we can move this to a freelist. */
445 	kmem_cache_free(dlm_mle_cache, mle);
446 }
447 
448 
449 /*
450  * LOCK RESOURCE FUNCTIONS
451  */
452 
453 int dlm_init_master_caches(void)
454 {
455 	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
456 					      sizeof(struct dlm_lock_resource),
457 					      0, SLAB_HWCACHE_ALIGN, NULL);
458 	if (!dlm_lockres_cache)
459 		goto bail;
460 
461 	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
462 					       DLM_LOCKID_NAME_MAX, 0,
463 					       SLAB_HWCACHE_ALIGN, NULL);
464 	if (!dlm_lockname_cache)
465 		goto bail;
466 
467 	return 0;
468 bail:
469 	dlm_destroy_master_caches();
470 	return -ENOMEM;
471 }
472 
473 void dlm_destroy_master_caches(void)
474 {
475 	if (dlm_lockname_cache) {
476 		kmem_cache_destroy(dlm_lockname_cache);
477 		dlm_lockname_cache = NULL;
478 	}
479 
480 	if (dlm_lockres_cache) {
481 		kmem_cache_destroy(dlm_lockres_cache);
482 		dlm_lockres_cache = NULL;
483 	}
484 }
485 
486 static void dlm_lockres_release(struct kref *kref)
487 {
488 	struct dlm_lock_resource *res;
489 	struct dlm_ctxt *dlm;
490 
491 	res = container_of(kref, struct dlm_lock_resource, refs);
492 	dlm = res->dlm;
493 
494 	/* This should not happen -- all lockres' have a name
495 	 * associated with them at init time. */
496 	BUG_ON(!res->lockname.name);
497 
498 	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
499 	     res->lockname.name);
500 
501 	spin_lock(&dlm->track_lock);
502 	if (!list_empty(&res->tracking))
503 		list_del_init(&res->tracking);
504 	else {
505 		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
506 		     res->lockname.len, res->lockname.name);
507 		dlm_print_one_lock_resource(res);
508 	}
509 	spin_unlock(&dlm->track_lock);
510 
511 	atomic_dec(&dlm->res_cur_count);
512 
513 	if (!hlist_unhashed(&res->hash_node) ||
514 	    !list_empty(&res->granted) ||
515 	    !list_empty(&res->converting) ||
516 	    !list_empty(&res->blocked) ||
517 	    !list_empty(&res->dirty) ||
518 	    !list_empty(&res->recovering) ||
519 	    !list_empty(&res->purge)) {
520 		mlog(ML_ERROR,
521 		     "Going to BUG for resource %.*s."
522 		     "  We're on a list! [%c%c%c%c%c%c%c]\n",
523 		     res->lockname.len, res->lockname.name,
524 		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
525 		     !list_empty(&res->granted) ? 'G' : ' ',
526 		     !list_empty(&res->converting) ? 'C' : ' ',
527 		     !list_empty(&res->blocked) ? 'B' : ' ',
528 		     !list_empty(&res->dirty) ? 'D' : ' ',
529 		     !list_empty(&res->recovering) ? 'R' : ' ',
530 		     !list_empty(&res->purge) ? 'P' : ' ');
531 
532 		dlm_print_one_lock_resource(res);
533 	}
534 
535 	/* By the time we're ready to blow this guy away, we shouldn't
536 	 * be on any lists. */
537 	BUG_ON(!hlist_unhashed(&res->hash_node));
538 	BUG_ON(!list_empty(&res->granted));
539 	BUG_ON(!list_empty(&res->converting));
540 	BUG_ON(!list_empty(&res->blocked));
541 	BUG_ON(!list_empty(&res->dirty));
542 	BUG_ON(!list_empty(&res->recovering));
543 	BUG_ON(!list_empty(&res->purge));
544 
545 	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
546 
547 	kmem_cache_free(dlm_lockres_cache, res);
548 }
549 
550 void dlm_lockres_put(struct dlm_lock_resource *res)
551 {
552 	kref_put(&res->refs, dlm_lockres_release);
553 }
554 
555 static void dlm_init_lockres(struct dlm_ctxt *dlm,
556 			     struct dlm_lock_resource *res,
557 			     const char *name, unsigned int namelen)
558 {
559 	char *qname;
560 
561 	/* If we memset here, we lose our reference to the kmalloc'd
562 	 * res->lockname.name, so be sure to init every field
563 	 * correctly! */
564 
565 	qname = (char *) res->lockname.name;
566 	memcpy(qname, name, namelen);
567 
568 	res->lockname.len = namelen;
569 	res->lockname.hash = dlm_lockid_hash(name, namelen);
570 
571 	init_waitqueue_head(&res->wq);
572 	spin_lock_init(&res->spinlock);
573 	INIT_HLIST_NODE(&res->hash_node);
574 	INIT_LIST_HEAD(&res->granted);
575 	INIT_LIST_HEAD(&res->converting);
576 	INIT_LIST_HEAD(&res->blocked);
577 	INIT_LIST_HEAD(&res->dirty);
578 	INIT_LIST_HEAD(&res->recovering);
579 	INIT_LIST_HEAD(&res->purge);
580 	INIT_LIST_HEAD(&res->tracking);
581 	atomic_set(&res->asts_reserved, 0);
582 	res->migration_pending = 0;
583 	res->inflight_locks = 0;
584 	res->inflight_assert_workers = 0;
585 
586 	res->dlm = dlm;
587 
588 	kref_init(&res->refs);
589 
590 	atomic_inc(&dlm->res_tot_count);
591 	atomic_inc(&dlm->res_cur_count);
592 
593 	/* just for consistency */
594 	spin_lock(&res->spinlock);
595 	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
596 	spin_unlock(&res->spinlock);
597 
598 	res->state = DLM_LOCK_RES_IN_PROGRESS;
599 
600 	res->last_used = 0;
601 
602 	spin_lock(&dlm->spinlock);
603 	list_add_tail(&res->tracking, &dlm->tracking_list);
604 	spin_unlock(&dlm->spinlock);
605 
606 	memset(res->lvb, 0, DLM_LVB_LEN);
607 	memset(res->refmap, 0, sizeof(res->refmap));
608 }
609 
610 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
611 				   const char *name,
612 				   unsigned int namelen)
613 {
614 	struct dlm_lock_resource *res = NULL;
615 
616 	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
617 	if (!res)
618 		goto error;
619 
620 	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
621 	if (!res->lockname.name)
622 		goto error;
623 
624 	dlm_init_lockres(dlm, res, name, namelen);
625 	return res;
626 
627 error:
628 	if (res)
629 		kmem_cache_free(dlm_lockres_cache, res);
630 	return NULL;
631 }
632 
633 void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
634 				struct dlm_lock_resource *res, int bit)
635 {
636 	assert_spin_locked(&res->spinlock);
637 
638 	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
639 	     res->lockname.name, bit, __builtin_return_address(0));
640 
641 	set_bit(bit, res->refmap);
642 }
643 
644 void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
645 				  struct dlm_lock_resource *res, int bit)
646 {
647 	assert_spin_locked(&res->spinlock);
648 
649 	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
650 	     res->lockname.name, bit, __builtin_return_address(0));
651 
652 	clear_bit(bit, res->refmap);
653 }
654 
655 static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
656 				   struct dlm_lock_resource *res)
657 {
658 	res->inflight_locks++;
659 
660 	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
661 	     res->lockname.len, res->lockname.name, res->inflight_locks,
662 	     __builtin_return_address(0));
663 }
664 
665 void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
666 				   struct dlm_lock_resource *res)
667 {
668 	assert_spin_locked(&res->spinlock);
669 	__dlm_lockres_grab_inflight_ref(dlm, res);
670 }
671 
672 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
673 				   struct dlm_lock_resource *res)
674 {
675 	assert_spin_locked(&res->spinlock);
676 
677 	BUG_ON(res->inflight_locks == 0);
678 
679 	res->inflight_locks--;
680 
681 	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
682 	     res->lockname.len, res->lockname.name, res->inflight_locks,
683 	     __builtin_return_address(0));
684 
685 	wake_up(&res->wq);
686 }
687 
688 void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
689 		struct dlm_lock_resource *res)
690 {
691 	assert_spin_locked(&res->spinlock);
692 	res->inflight_assert_workers++;
693 	mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
694 			dlm->name, res->lockname.len, res->lockname.name,
695 			res->inflight_assert_workers);
696 }
697 
698 static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
699 		struct dlm_lock_resource *res)
700 {
701 	spin_lock(&res->spinlock);
702 	__dlm_lockres_grab_inflight_worker(dlm, res);
703 	spin_unlock(&res->spinlock);
704 }
705 
706 static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
707 		struct dlm_lock_resource *res)
708 {
709 	assert_spin_locked(&res->spinlock);
710 	BUG_ON(res->inflight_assert_workers == 0);
711 	res->inflight_assert_workers--;
712 	mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
713 			dlm->name, res->lockname.len, res->lockname.name,
714 			res->inflight_assert_workers);
715 }
716 
717 static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
718 		struct dlm_lock_resource *res)
719 {
720 	spin_lock(&res->spinlock);
721 	__dlm_lockres_drop_inflight_worker(dlm, res);
722 	spin_unlock(&res->spinlock);
723 }
724 
725 /*
726  * lookup a lock resource by name.
727  * may already exist in the hashtable.
728  * lockid is null terminated
729  *
730  * if not, allocate enough for the lockres and for
731  * the temporary structure used in doing the mastering.
732  *
733  * also, do a lookup in the dlm->master_list to see
734  * if another node has begun mastering the same lock.
735  * if so, there should be a block entry in there
736  * for this name, and we should *not* attempt to master
737  * the lock here.   need to wait around for that node
738  * to assert_master (or die).
739  *
740  */
741 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
742 					  const char *lockid,
743 					  int namelen,
744 					  int flags)
745 {
746 	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
747 	struct dlm_master_list_entry *mle = NULL;
748 	struct dlm_master_list_entry *alloc_mle = NULL;
749 	int blocked = 0;
750 	int ret, nodenum;
751 	struct dlm_node_iter iter;
752 	unsigned int hash;
753 	int tries = 0;
754 	int bit, wait_on_recovery = 0;
755 
756 	BUG_ON(!lockid);
757 
758 	hash = dlm_lockid_hash(lockid, namelen);
759 
760 	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
761 
762 lookup:
763 	spin_lock(&dlm->spinlock);
764 	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
765 	if (tmpres) {
766 		spin_unlock(&dlm->spinlock);
767 		spin_lock(&tmpres->spinlock);
768 		/* Wait on the thread that is mastering the resource */
769 		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
770 			__dlm_wait_on_lockres(tmpres);
771 			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
772 			spin_unlock(&tmpres->spinlock);
773 			dlm_lockres_put(tmpres);
774 			tmpres = NULL;
775 			goto lookup;
776 		}
777 
778 		/* Wait on the resource purge to complete before continuing */
779 		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
780 			BUG_ON(tmpres->owner == dlm->node_num);
781 			__dlm_wait_on_lockres_flags(tmpres,
782 						    DLM_LOCK_RES_DROPPING_REF);
783 			spin_unlock(&tmpres->spinlock);
784 			dlm_lockres_put(tmpres);
785 			tmpres = NULL;
786 			goto lookup;
787 		}
788 
789 		/* Grab inflight ref to pin the resource */
790 		dlm_lockres_grab_inflight_ref(dlm, tmpres);
791 
792 		spin_unlock(&tmpres->spinlock);
793 		if (res)
794 			dlm_lockres_put(res);
795 		res = tmpres;
796 		goto leave;
797 	}
798 
799 	if (!res) {
800 		spin_unlock(&dlm->spinlock);
801 		mlog(0, "allocating a new resource\n");
802 		/* nothing found and we need to allocate one. */
803 		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
804 		if (!alloc_mle)
805 			goto leave;
806 		res = dlm_new_lockres(dlm, lockid, namelen);
807 		if (!res)
808 			goto leave;
809 		goto lookup;
810 	}
811 
812 	mlog(0, "no lockres found, allocated our own: %p\n", res);
813 
814 	if (flags & LKM_LOCAL) {
815 		/* caller knows it's safe to assume it's not mastered elsewhere
816 		 * DONE!  return right away */
817 		spin_lock(&res->spinlock);
818 		dlm_change_lockres_owner(dlm, res, dlm->node_num);
819 		__dlm_insert_lockres(dlm, res);
820 		dlm_lockres_grab_inflight_ref(dlm, res);
821 		spin_unlock(&res->spinlock);
822 		spin_unlock(&dlm->spinlock);
823 		/* lockres still marked IN_PROGRESS */
824 		goto wake_waiters;
825 	}
826 
827 	/* check master list to see if another node has started mastering it */
828 	spin_lock(&dlm->master_lock);
829 
830 	/* if we found a block, wait for lock to be mastered by another node */
831 	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
832 	if (blocked) {
833 		int mig;
834 		if (mle->type == DLM_MLE_MASTER) {
835 			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
836 			BUG();
837 		}
838 		mig = (mle->type == DLM_MLE_MIGRATION);
839 		/* if there is a migration in progress, let the migration
840 		 * finish before continuing.  we can wait for the absence
841 		 * of the MIGRATION mle: either the migrate finished or
842 		 * one of the nodes died and the mle was cleaned up.
843 		 * if there is a BLOCK here, but it already has a master
844 		 * set, we are too late.  the master does not have a ref
845 		 * for us in the refmap.  detach the mle and drop it.
846 		 * either way, go back to the top and start over. */
847 		if (mig || mle->master != O2NM_MAX_NODES) {
848 			BUG_ON(mig && mle->master == dlm->node_num);
849 			/* we arrived too late.  the master does not
850 			 * have a ref for us. retry. */
851 			mlog(0, "%s:%.*s: late on %s\n",
852 			     dlm->name, namelen, lockid,
853 			     mig ?  "MIGRATION" : "BLOCK");
854 			spin_unlock(&dlm->master_lock);
855 			spin_unlock(&dlm->spinlock);
856 
857 			/* master is known, detach */
858 			if (!mig)
859 				dlm_mle_detach_hb_events(dlm, mle);
860 			dlm_put_mle(mle);
861 			mle = NULL;
862 			/* this is lame, but we can't wait on either
863 			 * the mle or lockres waitqueue here */
864 			if (mig)
865 				msleep(100);
866 			goto lookup;
867 		}
868 	} else {
869 		/* go ahead and try to master lock on this node */
870 		mle = alloc_mle;
871 		/* make sure this does not get freed below */
872 		alloc_mle = NULL;
873 		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
874 		set_bit(dlm->node_num, mle->maybe_map);
875 		__dlm_insert_mle(dlm, mle);
876 
877 		/* still holding the dlm spinlock, check the recovery map
878 		 * to see if there are any nodes that still need to be
879 		 * considered.  these will not appear in the mle nodemap
880 		 * but they might own this lockres.  wait on them. */
881 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
882 		if (bit < O2NM_MAX_NODES) {
883 			mlog(0, "%s: res %.*s, At least one node (%d) "
884 			     "to recover before lock mastery can begin\n",
885 			     dlm->name, namelen, (char *)lockid, bit);
886 			wait_on_recovery = 1;
887 		}
888 	}
889 
890 	/* at this point there is either a DLM_MLE_BLOCK or a
891 	 * DLM_MLE_MASTER on the master list, so it's safe to add the
892 	 * lockres to the hashtable.  anyone who finds the lock will
893 	 * still have to wait on the IN_PROGRESS. */
894 
895 	/* finally add the lockres to its hash bucket */
896 	__dlm_insert_lockres(dlm, res);
897 
898 	/* since this lockres is new it doesn't not require the spinlock */
899 	__dlm_lockres_grab_inflight_ref(dlm, res);
900 
901 	/* get an extra ref on the mle in case this is a BLOCK
902 	 * if so, the creator of the BLOCK may try to put the last
903 	 * ref at this time in the assert master handler, so we
904 	 * need an extra one to keep from a bad ptr deref. */
905 	dlm_get_mle_inuse(mle);
906 	spin_unlock(&dlm->master_lock);
907 	spin_unlock(&dlm->spinlock);
908 
909 redo_request:
910 	while (wait_on_recovery) {
911 		/* any cluster changes that occurred after dropping the
912 		 * dlm spinlock would be detectable be a change on the mle,
913 		 * so we only need to clear out the recovery map once. */
914 		if (dlm_is_recovery_lock(lockid, namelen)) {
915 			mlog(0, "%s: Recovery map is not empty, but must "
916 			     "master $RECOVERY lock now\n", dlm->name);
917 			if (!dlm_pre_master_reco_lockres(dlm, res))
918 				wait_on_recovery = 0;
919 			else {
920 				mlog(0, "%s: waiting 500ms for heartbeat state "
921 				    "change\n", dlm->name);
922 				msleep(500);
923 			}
924 			continue;
925 		}
926 
927 		dlm_kick_recovery_thread(dlm);
928 		msleep(1000);
929 		dlm_wait_for_recovery(dlm);
930 
931 		spin_lock(&dlm->spinlock);
932 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
933 		if (bit < O2NM_MAX_NODES) {
934 			mlog(0, "%s: res %.*s, At least one node (%d) "
935 			     "to recover before lock mastery can begin\n",
936 			     dlm->name, namelen, (char *)lockid, bit);
937 			wait_on_recovery = 1;
938 		} else
939 			wait_on_recovery = 0;
940 		spin_unlock(&dlm->spinlock);
941 
942 		if (wait_on_recovery)
943 			dlm_wait_for_node_recovery(dlm, bit, 10000);
944 	}
945 
946 	/* must wait for lock to be mastered elsewhere */
947 	if (blocked)
948 		goto wait;
949 
950 	ret = -EINVAL;
951 	dlm_node_iter_init(mle->vote_map, &iter);
952 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
953 		ret = dlm_do_master_request(res, mle, nodenum);
954 		if (ret < 0)
955 			mlog_errno(ret);
956 		if (mle->master != O2NM_MAX_NODES) {
957 			/* found a master ! */
958 			if (mle->master <= nodenum)
959 				break;
960 			/* if our master request has not reached the master
961 			 * yet, keep going until it does.  this is how the
962 			 * master will know that asserts are needed back to
963 			 * the lower nodes. */
964 			mlog(0, "%s: res %.*s, Requests only up to %u but "
965 			     "master is %u, keep going\n", dlm->name, namelen,
966 			     lockid, nodenum, mle->master);
967 		}
968 	}
969 
970 wait:
971 	/* keep going until the response map includes all nodes */
972 	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
973 	if (ret < 0) {
974 		wait_on_recovery = 1;
975 		mlog(0, "%s: res %.*s, Node map changed, redo the master "
976 		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
977 		     res->lockname.name, blocked);
978 		if (++tries > 20) {
979 			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
980 			     "dlm_wait_for_lock_mastery, blocked = %d\n",
981 			     dlm->name, res->lockname.len,
982 			     res->lockname.name, blocked);
983 			dlm_print_one_lock_resource(res);
984 			dlm_print_one_mle(mle);
985 			tries = 0;
986 		}
987 		goto redo_request;
988 	}
989 
990 	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
991 	     res->lockname.name, res->owner);
992 	/* make sure we never continue without this */
993 	BUG_ON(res->owner == O2NM_MAX_NODES);
994 
995 	/* master is known, detach if not already detached */
996 	dlm_mle_detach_hb_events(dlm, mle);
997 	dlm_put_mle(mle);
998 	/* put the extra ref */
999 	dlm_put_mle_inuse(mle);
1000 
1001 wake_waiters:
1002 	spin_lock(&res->spinlock);
1003 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1004 	spin_unlock(&res->spinlock);
1005 	wake_up(&res->wq);
1006 
1007 leave:
1008 	/* need to free the unused mle */
1009 	if (alloc_mle)
1010 		kmem_cache_free(dlm_mle_cache, alloc_mle);
1011 
1012 	return res;
1013 }
1014 
1015 
1016 #define DLM_MASTERY_TIMEOUT_MS   5000
1017 
1018 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
1019 				     struct dlm_lock_resource *res,
1020 				     struct dlm_master_list_entry *mle,
1021 				     int *blocked)
1022 {
1023 	u8 m;
1024 	int ret, bit;
1025 	int map_changed, voting_done;
1026 	int assert, sleep;
1027 
1028 recheck:
1029 	ret = 0;
1030 	assert = 0;
1031 
1032 	/* check if another node has already become the owner */
1033 	spin_lock(&res->spinlock);
1034 	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1035 		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
1036 		     res->lockname.len, res->lockname.name, res->owner);
1037 		spin_unlock(&res->spinlock);
1038 		/* this will cause the master to re-assert across
1039 		 * the whole cluster, freeing up mles */
1040 		if (res->owner != dlm->node_num) {
1041 			ret = dlm_do_master_request(res, mle, res->owner);
1042 			if (ret < 0) {
1043 				/* give recovery a chance to run */
1044 				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1045 				msleep(500);
1046 				goto recheck;
1047 			}
1048 		}
1049 		ret = 0;
1050 		goto leave;
1051 	}
1052 	spin_unlock(&res->spinlock);
1053 
1054 	spin_lock(&mle->spinlock);
1055 	m = mle->master;
1056 	map_changed = (memcmp(mle->vote_map, mle->node_map,
1057 			      sizeof(mle->vote_map)) != 0);
1058 	voting_done = (memcmp(mle->vote_map, mle->response_map,
1059 			     sizeof(mle->vote_map)) == 0);
1060 
1061 	/* restart if we hit any errors */
1062 	if (map_changed) {
1063 		int b;
1064 		mlog(0, "%s: %.*s: node map changed, restarting\n",
1065 		     dlm->name, res->lockname.len, res->lockname.name);
1066 		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1067 		b = (mle->type == DLM_MLE_BLOCK);
1068 		if ((*blocked && !b) || (!*blocked && b)) {
1069 			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1070 			     dlm->name, res->lockname.len, res->lockname.name,
1071 			     *blocked, b);
1072 			*blocked = b;
1073 		}
1074 		spin_unlock(&mle->spinlock);
1075 		if (ret < 0) {
1076 			mlog_errno(ret);
1077 			goto leave;
1078 		}
1079 		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1080 		     "rechecking now\n", dlm->name, res->lockname.len,
1081 		     res->lockname.name);
1082 		goto recheck;
1083 	} else {
1084 		if (!voting_done) {
1085 			mlog(0, "map not changed and voting not done "
1086 			     "for %s:%.*s\n", dlm->name, res->lockname.len,
1087 			     res->lockname.name);
1088 		}
1089 	}
1090 
1091 	if (m != O2NM_MAX_NODES) {
1092 		/* another node has done an assert!
1093 		 * all done! */
1094 		sleep = 0;
1095 	} else {
1096 		sleep = 1;
1097 		/* have all nodes responded? */
1098 		if (voting_done && !*blocked) {
1099 			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1100 			if (dlm->node_num <= bit) {
1101 				/* my node number is lowest.
1102 			 	 * now tell other nodes that I am
1103 				 * mastering this. */
1104 				mle->master = dlm->node_num;
1105 				/* ref was grabbed in get_lock_resource
1106 				 * will be dropped in dlmlock_master */
1107 				assert = 1;
1108 				sleep = 0;
1109 			}
1110 			/* if voting is done, but we have not received
1111 			 * an assert master yet, we must sleep */
1112 		}
1113 	}
1114 
1115 	spin_unlock(&mle->spinlock);
1116 
1117 	/* sleep if we haven't finished voting yet */
1118 	if (sleep) {
1119 		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1120 
1121 		/*
1122 		if (atomic_read(&mle->mle_refs.refcount) < 2)
1123 			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1124 			atomic_read(&mle->mle_refs.refcount),
1125 			res->lockname.len, res->lockname.name);
1126 		*/
1127 		atomic_set(&mle->woken, 0);
1128 		(void)wait_event_timeout(mle->wq,
1129 					 (atomic_read(&mle->woken) == 1),
1130 					 timeo);
1131 		if (res->owner == O2NM_MAX_NODES) {
1132 			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1133 			     res->lockname.len, res->lockname.name);
1134 			goto recheck;
1135 		}
1136 		mlog(0, "done waiting, master is %u\n", res->owner);
1137 		ret = 0;
1138 		goto leave;
1139 	}
1140 
1141 	ret = 0;   /* done */
1142 	if (assert) {
1143 		m = dlm->node_num;
1144 		mlog(0, "about to master %.*s here, this=%u\n",
1145 		     res->lockname.len, res->lockname.name, m);
1146 		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1147 		if (ret) {
1148 			/* This is a failure in the network path,
1149 			 * not in the response to the assert_master
1150 			 * (any nonzero response is a BUG on this node).
1151 			 * Most likely a socket just got disconnected
1152 			 * due to node death. */
1153 			mlog_errno(ret);
1154 		}
1155 		/* no longer need to restart lock mastery.
1156 		 * all living nodes have been contacted. */
1157 		ret = 0;
1158 	}
1159 
1160 	/* set the lockres owner */
1161 	spin_lock(&res->spinlock);
1162 	/* mastery reference obtained either during
1163 	 * assert_master_handler or in get_lock_resource */
1164 	dlm_change_lockres_owner(dlm, res, m);
1165 	spin_unlock(&res->spinlock);
1166 
1167 leave:
1168 	return ret;
1169 }
1170 
1171 struct dlm_bitmap_diff_iter
1172 {
1173 	int curnode;
1174 	unsigned long *orig_bm;
1175 	unsigned long *cur_bm;
1176 	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1177 };
1178 
1179 enum dlm_node_state_change
1180 {
1181 	NODE_DOWN = -1,
1182 	NODE_NO_CHANGE = 0,
1183 	NODE_UP
1184 };
1185 
1186 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1187 				      unsigned long *orig_bm,
1188 				      unsigned long *cur_bm)
1189 {
1190 	unsigned long p1, p2;
1191 	int i;
1192 
1193 	iter->curnode = -1;
1194 	iter->orig_bm = orig_bm;
1195 	iter->cur_bm = cur_bm;
1196 
1197 	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1198        		p1 = *(iter->orig_bm + i);
1199 	       	p2 = *(iter->cur_bm + i);
1200 		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1201 	}
1202 }
1203 
1204 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1205 				     enum dlm_node_state_change *state)
1206 {
1207 	int bit;
1208 
1209 	if (iter->curnode >= O2NM_MAX_NODES)
1210 		return -ENOENT;
1211 
1212 	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1213 			    iter->curnode+1);
1214 	if (bit >= O2NM_MAX_NODES) {
1215 		iter->curnode = O2NM_MAX_NODES;
1216 		return -ENOENT;
1217 	}
1218 
1219 	/* if it was there in the original then this node died */
1220 	if (test_bit(bit, iter->orig_bm))
1221 		*state = NODE_DOWN;
1222 	else
1223 		*state = NODE_UP;
1224 
1225 	iter->curnode = bit;
1226 	return bit;
1227 }
1228 
1229 
1230 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1231 				    struct dlm_lock_resource *res,
1232 				    struct dlm_master_list_entry *mle,
1233 				    int blocked)
1234 {
1235 	struct dlm_bitmap_diff_iter bdi;
1236 	enum dlm_node_state_change sc;
1237 	int node;
1238 	int ret = 0;
1239 
1240 	mlog(0, "something happened such that the "
1241 	     "master process may need to be restarted!\n");
1242 
1243 	assert_spin_locked(&mle->spinlock);
1244 
1245 	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1246 	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1247 	while (node >= 0) {
1248 		if (sc == NODE_UP) {
1249 			/* a node came up.  clear any old vote from
1250 			 * the response map and set it in the vote map
1251 			 * then restart the mastery. */
1252 			mlog(ML_NOTICE, "node %d up while restarting\n", node);
1253 
1254 			/* redo the master request, but only for the new node */
1255 			mlog(0, "sending request to new node\n");
1256 			clear_bit(node, mle->response_map);
1257 			set_bit(node, mle->vote_map);
1258 		} else {
1259 			mlog(ML_ERROR, "node down! %d\n", node);
1260 			if (blocked) {
1261 				int lowest = find_next_bit(mle->maybe_map,
1262 						       O2NM_MAX_NODES, 0);
1263 
1264 				/* act like it was never there */
1265 				clear_bit(node, mle->maybe_map);
1266 
1267 			       	if (node == lowest) {
1268 					mlog(0, "expected master %u died"
1269 					    " while this node was blocked "
1270 					    "waiting on it!\n", node);
1271 					lowest = find_next_bit(mle->maybe_map,
1272 						       	O2NM_MAX_NODES,
1273 						       	lowest+1);
1274 					if (lowest < O2NM_MAX_NODES) {
1275 						mlog(0, "%s:%.*s:still "
1276 						     "blocked. waiting on %u "
1277 						     "now\n", dlm->name,
1278 						     res->lockname.len,
1279 						     res->lockname.name,
1280 						     lowest);
1281 					} else {
1282 						/* mle is an MLE_BLOCK, but
1283 						 * there is now nothing left to
1284 						 * block on.  we need to return
1285 						 * all the way back out and try
1286 						 * again with an MLE_MASTER.
1287 						 * dlm_do_local_recovery_cleanup
1288 						 * has already run, so the mle
1289 						 * refcount is ok */
1290 						mlog(0, "%s:%.*s: no "
1291 						     "longer blocking. try to "
1292 						     "master this here\n",
1293 						     dlm->name,
1294 						     res->lockname.len,
1295 						     res->lockname.name);
1296 						mle->type = DLM_MLE_MASTER;
1297 						mle->mleres = res;
1298 					}
1299 				}
1300 			}
1301 
1302 			/* now blank out everything, as if we had never
1303 			 * contacted anyone */
1304 			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1305 			memset(mle->response_map, 0, sizeof(mle->response_map));
1306 			/* reset the vote_map to the current node_map */
1307 			memcpy(mle->vote_map, mle->node_map,
1308 			       sizeof(mle->node_map));
1309 			/* put myself into the maybe map */
1310 			if (mle->type != DLM_MLE_BLOCK)
1311 				set_bit(dlm->node_num, mle->maybe_map);
1312 		}
1313 		ret = -EAGAIN;
1314 		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1315 	}
1316 	return ret;
1317 }
1318 
1319 
1320 /*
1321  * DLM_MASTER_REQUEST_MSG
1322  *
1323  * returns: 0 on success,
1324  *          -errno on a network error
1325  *
1326  * on error, the caller should assume the target node is "dead"
1327  *
1328  */
1329 
1330 static int dlm_do_master_request(struct dlm_lock_resource *res,
1331 				 struct dlm_master_list_entry *mle, int to)
1332 {
1333 	struct dlm_ctxt *dlm = mle->dlm;
1334 	struct dlm_master_request request;
1335 	int ret, response=0, resend;
1336 
1337 	memset(&request, 0, sizeof(request));
1338 	request.node_idx = dlm->node_num;
1339 
1340 	BUG_ON(mle->type == DLM_MLE_MIGRATION);
1341 
1342 	request.namelen = (u8)mle->mnamelen;
1343 	memcpy(request.name, mle->mname, request.namelen);
1344 
1345 again:
1346 	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1347 				 sizeof(request), to, &response);
1348 	if (ret < 0)  {
1349 		if (ret == -ESRCH) {
1350 			/* should never happen */
1351 			mlog(ML_ERROR, "TCP stack not ready!\n");
1352 			BUG();
1353 		} else if (ret == -EINVAL) {
1354 			mlog(ML_ERROR, "bad args passed to o2net!\n");
1355 			BUG();
1356 		} else if (ret == -ENOMEM) {
1357 			mlog(ML_ERROR, "out of memory while trying to send "
1358 			     "network message!  retrying\n");
1359 			/* this is totally crude */
1360 			msleep(50);
1361 			goto again;
1362 		} else if (!dlm_is_host_down(ret)) {
1363 			/* not a network error. bad. */
1364 			mlog_errno(ret);
1365 			mlog(ML_ERROR, "unhandled error!");
1366 			BUG();
1367 		}
1368 		/* all other errors should be network errors,
1369 		 * and likely indicate node death */
1370 		mlog(ML_ERROR, "link to %d went down!\n", to);
1371 		goto out;
1372 	}
1373 
1374 	ret = 0;
1375 	resend = 0;
1376 	spin_lock(&mle->spinlock);
1377 	switch (response) {
1378 		case DLM_MASTER_RESP_YES:
1379 			set_bit(to, mle->response_map);
1380 			mlog(0, "node %u is the master, response=YES\n", to);
1381 			mlog(0, "%s:%.*s: master node %u now knows I have a "
1382 			     "reference\n", dlm->name, res->lockname.len,
1383 			     res->lockname.name, to);
1384 			mle->master = to;
1385 			break;
1386 		case DLM_MASTER_RESP_NO:
1387 			mlog(0, "node %u not master, response=NO\n", to);
1388 			set_bit(to, mle->response_map);
1389 			break;
1390 		case DLM_MASTER_RESP_MAYBE:
1391 			mlog(0, "node %u not master, response=MAYBE\n", to);
1392 			set_bit(to, mle->response_map);
1393 			set_bit(to, mle->maybe_map);
1394 			break;
1395 		case DLM_MASTER_RESP_ERROR:
1396 			mlog(0, "node %u hit an error, resending\n", to);
1397 			resend = 1;
1398 			response = 0;
1399 			break;
1400 		default:
1401 			mlog(ML_ERROR, "bad response! %u\n", response);
1402 			BUG();
1403 	}
1404 	spin_unlock(&mle->spinlock);
1405 	if (resend) {
1406 		/* this is also totally crude */
1407 		msleep(50);
1408 		goto again;
1409 	}
1410 
1411 out:
1412 	return ret;
1413 }
1414 
1415 /*
1416  * locks that can be taken here:
1417  * dlm->spinlock
1418  * res->spinlock
1419  * mle->spinlock
1420  * dlm->master_list
1421  *
1422  * if possible, TRIM THIS DOWN!!!
1423  */
1424 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1425 			       void **ret_data)
1426 {
1427 	u8 response = DLM_MASTER_RESP_MAYBE;
1428 	struct dlm_ctxt *dlm = data;
1429 	struct dlm_lock_resource *res = NULL;
1430 	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1431 	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1432 	char *name;
1433 	unsigned int namelen, hash;
1434 	int found, ret;
1435 	int set_maybe;
1436 	int dispatch_assert = 0;
1437 
1438 	if (!dlm_grab(dlm))
1439 		return DLM_MASTER_RESP_NO;
1440 
1441 	if (!dlm_domain_fully_joined(dlm)) {
1442 		response = DLM_MASTER_RESP_NO;
1443 		goto send_response;
1444 	}
1445 
1446 	name = request->name;
1447 	namelen = request->namelen;
1448 	hash = dlm_lockid_hash(name, namelen);
1449 
1450 	if (namelen > DLM_LOCKID_NAME_MAX) {
1451 		response = DLM_IVBUFLEN;
1452 		goto send_response;
1453 	}
1454 
1455 way_up_top:
1456 	spin_lock(&dlm->spinlock);
1457 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1458 	if (res) {
1459 		spin_unlock(&dlm->spinlock);
1460 
1461 		/* take care of the easy cases up front */
1462 		spin_lock(&res->spinlock);
1463 
1464 		/*
1465 		 * Right after dlm spinlock was released, dlm_thread could have
1466 		 * purged the lockres. Check if lockres got unhashed. If so
1467 		 * start over.
1468 		 */
1469 		if (hlist_unhashed(&res->hash_node)) {
1470 			spin_unlock(&res->spinlock);
1471 			dlm_lockres_put(res);
1472 			goto way_up_top;
1473 		}
1474 
1475 		if (res->state & (DLM_LOCK_RES_RECOVERING|
1476 				  DLM_LOCK_RES_MIGRATING)) {
1477 			spin_unlock(&res->spinlock);
1478 			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1479 			     "being recovered/migrated\n");
1480 			response = DLM_MASTER_RESP_ERROR;
1481 			if (mle)
1482 				kmem_cache_free(dlm_mle_cache, mle);
1483 			goto send_response;
1484 		}
1485 
1486 		if (res->owner == dlm->node_num) {
1487 			dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1488 			spin_unlock(&res->spinlock);
1489 			response = DLM_MASTER_RESP_YES;
1490 			if (mle)
1491 				kmem_cache_free(dlm_mle_cache, mle);
1492 
1493 			/* this node is the owner.
1494 			 * there is some extra work that needs to
1495 			 * happen now.  the requesting node has
1496 			 * caused all nodes up to this one to
1497 			 * create mles.  this node now needs to
1498 			 * go back and clean those up. */
1499 			dispatch_assert = 1;
1500 			goto send_response;
1501 		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1502 			spin_unlock(&res->spinlock);
1503 			// mlog(0, "node %u is the master\n", res->owner);
1504 			response = DLM_MASTER_RESP_NO;
1505 			if (mle)
1506 				kmem_cache_free(dlm_mle_cache, mle);
1507 			goto send_response;
1508 		}
1509 
1510 		/* ok, there is no owner.  either this node is
1511 		 * being blocked, or it is actively trying to
1512 		 * master this lock. */
1513 		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1514 			mlog(ML_ERROR, "lock with no owner should be "
1515 			     "in-progress!\n");
1516 			BUG();
1517 		}
1518 
1519 		// mlog(0, "lockres is in progress...\n");
1520 		spin_lock(&dlm->master_lock);
1521 		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1522 		if (!found) {
1523 			mlog(ML_ERROR, "no mle found for this lock!\n");
1524 			BUG();
1525 		}
1526 		set_maybe = 1;
1527 		spin_lock(&tmpmle->spinlock);
1528 		if (tmpmle->type == DLM_MLE_BLOCK) {
1529 			// mlog(0, "this node is waiting for "
1530 			// "lockres to be mastered\n");
1531 			response = DLM_MASTER_RESP_NO;
1532 		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
1533 			mlog(0, "node %u is master, but trying to migrate to "
1534 			     "node %u.\n", tmpmle->master, tmpmle->new_master);
1535 			if (tmpmle->master == dlm->node_num) {
1536 				mlog(ML_ERROR, "no owner on lockres, but this "
1537 				     "node is trying to migrate it to %u?!\n",
1538 				     tmpmle->new_master);
1539 				BUG();
1540 			} else {
1541 				/* the real master can respond on its own */
1542 				response = DLM_MASTER_RESP_NO;
1543 			}
1544 		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1545 			set_maybe = 0;
1546 			if (tmpmle->master == dlm->node_num) {
1547 				response = DLM_MASTER_RESP_YES;
1548 				/* this node will be the owner.
1549 				 * go back and clean the mles on any
1550 				 * other nodes */
1551 				dispatch_assert = 1;
1552 				dlm_lockres_set_refmap_bit(dlm, res,
1553 							   request->node_idx);
1554 			} else
1555 				response = DLM_MASTER_RESP_NO;
1556 		} else {
1557 			// mlog(0, "this node is attempting to "
1558 			// "master lockres\n");
1559 			response = DLM_MASTER_RESP_MAYBE;
1560 		}
1561 		if (set_maybe)
1562 			set_bit(request->node_idx, tmpmle->maybe_map);
1563 		spin_unlock(&tmpmle->spinlock);
1564 
1565 		spin_unlock(&dlm->master_lock);
1566 		spin_unlock(&res->spinlock);
1567 
1568 		/* keep the mle attached to heartbeat events */
1569 		dlm_put_mle(tmpmle);
1570 		if (mle)
1571 			kmem_cache_free(dlm_mle_cache, mle);
1572 		goto send_response;
1573 	}
1574 
1575 	/*
1576 	 * lockres doesn't exist on this node
1577 	 * if there is an MLE_BLOCK, return NO
1578 	 * if there is an MLE_MASTER, return MAYBE
1579 	 * otherwise, add an MLE_BLOCK, return NO
1580 	 */
1581 	spin_lock(&dlm->master_lock);
1582 	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1583 	if (!found) {
1584 		/* this lockid has never been seen on this node yet */
1585 		// mlog(0, "no mle found\n");
1586 		if (!mle) {
1587 			spin_unlock(&dlm->master_lock);
1588 			spin_unlock(&dlm->spinlock);
1589 
1590 			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1591 			if (!mle) {
1592 				response = DLM_MASTER_RESP_ERROR;
1593 				mlog_errno(-ENOMEM);
1594 				goto send_response;
1595 			}
1596 			goto way_up_top;
1597 		}
1598 
1599 		// mlog(0, "this is second time thru, already allocated, "
1600 		// "add the block.\n");
1601 		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1602 		set_bit(request->node_idx, mle->maybe_map);
1603 		__dlm_insert_mle(dlm, mle);
1604 		response = DLM_MASTER_RESP_NO;
1605 	} else {
1606 		// mlog(0, "mle was found\n");
1607 		set_maybe = 1;
1608 		spin_lock(&tmpmle->spinlock);
1609 		if (tmpmle->master == dlm->node_num) {
1610 			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1611 			BUG();
1612 		}
1613 		if (tmpmle->type == DLM_MLE_BLOCK)
1614 			response = DLM_MASTER_RESP_NO;
1615 		else if (tmpmle->type == DLM_MLE_MIGRATION) {
1616 			mlog(0, "migration mle was found (%u->%u)\n",
1617 			     tmpmle->master, tmpmle->new_master);
1618 			/* real master can respond on its own */
1619 			response = DLM_MASTER_RESP_NO;
1620 		} else
1621 			response = DLM_MASTER_RESP_MAYBE;
1622 		if (set_maybe)
1623 			set_bit(request->node_idx, tmpmle->maybe_map);
1624 		spin_unlock(&tmpmle->spinlock);
1625 	}
1626 	spin_unlock(&dlm->master_lock);
1627 	spin_unlock(&dlm->spinlock);
1628 
1629 	if (found) {
1630 		/* keep the mle attached to heartbeat events */
1631 		dlm_put_mle(tmpmle);
1632 	}
1633 send_response:
1634 	/*
1635 	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
1636 	 * The reference is released by dlm_assert_master_worker() under
1637 	 * the call to dlm_dispatch_assert_master().  If
1638 	 * dlm_assert_master_worker() isn't called, we drop it here.
1639 	 */
1640 	if (dispatch_assert) {
1641 		if (response != DLM_MASTER_RESP_YES)
1642 			mlog(ML_ERROR, "invalid response %d\n", response);
1643 		if (!res) {
1644 			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1645 			BUG();
1646 		}
1647 		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1648 			     dlm->node_num, res->lockname.len, res->lockname.name);
1649 		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1650 						 DLM_ASSERT_MASTER_MLE_CLEANUP);
1651 		if (ret < 0) {
1652 			mlog(ML_ERROR, "failed to dispatch assert master work\n");
1653 			response = DLM_MASTER_RESP_ERROR;
1654 			dlm_lockres_put(res);
1655 		} else
1656 			dlm_lockres_grab_inflight_worker(dlm, res);
1657 	} else {
1658 		if (res)
1659 			dlm_lockres_put(res);
1660 	}
1661 
1662 	dlm_put(dlm);
1663 	return response;
1664 }
1665 
1666 /*
1667  * DLM_ASSERT_MASTER_MSG
1668  */
1669 
1670 
1671 /*
1672  * NOTE: this can be used for debugging
1673  * can periodically run all locks owned by this node
1674  * and re-assert across the cluster...
1675  */
1676 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1677 				struct dlm_lock_resource *res,
1678 				void *nodemap, u32 flags)
1679 {
1680 	struct dlm_assert_master assert;
1681 	int to, tmpret;
1682 	struct dlm_node_iter iter;
1683 	int ret = 0;
1684 	int reassert;
1685 	const char *lockname = res->lockname.name;
1686 	unsigned int namelen = res->lockname.len;
1687 
1688 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1689 
1690 	spin_lock(&res->spinlock);
1691 	res->state |= DLM_LOCK_RES_SETREF_INPROG;
1692 	spin_unlock(&res->spinlock);
1693 
1694 again:
1695 	reassert = 0;
1696 
1697 	/* note that if this nodemap is empty, it returns 0 */
1698 	dlm_node_iter_init(nodemap, &iter);
1699 	while ((to = dlm_node_iter_next(&iter)) >= 0) {
1700 		int r = 0;
1701 		struct dlm_master_list_entry *mle = NULL;
1702 
1703 		mlog(0, "sending assert master to %d (%.*s)\n", to,
1704 		     namelen, lockname);
1705 		memset(&assert, 0, sizeof(assert));
1706 		assert.node_idx = dlm->node_num;
1707 		assert.namelen = namelen;
1708 		memcpy(assert.name, lockname, namelen);
1709 		assert.flags = cpu_to_be32(flags);
1710 
1711 		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1712 					    &assert, sizeof(assert), to, &r);
1713 		if (tmpret < 0) {
1714 			mlog(ML_ERROR, "Error %d when sending message %u (key "
1715 			     "0x%x) to node %u\n", tmpret,
1716 			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
1717 			if (!dlm_is_host_down(tmpret)) {
1718 				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1719 				BUG();
1720 			}
1721 			/* a node died.  finish out the rest of the nodes. */
1722 			mlog(0, "link to %d went down!\n", to);
1723 			/* any nonzero status return will do */
1724 			ret = tmpret;
1725 			r = 0;
1726 		} else if (r < 0) {
1727 			/* ok, something horribly messed.  kill thyself. */
1728 			mlog(ML_ERROR,"during assert master of %.*s to %u, "
1729 			     "got %d.\n", namelen, lockname, to, r);
1730 			spin_lock(&dlm->spinlock);
1731 			spin_lock(&dlm->master_lock);
1732 			if (dlm_find_mle(dlm, &mle, (char *)lockname,
1733 					 namelen)) {
1734 				dlm_print_one_mle(mle);
1735 				__dlm_put_mle(mle);
1736 			}
1737 			spin_unlock(&dlm->master_lock);
1738 			spin_unlock(&dlm->spinlock);
1739 			BUG();
1740 		}
1741 
1742 		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1743 		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1744 				mlog(ML_ERROR, "%.*s: very strange, "
1745 				     "master MLE but no lockres on %u\n",
1746 				     namelen, lockname, to);
1747 		}
1748 
1749 		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1750 			mlog(0, "%.*s: node %u create mles on other "
1751 			     "nodes and requests a re-assert\n",
1752 			     namelen, lockname, to);
1753 			reassert = 1;
1754 		}
1755 		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1756 			mlog(0, "%.*s: node %u has a reference to this "
1757 			     "lockres, set the bit in the refmap\n",
1758 			     namelen, lockname, to);
1759 			spin_lock(&res->spinlock);
1760 			dlm_lockres_set_refmap_bit(dlm, res, to);
1761 			spin_unlock(&res->spinlock);
1762 		}
1763 	}
1764 
1765 	if (reassert)
1766 		goto again;
1767 
1768 	spin_lock(&res->spinlock);
1769 	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1770 	spin_unlock(&res->spinlock);
1771 	wake_up(&res->wq);
1772 
1773 	return ret;
1774 }
1775 
1776 /*
1777  * locks that can be taken here:
1778  * dlm->spinlock
1779  * res->spinlock
1780  * mle->spinlock
1781  * dlm->master_list
1782  *
1783  * if possible, TRIM THIS DOWN!!!
1784  */
1785 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1786 			      void **ret_data)
1787 {
1788 	struct dlm_ctxt *dlm = data;
1789 	struct dlm_master_list_entry *mle = NULL;
1790 	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1791 	struct dlm_lock_resource *res = NULL;
1792 	char *name;
1793 	unsigned int namelen, hash;
1794 	u32 flags;
1795 	int master_request = 0, have_lockres_ref = 0;
1796 	int ret = 0;
1797 
1798 	if (!dlm_grab(dlm))
1799 		return 0;
1800 
1801 	name = assert->name;
1802 	namelen = assert->namelen;
1803 	hash = dlm_lockid_hash(name, namelen);
1804 	flags = be32_to_cpu(assert->flags);
1805 
1806 	if (namelen > DLM_LOCKID_NAME_MAX) {
1807 		mlog(ML_ERROR, "Invalid name length!");
1808 		goto done;
1809 	}
1810 
1811 	spin_lock(&dlm->spinlock);
1812 
1813 	if (flags)
1814 		mlog(0, "assert_master with flags: %u\n", flags);
1815 
1816 	/* find the MLE */
1817 	spin_lock(&dlm->master_lock);
1818 	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1819 		/* not an error, could be master just re-asserting */
1820 		mlog(0, "just got an assert_master from %u, but no "
1821 		     "MLE for it! (%.*s)\n", assert->node_idx,
1822 		     namelen, name);
1823 	} else {
1824 		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1825 		if (bit >= O2NM_MAX_NODES) {
1826 			/* not necessarily an error, though less likely.
1827 			 * could be master just re-asserting. */
1828 			mlog(0, "no bits set in the maybe_map, but %u "
1829 			     "is asserting! (%.*s)\n", assert->node_idx,
1830 			     namelen, name);
1831 		} else if (bit != assert->node_idx) {
1832 			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1833 				mlog(0, "master %u was found, %u should "
1834 				     "back off\n", assert->node_idx, bit);
1835 			} else {
1836 				/* with the fix for bug 569, a higher node
1837 				 * number winning the mastery will respond
1838 				 * YES to mastery requests, but this node
1839 				 * had no way of knowing.  let it pass. */
1840 				mlog(0, "%u is the lowest node, "
1841 				     "%u is asserting. (%.*s)  %u must "
1842 				     "have begun after %u won.\n", bit,
1843 				     assert->node_idx, namelen, name, bit,
1844 				     assert->node_idx);
1845 			}
1846 		}
1847 		if (mle->type == DLM_MLE_MIGRATION) {
1848 			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1849 				mlog(0, "%s:%.*s: got cleanup assert"
1850 				     " from %u for migration\n",
1851 				     dlm->name, namelen, name,
1852 				     assert->node_idx);
1853 			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1854 				mlog(0, "%s:%.*s: got unrelated assert"
1855 				     " from %u for migration, ignoring\n",
1856 				     dlm->name, namelen, name,
1857 				     assert->node_idx);
1858 				__dlm_put_mle(mle);
1859 				spin_unlock(&dlm->master_lock);
1860 				spin_unlock(&dlm->spinlock);
1861 				goto done;
1862 			}
1863 		}
1864 	}
1865 	spin_unlock(&dlm->master_lock);
1866 
1867 	/* ok everything checks out with the MLE
1868 	 * now check to see if there is a lockres */
1869 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1870 	if (res) {
1871 		spin_lock(&res->spinlock);
1872 		if (res->state & DLM_LOCK_RES_RECOVERING)  {
1873 			mlog(ML_ERROR, "%u asserting but %.*s is "
1874 			     "RECOVERING!\n", assert->node_idx, namelen, name);
1875 			goto kill;
1876 		}
1877 		if (!mle) {
1878 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1879 			    res->owner != assert->node_idx) {
1880 				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1881 				     "but current owner is %u! (%.*s)\n",
1882 				     assert->node_idx, res->owner, namelen,
1883 				     name);
1884 				__dlm_print_one_lock_resource(res);
1885 				BUG();
1886 			}
1887 		} else if (mle->type != DLM_MLE_MIGRATION) {
1888 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1889 				/* owner is just re-asserting */
1890 				if (res->owner == assert->node_idx) {
1891 					mlog(0, "owner %u re-asserting on "
1892 					     "lock %.*s\n", assert->node_idx,
1893 					     namelen, name);
1894 					goto ok;
1895 				}
1896 				mlog(ML_ERROR, "got assert_master from "
1897 				     "node %u, but %u is the owner! "
1898 				     "(%.*s)\n", assert->node_idx,
1899 				     res->owner, namelen, name);
1900 				goto kill;
1901 			}
1902 			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1903 				mlog(ML_ERROR, "got assert from %u, but lock "
1904 				     "with no owner should be "
1905 				     "in-progress! (%.*s)\n",
1906 				     assert->node_idx,
1907 				     namelen, name);
1908 				goto kill;
1909 			}
1910 		} else /* mle->type == DLM_MLE_MIGRATION */ {
1911 			/* should only be getting an assert from new master */
1912 			if (assert->node_idx != mle->new_master) {
1913 				mlog(ML_ERROR, "got assert from %u, but "
1914 				     "new master is %u, and old master "
1915 				     "was %u (%.*s)\n",
1916 				     assert->node_idx, mle->new_master,
1917 				     mle->master, namelen, name);
1918 				goto kill;
1919 			}
1920 
1921 		}
1922 ok:
1923 		spin_unlock(&res->spinlock);
1924 	}
1925 
1926 	// mlog(0, "woo!  got an assert_master from node %u!\n",
1927 	// 	     assert->node_idx);
1928 	if (mle) {
1929 		int extra_ref = 0;
1930 		int nn = -1;
1931 		int rr, err = 0;
1932 
1933 		spin_lock(&mle->spinlock);
1934 		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1935 			extra_ref = 1;
1936 		else {
1937 			/* MASTER mle: if any bits set in the response map
1938 			 * then the calling node needs to re-assert to clear
1939 			 * up nodes that this node contacted */
1940 			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1941 						    nn+1)) < O2NM_MAX_NODES) {
1942 				if (nn != dlm->node_num && nn != assert->node_idx) {
1943 					master_request = 1;
1944 					break;
1945 				}
1946 			}
1947 		}
1948 		mle->master = assert->node_idx;
1949 		atomic_set(&mle->woken, 1);
1950 		wake_up(&mle->wq);
1951 		spin_unlock(&mle->spinlock);
1952 
1953 		if (res) {
1954 			int wake = 0;
1955 			spin_lock(&res->spinlock);
1956 			if (mle->type == DLM_MLE_MIGRATION) {
1957 				mlog(0, "finishing off migration of lockres %.*s, "
1958 			     		"from %u to %u\n",
1959 			       		res->lockname.len, res->lockname.name,
1960 			       		dlm->node_num, mle->new_master);
1961 				res->state &= ~DLM_LOCK_RES_MIGRATING;
1962 				wake = 1;
1963 				dlm_change_lockres_owner(dlm, res, mle->new_master);
1964 				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1965 			} else {
1966 				dlm_change_lockres_owner(dlm, res, mle->master);
1967 			}
1968 			spin_unlock(&res->spinlock);
1969 			have_lockres_ref = 1;
1970 			if (wake)
1971 				wake_up(&res->wq);
1972 		}
1973 
1974 		/* master is known, detach if not already detached.
1975 		 * ensures that only one assert_master call will happen
1976 		 * on this mle. */
1977 		spin_lock(&dlm->master_lock);
1978 
1979 		rr = atomic_read(&mle->mle_refs.refcount);
1980 		if (mle->inuse > 0) {
1981 			if (extra_ref && rr < 3)
1982 				err = 1;
1983 			else if (!extra_ref && rr < 2)
1984 				err = 1;
1985 		} else {
1986 			if (extra_ref && rr < 2)
1987 				err = 1;
1988 			else if (!extra_ref && rr < 1)
1989 				err = 1;
1990 		}
1991 		if (err) {
1992 			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1993 			     "that will mess up this node, refs=%d, extra=%d, "
1994 			     "inuse=%d\n", dlm->name, namelen, name,
1995 			     assert->node_idx, rr, extra_ref, mle->inuse);
1996 			dlm_print_one_mle(mle);
1997 		}
1998 		__dlm_unlink_mle(dlm, mle);
1999 		__dlm_mle_detach_hb_events(dlm, mle);
2000 		__dlm_put_mle(mle);
2001 		if (extra_ref) {
2002 			/* the assert master message now balances the extra
2003 		 	 * ref given by the master / migration request message.
2004 		 	 * if this is the last put, it will be removed
2005 		 	 * from the list. */
2006 			__dlm_put_mle(mle);
2007 		}
2008 		spin_unlock(&dlm->master_lock);
2009 	} else if (res) {
2010 		if (res->owner != assert->node_idx) {
2011 			mlog(0, "assert_master from %u, but current "
2012 			     "owner is %u (%.*s), no mle\n", assert->node_idx,
2013 			     res->owner, namelen, name);
2014 		}
2015 	}
2016 	spin_unlock(&dlm->spinlock);
2017 
2018 done:
2019 	ret = 0;
2020 	if (res) {
2021 		spin_lock(&res->spinlock);
2022 		res->state |= DLM_LOCK_RES_SETREF_INPROG;
2023 		spin_unlock(&res->spinlock);
2024 		*ret_data = (void *)res;
2025 	}
2026 	dlm_put(dlm);
2027 	if (master_request) {
2028 		mlog(0, "need to tell master to reassert\n");
2029 		/* positive. negative would shoot down the node. */
2030 		ret |= DLM_ASSERT_RESPONSE_REASSERT;
2031 		if (!have_lockres_ref) {
2032 			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2033 			     "mle present here for %s:%.*s, but no lockres!\n",
2034 			     assert->node_idx, dlm->name, namelen, name);
2035 		}
2036 	}
2037 	if (have_lockres_ref) {
2038 		/* let the master know we have a reference to the lockres */
2039 		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2040 		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2041 		     dlm->name, namelen, name, assert->node_idx);
2042 	}
2043 	return ret;
2044 
2045 kill:
2046 	/* kill the caller! */
2047 	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
2048 	     "and killing the other node now!  This node is OK and can continue.\n");
2049 	__dlm_print_one_lock_resource(res);
2050 	spin_unlock(&res->spinlock);
2051 	spin_lock(&dlm->master_lock);
2052 	if (mle)
2053 		__dlm_put_mle(mle);
2054 	spin_unlock(&dlm->master_lock);
2055 	spin_unlock(&dlm->spinlock);
2056 	*ret_data = (void *)res;
2057 	dlm_put(dlm);
2058 	return -EINVAL;
2059 }
2060 
2061 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2062 {
2063 	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2064 
2065 	if (ret_data) {
2066 		spin_lock(&res->spinlock);
2067 		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2068 		spin_unlock(&res->spinlock);
2069 		wake_up(&res->wq);
2070 		dlm_lockres_put(res);
2071 	}
2072 	return;
2073 }
2074 
2075 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2076 			       struct dlm_lock_resource *res,
2077 			       int ignore_higher, u8 request_from, u32 flags)
2078 {
2079 	struct dlm_work_item *item;
2080 	item = kzalloc(sizeof(*item), GFP_ATOMIC);
2081 	if (!item)
2082 		return -ENOMEM;
2083 
2084 
2085 	/* queue up work for dlm_assert_master_worker */
2086 	dlm_grab(dlm);  /* get an extra ref for the work item */
2087 	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2088 	item->u.am.lockres = res; /* already have a ref */
2089 	/* can optionally ignore node numbers higher than this node */
2090 	item->u.am.ignore_higher = ignore_higher;
2091 	item->u.am.request_from = request_from;
2092 	item->u.am.flags = flags;
2093 
2094 	if (ignore_higher)
2095 		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2096 		     res->lockname.name);
2097 
2098 	spin_lock(&dlm->work_lock);
2099 	list_add_tail(&item->list, &dlm->work_list);
2100 	spin_unlock(&dlm->work_lock);
2101 
2102 	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2103 	return 0;
2104 }
2105 
2106 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2107 {
2108 	struct dlm_ctxt *dlm = data;
2109 	int ret = 0;
2110 	struct dlm_lock_resource *res;
2111 	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2112 	int ignore_higher;
2113 	int bit;
2114 	u8 request_from;
2115 	u32 flags;
2116 
2117 	dlm = item->dlm;
2118 	res = item->u.am.lockres;
2119 	ignore_higher = item->u.am.ignore_higher;
2120 	request_from = item->u.am.request_from;
2121 	flags = item->u.am.flags;
2122 
2123 	spin_lock(&dlm->spinlock);
2124 	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2125 	spin_unlock(&dlm->spinlock);
2126 
2127 	clear_bit(dlm->node_num, nodemap);
2128 	if (ignore_higher) {
2129 		/* if is this just to clear up mles for nodes below
2130 		 * this node, do not send the message to the original
2131 		 * caller or any node number higher than this */
2132 		clear_bit(request_from, nodemap);
2133 		bit = dlm->node_num;
2134 		while (1) {
2135 			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2136 					    bit+1);
2137 		       	if (bit >= O2NM_MAX_NODES)
2138 				break;
2139 			clear_bit(bit, nodemap);
2140 		}
2141 	}
2142 
2143 	/*
2144 	 * If we're migrating this lock to someone else, we are no
2145 	 * longer allowed to assert out own mastery.  OTOH, we need to
2146 	 * prevent migration from starting while we're still asserting
2147 	 * our dominance.  The reserved ast delays migration.
2148 	 */
2149 	spin_lock(&res->spinlock);
2150 	if (res->state & DLM_LOCK_RES_MIGRATING) {
2151 		mlog(0, "Someone asked us to assert mastery, but we're "
2152 		     "in the middle of migration.  Skipping assert, "
2153 		     "the new master will handle that.\n");
2154 		spin_unlock(&res->spinlock);
2155 		goto put;
2156 	} else
2157 		__dlm_lockres_reserve_ast(res);
2158 	spin_unlock(&res->spinlock);
2159 
2160 	/* this call now finishes out the nodemap
2161 	 * even if one or more nodes die */
2162 	mlog(0, "worker about to master %.*s here, this=%u\n",
2163 		     res->lockname.len, res->lockname.name, dlm->node_num);
2164 	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2165 	if (ret < 0) {
2166 		/* no need to restart, we are done */
2167 		if (!dlm_is_host_down(ret))
2168 			mlog_errno(ret);
2169 	}
2170 
2171 	/* Ok, we've asserted ourselves.  Let's let migration start. */
2172 	dlm_lockres_release_ast(dlm, res);
2173 
2174 put:
2175 	dlm_lockres_drop_inflight_worker(dlm, res);
2176 
2177 	dlm_lockres_put(res);
2178 
2179 	mlog(0, "finished with dlm_assert_master_worker\n");
2180 }
2181 
2182 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2183  * We cannot wait for node recovery to complete to begin mastering this
2184  * lockres because this lockres is used to kick off recovery! ;-)
2185  * So, do a pre-check on all living nodes to see if any of those nodes
2186  * think that $RECOVERY is currently mastered by a dead node.  If so,
2187  * we wait a short time to allow that node to get notified by its own
2188  * heartbeat stack, then check again.  All $RECOVERY lock resources
2189  * mastered by dead nodes are purged when the hearbeat callback is
2190  * fired, so we can know for sure that it is safe to continue once
2191  * the node returns a live node or no node.  */
2192 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2193 				       struct dlm_lock_resource *res)
2194 {
2195 	struct dlm_node_iter iter;
2196 	int nodenum;
2197 	int ret = 0;
2198 	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2199 
2200 	spin_lock(&dlm->spinlock);
2201 	dlm_node_iter_init(dlm->domain_map, &iter);
2202 	spin_unlock(&dlm->spinlock);
2203 
2204 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2205 		/* do not send to self */
2206 		if (nodenum == dlm->node_num)
2207 			continue;
2208 		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2209 		if (ret < 0) {
2210 			mlog_errno(ret);
2211 			if (!dlm_is_host_down(ret))
2212 				BUG();
2213 			/* host is down, so answer for that node would be
2214 			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2215 			ret = 0;
2216 		}
2217 
2218 		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2219 			/* check to see if this master is in the recovery map */
2220 			spin_lock(&dlm->spinlock);
2221 			if (test_bit(master, dlm->recovery_map)) {
2222 				mlog(ML_NOTICE, "%s: node %u has not seen "
2223 				     "node %u go down yet, and thinks the "
2224 				     "dead node is mastering the recovery "
2225 				     "lock.  must wait.\n", dlm->name,
2226 				     nodenum, master);
2227 				ret = -EAGAIN;
2228 			}
2229 			spin_unlock(&dlm->spinlock);
2230 			mlog(0, "%s: reco lock master is %u\n", dlm->name,
2231 			     master);
2232 			break;
2233 		}
2234 	}
2235 	return ret;
2236 }
2237 
2238 /*
2239  * DLM_DEREF_LOCKRES_MSG
2240  */
2241 
2242 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2243 {
2244 	struct dlm_deref_lockres deref;
2245 	int ret = 0, r;
2246 	const char *lockname;
2247 	unsigned int namelen;
2248 
2249 	lockname = res->lockname.name;
2250 	namelen = res->lockname.len;
2251 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2252 
2253 	memset(&deref, 0, sizeof(deref));
2254 	deref.node_idx = dlm->node_num;
2255 	deref.namelen = namelen;
2256 	memcpy(deref.name, lockname, namelen);
2257 
2258 	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2259 				 &deref, sizeof(deref), res->owner, &r);
2260 	if (ret < 0)
2261 		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2262 		     dlm->name, namelen, lockname, ret, res->owner);
2263 	else if (r < 0) {
2264 		/* BAD.  other node says I did not have a ref. */
2265 		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2266 		     dlm->name, namelen, lockname, res->owner, r);
2267 		dlm_print_one_lock_resource(res);
2268 		BUG();
2269 	}
2270 	return ret;
2271 }
2272 
2273 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2274 			      void **ret_data)
2275 {
2276 	struct dlm_ctxt *dlm = data;
2277 	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2278 	struct dlm_lock_resource *res = NULL;
2279 	char *name;
2280 	unsigned int namelen;
2281 	int ret = -EINVAL;
2282 	u8 node;
2283 	unsigned int hash;
2284 	struct dlm_work_item *item;
2285 	int cleared = 0;
2286 	int dispatch = 0;
2287 
2288 	if (!dlm_grab(dlm))
2289 		return 0;
2290 
2291 	name = deref->name;
2292 	namelen = deref->namelen;
2293 	node = deref->node_idx;
2294 
2295 	if (namelen > DLM_LOCKID_NAME_MAX) {
2296 		mlog(ML_ERROR, "Invalid name length!");
2297 		goto done;
2298 	}
2299 	if (deref->node_idx >= O2NM_MAX_NODES) {
2300 		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2301 		goto done;
2302 	}
2303 
2304 	hash = dlm_lockid_hash(name, namelen);
2305 
2306 	spin_lock(&dlm->spinlock);
2307 	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2308 	if (!res) {
2309 		spin_unlock(&dlm->spinlock);
2310 		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2311 		     dlm->name, namelen, name);
2312 		goto done;
2313 	}
2314 	spin_unlock(&dlm->spinlock);
2315 
2316 	spin_lock(&res->spinlock);
2317 	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2318 		dispatch = 1;
2319 	else {
2320 		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2321 		if (test_bit(node, res->refmap)) {
2322 			dlm_lockres_clear_refmap_bit(dlm, res, node);
2323 			cleared = 1;
2324 		}
2325 	}
2326 	spin_unlock(&res->spinlock);
2327 
2328 	if (!dispatch) {
2329 		if (cleared)
2330 			dlm_lockres_calc_usage(dlm, res);
2331 		else {
2332 			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2333 		     	"but it is already dropped!\n", dlm->name,
2334 		     	res->lockname.len, res->lockname.name, node);
2335 			dlm_print_one_lock_resource(res);
2336 		}
2337 		ret = 0;
2338 		goto done;
2339 	}
2340 
2341 	item = kzalloc(sizeof(*item), GFP_NOFS);
2342 	if (!item) {
2343 		ret = -ENOMEM;
2344 		mlog_errno(ret);
2345 		goto done;
2346 	}
2347 
2348 	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2349 	item->u.dl.deref_res = res;
2350 	item->u.dl.deref_node = node;
2351 
2352 	spin_lock(&dlm->work_lock);
2353 	list_add_tail(&item->list, &dlm->work_list);
2354 	spin_unlock(&dlm->work_lock);
2355 
2356 	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2357 	return 0;
2358 
2359 done:
2360 	if (res)
2361 		dlm_lockres_put(res);
2362 	dlm_put(dlm);
2363 
2364 	return ret;
2365 }
2366 
2367 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2368 {
2369 	struct dlm_ctxt *dlm;
2370 	struct dlm_lock_resource *res;
2371 	u8 node;
2372 	u8 cleared = 0;
2373 
2374 	dlm = item->dlm;
2375 	res = item->u.dl.deref_res;
2376 	node = item->u.dl.deref_node;
2377 
2378 	spin_lock(&res->spinlock);
2379 	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2380 	if (test_bit(node, res->refmap)) {
2381 		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2382 		dlm_lockres_clear_refmap_bit(dlm, res, node);
2383 		cleared = 1;
2384 	}
2385 	spin_unlock(&res->spinlock);
2386 
2387 	if (cleared) {
2388 		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2389 		     dlm->name, res->lockname.len, res->lockname.name, node);
2390 		dlm_lockres_calc_usage(dlm, res);
2391 	} else {
2392 		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2393 		     "but it is already dropped!\n", dlm->name,
2394 		     res->lockname.len, res->lockname.name, node);
2395 		dlm_print_one_lock_resource(res);
2396 	}
2397 
2398 	dlm_lockres_put(res);
2399 }
2400 
2401 /*
2402  * A migrateable resource is one that is :
2403  * 1. locally mastered, and,
2404  * 2. zero local locks, and,
2405  * 3. one or more non-local locks, or, one or more references
2406  * Returns 1 if yes, 0 if not.
2407  */
2408 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2409 				      struct dlm_lock_resource *res)
2410 {
2411 	enum dlm_lockres_list idx;
2412 	int nonlocal = 0, node_ref;
2413 	struct list_head *queue;
2414 	struct dlm_lock *lock;
2415 	u64 cookie;
2416 
2417 	assert_spin_locked(&res->spinlock);
2418 
2419 	/* delay migration when the lockres is in MIGRATING state */
2420 	if (res->state & DLM_LOCK_RES_MIGRATING)
2421 		return 0;
2422 
2423 	/* delay migration when the lockres is in RECOCERING state */
2424 	if (res->state & DLM_LOCK_RES_RECOVERING)
2425 		return 0;
2426 
2427 	if (res->owner != dlm->node_num)
2428 		return 0;
2429 
2430         for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2431 		queue = dlm_list_idx_to_ptr(res, idx);
2432 		list_for_each_entry(lock, queue, list) {
2433 			if (lock->ml.node != dlm->node_num) {
2434 				nonlocal++;
2435 				continue;
2436 			}
2437 			cookie = be64_to_cpu(lock->ml.cookie);
2438 			mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
2439 			     "%s list\n", dlm->name, res->lockname.len,
2440 			     res->lockname.name,
2441 			     dlm_get_lock_cookie_node(cookie),
2442 			     dlm_get_lock_cookie_seq(cookie),
2443 			     dlm_list_in_text(idx));
2444 			return 0;
2445 		}
2446 	}
2447 
2448 	if (!nonlocal) {
2449 		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2450 		if (node_ref >= O2NM_MAX_NODES)
2451 			return 0;
2452 	}
2453 
2454 	mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
2455 	     res->lockname.name);
2456 
2457 	return 1;
2458 }
2459 
2460 /*
2461  * DLM_MIGRATE_LOCKRES
2462  */
2463 
2464 
2465 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2466 			       struct dlm_lock_resource *res, u8 target)
2467 {
2468 	struct dlm_master_list_entry *mle = NULL;
2469 	struct dlm_master_list_entry *oldmle = NULL;
2470  	struct dlm_migratable_lockres *mres = NULL;
2471 	int ret = 0;
2472 	const char *name;
2473 	unsigned int namelen;
2474 	int mle_added = 0;
2475 	int wake = 0;
2476 
2477 	if (!dlm_grab(dlm))
2478 		return -EINVAL;
2479 
2480 	BUG_ON(target == O2NM_MAX_NODES);
2481 
2482 	name = res->lockname.name;
2483 	namelen = res->lockname.len;
2484 
2485 	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2486 	     target);
2487 
2488 	/* preallocate up front. if this fails, abort */
2489 	ret = -ENOMEM;
2490 	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2491 	if (!mres) {
2492 		mlog_errno(ret);
2493 		goto leave;
2494 	}
2495 
2496 	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2497 	if (!mle) {
2498 		mlog_errno(ret);
2499 		goto leave;
2500 	}
2501 	ret = 0;
2502 
2503 	/*
2504 	 * clear any existing master requests and
2505 	 * add the migration mle to the list
2506 	 */
2507 	spin_lock(&dlm->spinlock);
2508 	spin_lock(&dlm->master_lock);
2509 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2510 				    namelen, target, dlm->node_num);
2511 	spin_unlock(&dlm->master_lock);
2512 	spin_unlock(&dlm->spinlock);
2513 
2514 	if (ret == -EEXIST) {
2515 		mlog(0, "another process is already migrating it\n");
2516 		goto fail;
2517 	}
2518 	mle_added = 1;
2519 
2520 	/*
2521 	 * set the MIGRATING flag and flush asts
2522 	 * if we fail after this we need to re-dirty the lockres
2523 	 */
2524 	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2525 		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2526 		     "the target went down.\n", res->lockname.len,
2527 		     res->lockname.name, target);
2528 		spin_lock(&res->spinlock);
2529 		res->state &= ~DLM_LOCK_RES_MIGRATING;
2530 		wake = 1;
2531 		spin_unlock(&res->spinlock);
2532 		ret = -EINVAL;
2533 	}
2534 
2535 fail:
2536 	if (oldmle) {
2537 		/* master is known, detach if not already detached */
2538 		dlm_mle_detach_hb_events(dlm, oldmle);
2539 		dlm_put_mle(oldmle);
2540 	}
2541 
2542 	if (ret < 0) {
2543 		if (mle_added) {
2544 			dlm_mle_detach_hb_events(dlm, mle);
2545 			dlm_put_mle(mle);
2546 		} else if (mle) {
2547 			kmem_cache_free(dlm_mle_cache, mle);
2548 			mle = NULL;
2549 		}
2550 		goto leave;
2551 	}
2552 
2553 	/*
2554 	 * at this point, we have a migration target, an mle
2555 	 * in the master list, and the MIGRATING flag set on
2556 	 * the lockres
2557 	 */
2558 
2559 	/* now that remote nodes are spinning on the MIGRATING flag,
2560 	 * ensure that all assert_master work is flushed. */
2561 	flush_workqueue(dlm->dlm_worker);
2562 
2563 	/* get an extra reference on the mle.
2564 	 * otherwise the assert_master from the new
2565 	 * master will destroy this.
2566 	 * also, make sure that all callers of dlm_get_mle
2567 	 * take both dlm->spinlock and dlm->master_lock */
2568 	spin_lock(&dlm->spinlock);
2569 	spin_lock(&dlm->master_lock);
2570 	dlm_get_mle_inuse(mle);
2571 	spin_unlock(&dlm->master_lock);
2572 	spin_unlock(&dlm->spinlock);
2573 
2574 	/* notify new node and send all lock state */
2575 	/* call send_one_lockres with migration flag.
2576 	 * this serves as notice to the target node that a
2577 	 * migration is starting. */
2578 	ret = dlm_send_one_lockres(dlm, res, mres, target,
2579 				   DLM_MRES_MIGRATION);
2580 
2581 	if (ret < 0) {
2582 		mlog(0, "migration to node %u failed with %d\n",
2583 		     target, ret);
2584 		/* migration failed, detach and clean up mle */
2585 		dlm_mle_detach_hb_events(dlm, mle);
2586 		dlm_put_mle(mle);
2587 		dlm_put_mle_inuse(mle);
2588 		spin_lock(&res->spinlock);
2589 		res->state &= ~DLM_LOCK_RES_MIGRATING;
2590 		wake = 1;
2591 		spin_unlock(&res->spinlock);
2592 		if (dlm_is_host_down(ret))
2593 			dlm_wait_for_node_death(dlm, target,
2594 						DLM_NODE_DEATH_WAIT_MAX);
2595 		goto leave;
2596 	}
2597 
2598 	/* at this point, the target sends a message to all nodes,
2599 	 * (using dlm_do_migrate_request).  this node is skipped since
2600 	 * we had to put an mle in the list to begin the process.  this
2601 	 * node now waits for target to do an assert master.  this node
2602 	 * will be the last one notified, ensuring that the migration
2603 	 * is complete everywhere.  if the target dies while this is
2604 	 * going on, some nodes could potentially see the target as the
2605 	 * master, so it is important that my recovery finds the migration
2606 	 * mle and sets the master to UNKNOWN. */
2607 
2608 
2609 	/* wait for new node to assert master */
2610 	while (1) {
2611 		ret = wait_event_interruptible_timeout(mle->wq,
2612 					(atomic_read(&mle->woken) == 1),
2613 					msecs_to_jiffies(5000));
2614 
2615 		if (ret >= 0) {
2616 		       	if (atomic_read(&mle->woken) == 1 ||
2617 			    res->owner == target)
2618 				break;
2619 
2620 			mlog(0, "%s:%.*s: timed out during migration\n",
2621 			     dlm->name, res->lockname.len, res->lockname.name);
2622 			/* avoid hang during shutdown when migrating lockres
2623 			 * to a node which also goes down */
2624 			if (dlm_is_node_dead(dlm, target)) {
2625 				mlog(0, "%s:%.*s: expected migration "
2626 				     "target %u is no longer up, restarting\n",
2627 				     dlm->name, res->lockname.len,
2628 				     res->lockname.name, target);
2629 				ret = -EINVAL;
2630 				/* migration failed, detach and clean up mle */
2631 				dlm_mle_detach_hb_events(dlm, mle);
2632 				dlm_put_mle(mle);
2633 				dlm_put_mle_inuse(mle);
2634 				spin_lock(&res->spinlock);
2635 				res->state &= ~DLM_LOCK_RES_MIGRATING;
2636 				wake = 1;
2637 				spin_unlock(&res->spinlock);
2638 				goto leave;
2639 			}
2640 		} else
2641 			mlog(0, "%s:%.*s: caught signal during migration\n",
2642 			     dlm->name, res->lockname.len, res->lockname.name);
2643 	}
2644 
2645 	/* all done, set the owner, clear the flag */
2646 	spin_lock(&res->spinlock);
2647 	dlm_set_lockres_owner(dlm, res, target);
2648 	res->state &= ~DLM_LOCK_RES_MIGRATING;
2649 	dlm_remove_nonlocal_locks(dlm, res);
2650 	spin_unlock(&res->spinlock);
2651 	wake_up(&res->wq);
2652 
2653 	/* master is known, detach if not already detached */
2654 	dlm_mle_detach_hb_events(dlm, mle);
2655 	dlm_put_mle_inuse(mle);
2656 	ret = 0;
2657 
2658 	dlm_lockres_calc_usage(dlm, res);
2659 
2660 leave:
2661 	/* re-dirty the lockres if we failed */
2662 	if (ret < 0)
2663 		dlm_kick_thread(dlm, res);
2664 
2665 	/* wake up waiters if the MIGRATING flag got set
2666 	 * but migration failed */
2667 	if (wake)
2668 		wake_up(&res->wq);
2669 
2670 	if (mres)
2671 		free_page((unsigned long)mres);
2672 
2673 	dlm_put(dlm);
2674 
2675 	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2676 	     name, target, ret);
2677 	return ret;
2678 }
2679 
2680 #define DLM_MIGRATION_RETRY_MS  100
2681 
2682 /*
2683  * Should be called only after beginning the domain leave process.
2684  * There should not be any remaining locks on nonlocal lock resources,
2685  * and there should be no local locks left on locally mastered resources.
2686  *
2687  * Called with the dlm spinlock held, may drop it to do migration, but
2688  * will re-acquire before exit.
2689  *
2690  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2691  */
2692 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2693 {
2694 	int ret;
2695 	int lock_dropped = 0;
2696 	u8 target = O2NM_MAX_NODES;
2697 
2698 	assert_spin_locked(&dlm->spinlock);
2699 
2700 	spin_lock(&res->spinlock);
2701 	if (dlm_is_lockres_migrateable(dlm, res))
2702 		target = dlm_pick_migration_target(dlm, res);
2703 	spin_unlock(&res->spinlock);
2704 
2705 	if (target == O2NM_MAX_NODES)
2706 		goto leave;
2707 
2708 	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2709 	spin_unlock(&dlm->spinlock);
2710 	lock_dropped = 1;
2711 	ret = dlm_migrate_lockres(dlm, res, target);
2712 	if (ret)
2713 		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2714 		     dlm->name, res->lockname.len, res->lockname.name,
2715 		     target, ret);
2716 	spin_lock(&dlm->spinlock);
2717 leave:
2718 	return lock_dropped;
2719 }
2720 
2721 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2722 {
2723 	int ret;
2724 	spin_lock(&dlm->ast_lock);
2725 	spin_lock(&lock->spinlock);
2726 	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2727 	spin_unlock(&lock->spinlock);
2728 	spin_unlock(&dlm->ast_lock);
2729 	return ret;
2730 }
2731 
2732 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2733 				     struct dlm_lock_resource *res,
2734 				     u8 mig_target)
2735 {
2736 	int can_proceed;
2737 	spin_lock(&res->spinlock);
2738 	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2739 	spin_unlock(&res->spinlock);
2740 
2741 	/* target has died, so make the caller break out of the
2742 	 * wait_event, but caller must recheck the domain_map */
2743 	spin_lock(&dlm->spinlock);
2744 	if (!test_bit(mig_target, dlm->domain_map))
2745 		can_proceed = 1;
2746 	spin_unlock(&dlm->spinlock);
2747 	return can_proceed;
2748 }
2749 
2750 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2751 				struct dlm_lock_resource *res)
2752 {
2753 	int ret;
2754 	spin_lock(&res->spinlock);
2755 	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2756 	spin_unlock(&res->spinlock);
2757 	return ret;
2758 }
2759 
2760 
2761 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2762 				       struct dlm_lock_resource *res,
2763 				       u8 target)
2764 {
2765 	int ret = 0;
2766 
2767 	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2768 	       res->lockname.len, res->lockname.name, dlm->node_num,
2769 	       target);
2770 	/* need to set MIGRATING flag on lockres.  this is done by
2771 	 * ensuring that all asts have been flushed for this lockres. */
2772 	spin_lock(&res->spinlock);
2773 	BUG_ON(res->migration_pending);
2774 	res->migration_pending = 1;
2775 	/* strategy is to reserve an extra ast then release
2776 	 * it below, letting the release do all of the work */
2777 	__dlm_lockres_reserve_ast(res);
2778 	spin_unlock(&res->spinlock);
2779 
2780 	/* now flush all the pending asts */
2781 	dlm_kick_thread(dlm, res);
2782 	/* before waiting on DIRTY, block processes which may
2783 	 * try to dirty the lockres before MIGRATING is set */
2784 	spin_lock(&res->spinlock);
2785 	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2786 	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2787 	spin_unlock(&res->spinlock);
2788 	/* now wait on any pending asts and the DIRTY state */
2789 	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2790 	dlm_lockres_release_ast(dlm, res);
2791 
2792 	mlog(0, "about to wait on migration_wq, dirty=%s\n",
2793 	       res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2794 	/* if the extra ref we just put was the final one, this
2795 	 * will pass thru immediately.  otherwise, we need to wait
2796 	 * for the last ast to finish. */
2797 again:
2798 	ret = wait_event_interruptible_timeout(dlm->migration_wq,
2799 		   dlm_migration_can_proceed(dlm, res, target),
2800 		   msecs_to_jiffies(1000));
2801 	if (ret < 0) {
2802 		mlog(0, "woken again: migrating? %s, dead? %s\n",
2803 		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2804 		       test_bit(target, dlm->domain_map) ? "no":"yes");
2805 	} else {
2806 		mlog(0, "all is well: migrating? %s, dead? %s\n",
2807 		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2808 		       test_bit(target, dlm->domain_map) ? "no":"yes");
2809 	}
2810 	if (!dlm_migration_can_proceed(dlm, res, target)) {
2811 		mlog(0, "trying again...\n");
2812 		goto again;
2813 	}
2814 
2815 	ret = 0;
2816 	/* did the target go down or die? */
2817 	spin_lock(&dlm->spinlock);
2818 	if (!test_bit(target, dlm->domain_map)) {
2819 		mlog(ML_ERROR, "aha. migration target %u just went down\n",
2820 		     target);
2821 		ret = -EHOSTDOWN;
2822 	}
2823 	spin_unlock(&dlm->spinlock);
2824 
2825 	/*
2826 	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2827 	 * another try; otherwise, we are sure the MIGRATING state is there,
2828 	 * drop the unneded state which blocked threads trying to DIRTY
2829 	 */
2830 	spin_lock(&res->spinlock);
2831 	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2832 	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2833 	if (!ret)
2834 		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2835 	spin_unlock(&res->spinlock);
2836 
2837 	/*
2838 	 * at this point:
2839 	 *
2840 	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2841 	 *   o there are no pending asts on this lockres
2842 	 *   o all processes trying to reserve an ast on this
2843 	 *     lockres must wait for the MIGRATING flag to clear
2844 	 */
2845 	return ret;
2846 }
2847 
2848 /* last step in the migration process.
2849  * original master calls this to free all of the dlm_lock
2850  * structures that used to be for other nodes. */
2851 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2852 				      struct dlm_lock_resource *res)
2853 {
2854 	struct list_head *queue = &res->granted;
2855 	int i, bit;
2856 	struct dlm_lock *lock, *next;
2857 
2858 	assert_spin_locked(&res->spinlock);
2859 
2860 	BUG_ON(res->owner == dlm->node_num);
2861 
2862 	for (i=0; i<3; i++) {
2863 		list_for_each_entry_safe(lock, next, queue, list) {
2864 			if (lock->ml.node != dlm->node_num) {
2865 				mlog(0, "putting lock for node %u\n",
2866 				     lock->ml.node);
2867 				/* be extra careful */
2868 				BUG_ON(!list_empty(&lock->ast_list));
2869 				BUG_ON(!list_empty(&lock->bast_list));
2870 				BUG_ON(lock->ast_pending);
2871 				BUG_ON(lock->bast_pending);
2872 				dlm_lockres_clear_refmap_bit(dlm, res,
2873 							     lock->ml.node);
2874 				list_del_init(&lock->list);
2875 				dlm_lock_put(lock);
2876 				/* In a normal unlock, we would have added a
2877 				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2878 				dlm_lock_put(lock);
2879 			}
2880 		}
2881 		queue++;
2882 	}
2883 	bit = 0;
2884 	while (1) {
2885 		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2886 		if (bit >= O2NM_MAX_NODES)
2887 			break;
2888 		/* do not clear the local node reference, if there is a
2889 		 * process holding this, let it drop the ref itself */
2890 		if (bit != dlm->node_num) {
2891 			mlog(0, "%s:%.*s: node %u had a ref to this "
2892 			     "migrating lockres, clearing\n", dlm->name,
2893 			     res->lockname.len, res->lockname.name, bit);
2894 			dlm_lockres_clear_refmap_bit(dlm, res, bit);
2895 		}
2896 		bit++;
2897 	}
2898 }
2899 
2900 /*
2901  * Pick a node to migrate the lock resource to. This function selects a
2902  * potential target based first on the locks and then on refmap. It skips
2903  * nodes that are in the process of exiting the domain.
2904  */
2905 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2906 				    struct dlm_lock_resource *res)
2907 {
2908 	enum dlm_lockres_list idx;
2909 	struct list_head *queue = &res->granted;
2910 	struct dlm_lock *lock;
2911 	int noderef;
2912 	u8 nodenum = O2NM_MAX_NODES;
2913 
2914 	assert_spin_locked(&dlm->spinlock);
2915 	assert_spin_locked(&res->spinlock);
2916 
2917 	/* Go through all the locks */
2918 	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2919 		queue = dlm_list_idx_to_ptr(res, idx);
2920 		list_for_each_entry(lock, queue, list) {
2921 			if (lock->ml.node == dlm->node_num)
2922 				continue;
2923 			if (test_bit(lock->ml.node, dlm->exit_domain_map))
2924 				continue;
2925 			nodenum = lock->ml.node;
2926 			goto bail;
2927 		}
2928 	}
2929 
2930 	/* Go thru the refmap */
2931 	noderef = -1;
2932 	while (1) {
2933 		noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
2934 					noderef + 1);
2935 		if (noderef >= O2NM_MAX_NODES)
2936 			break;
2937 		if (noderef == dlm->node_num)
2938 			continue;
2939 		if (test_bit(noderef, dlm->exit_domain_map))
2940 			continue;
2941 		nodenum = noderef;
2942 		goto bail;
2943 	}
2944 
2945 bail:
2946 	return nodenum;
2947 }
2948 
2949 /* this is called by the new master once all lockres
2950  * data has been received */
2951 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2952 				  struct dlm_lock_resource *res,
2953 				  u8 master, u8 new_master,
2954 				  struct dlm_node_iter *iter)
2955 {
2956 	struct dlm_migrate_request migrate;
2957 	int ret, skip, status = 0;
2958 	int nodenum;
2959 
2960 	memset(&migrate, 0, sizeof(migrate));
2961 	migrate.namelen = res->lockname.len;
2962 	memcpy(migrate.name, res->lockname.name, migrate.namelen);
2963 	migrate.new_master = new_master;
2964 	migrate.master = master;
2965 
2966 	ret = 0;
2967 
2968 	/* send message to all nodes, except the master and myself */
2969 	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2970 		if (nodenum == master ||
2971 		    nodenum == new_master)
2972 			continue;
2973 
2974 		/* We could race exit domain. If exited, skip. */
2975 		spin_lock(&dlm->spinlock);
2976 		skip = (!test_bit(nodenum, dlm->domain_map));
2977 		spin_unlock(&dlm->spinlock);
2978 		if (skip) {
2979 			clear_bit(nodenum, iter->node_map);
2980 			continue;
2981 		}
2982 
2983 		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2984 					 &migrate, sizeof(migrate), nodenum,
2985 					 &status);
2986 		if (ret < 0) {
2987 			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
2988 			     "MIGRATE_REQUEST to node %u\n", dlm->name,
2989 			     migrate.namelen, migrate.name, ret, nodenum);
2990 			if (!dlm_is_host_down(ret)) {
2991 				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2992 				BUG();
2993 			}
2994 			clear_bit(nodenum, iter->node_map);
2995 			ret = 0;
2996 		} else if (status < 0) {
2997 			mlog(0, "migrate request (node %u) returned %d!\n",
2998 			     nodenum, status);
2999 			ret = status;
3000 		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3001 			/* during the migration request we short-circuited
3002 			 * the mastery of the lockres.  make sure we have
3003 			 * a mastery ref for nodenum */
3004 			mlog(0, "%s:%.*s: need ref for node %u\n",
3005 			     dlm->name, res->lockname.len, res->lockname.name,
3006 			     nodenum);
3007 			spin_lock(&res->spinlock);
3008 			dlm_lockres_set_refmap_bit(dlm, res, nodenum);
3009 			spin_unlock(&res->spinlock);
3010 		}
3011 	}
3012 
3013 	if (ret < 0)
3014 		mlog_errno(ret);
3015 
3016 	mlog(0, "returning ret=%d\n", ret);
3017 	return ret;
3018 }
3019 
3020 
3021 /* if there is an existing mle for this lockres, we now know who the master is.
3022  * (the one who sent us *this* message) we can clear it up right away.
3023  * since the process that put the mle on the list still has a reference to it,
3024  * we can unhash it now, set the master and wake the process.  as a result,
3025  * we will have no mle in the list to start with.  now we can add an mle for
3026  * the migration and this should be the only one found for those scanning the
3027  * list.  */
3028 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3029 				void **ret_data)
3030 {
3031 	struct dlm_ctxt *dlm = data;
3032 	struct dlm_lock_resource *res = NULL;
3033 	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3034 	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3035 	const char *name;
3036 	unsigned int namelen, hash;
3037 	int ret = 0;
3038 
3039 	if (!dlm_grab(dlm))
3040 		return -EINVAL;
3041 
3042 	name = migrate->name;
3043 	namelen = migrate->namelen;
3044 	hash = dlm_lockid_hash(name, namelen);
3045 
3046 	/* preallocate.. if this fails, abort */
3047 	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3048 
3049 	if (!mle) {
3050 		ret = -ENOMEM;
3051 		goto leave;
3052 	}
3053 
3054 	/* check for pre-existing lock */
3055 	spin_lock(&dlm->spinlock);
3056 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3057 	if (res) {
3058 		spin_lock(&res->spinlock);
3059 		if (res->state & DLM_LOCK_RES_RECOVERING) {
3060 			/* if all is working ok, this can only mean that we got
3061 		 	* a migrate request from a node that we now see as
3062 		 	* dead.  what can we do here?  drop it to the floor? */
3063 			spin_unlock(&res->spinlock);
3064 			mlog(ML_ERROR, "Got a migrate request, but the "
3065 			     "lockres is marked as recovering!");
3066 			kmem_cache_free(dlm_mle_cache, mle);
3067 			ret = -EINVAL; /* need a better solution */
3068 			goto unlock;
3069 		}
3070 		res->state |= DLM_LOCK_RES_MIGRATING;
3071 		spin_unlock(&res->spinlock);
3072 	}
3073 
3074 	spin_lock(&dlm->master_lock);
3075 	/* ignore status.  only nonzero status would BUG. */
3076 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3077 				    name, namelen,
3078 				    migrate->new_master,
3079 				    migrate->master);
3080 
3081 	spin_unlock(&dlm->master_lock);
3082 unlock:
3083 	spin_unlock(&dlm->spinlock);
3084 
3085 	if (oldmle) {
3086 		/* master is known, detach if not already detached */
3087 		dlm_mle_detach_hb_events(dlm, oldmle);
3088 		dlm_put_mle(oldmle);
3089 	}
3090 
3091 	if (res)
3092 		dlm_lockres_put(res);
3093 leave:
3094 	dlm_put(dlm);
3095 	return ret;
3096 }
3097 
3098 /* must be holding dlm->spinlock and dlm->master_lock
3099  * when adding a migration mle, we can clear any other mles
3100  * in the master list because we know with certainty that
3101  * the master is "master".  so we remove any old mle from
3102  * the list after setting it's master field, and then add
3103  * the new migration mle.  this way we can hold with the rule
3104  * of having only one mle for a given lock name at all times. */
3105 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3106 				 struct dlm_lock_resource *res,
3107 				 struct dlm_master_list_entry *mle,
3108 				 struct dlm_master_list_entry **oldmle,
3109 				 const char *name, unsigned int namelen,
3110 				 u8 new_master, u8 master)
3111 {
3112 	int found;
3113 	int ret = 0;
3114 
3115 	*oldmle = NULL;
3116 
3117 	assert_spin_locked(&dlm->spinlock);
3118 	assert_spin_locked(&dlm->master_lock);
3119 
3120 	/* caller is responsible for any ref taken here on oldmle */
3121 	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3122 	if (found) {
3123 		struct dlm_master_list_entry *tmp = *oldmle;
3124 		spin_lock(&tmp->spinlock);
3125 		if (tmp->type == DLM_MLE_MIGRATION) {
3126 			if (master == dlm->node_num) {
3127 				/* ah another process raced me to it */
3128 				mlog(0, "tried to migrate %.*s, but some "
3129 				     "process beat me to it\n",
3130 				     namelen, name);
3131 				ret = -EEXIST;
3132 			} else {
3133 				/* bad.  2 NODES are trying to migrate! */
3134 				mlog(ML_ERROR, "migration error  mle: "
3135 				     "master=%u new_master=%u // request: "
3136 				     "master=%u new_master=%u // "
3137 				     "lockres=%.*s\n",
3138 				     tmp->master, tmp->new_master,
3139 				     master, new_master,
3140 				     namelen, name);
3141 				BUG();
3142 			}
3143 		} else {
3144 			/* this is essentially what assert_master does */
3145 			tmp->master = master;
3146 			atomic_set(&tmp->woken, 1);
3147 			wake_up(&tmp->wq);
3148 			/* remove it so that only one mle will be found */
3149 			__dlm_unlink_mle(dlm, tmp);
3150 			__dlm_mle_detach_hb_events(dlm, tmp);
3151 			if (tmp->type == DLM_MLE_MASTER) {
3152 				ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3153 				mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3154 						"telling master to get ref "
3155 						"for cleared out mle during "
3156 						"migration\n", dlm->name,
3157 						namelen, name, master,
3158 						new_master);
3159 			}
3160 		}
3161 		spin_unlock(&tmp->spinlock);
3162 	}
3163 
3164 	/* now add a migration mle to the tail of the list */
3165 	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3166 	mle->new_master = new_master;
3167 	/* the new master will be sending an assert master for this.
3168 	 * at that point we will get the refmap reference */
3169 	mle->master = master;
3170 	/* do this for consistency with other mle types */
3171 	set_bit(new_master, mle->maybe_map);
3172 	__dlm_insert_mle(dlm, mle);
3173 
3174 	return ret;
3175 }
3176 
3177 /*
3178  * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3179  */
3180 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3181 					struct dlm_master_list_entry *mle)
3182 {
3183 	struct dlm_lock_resource *res;
3184 
3185 	/* Find the lockres associated to the mle and set its owner to UNK */
3186 	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3187 				   mle->mnamehash);
3188 	if (res) {
3189 		spin_unlock(&dlm->master_lock);
3190 
3191 		/* move lockres onto recovery list */
3192 		spin_lock(&res->spinlock);
3193 		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3194 		dlm_move_lockres_to_recovery_list(dlm, res);
3195 		spin_unlock(&res->spinlock);
3196 		dlm_lockres_put(res);
3197 
3198 		/* about to get rid of mle, detach from heartbeat */
3199 		__dlm_mle_detach_hb_events(dlm, mle);
3200 
3201 		/* dump the mle */
3202 		spin_lock(&dlm->master_lock);
3203 		__dlm_put_mle(mle);
3204 		spin_unlock(&dlm->master_lock);
3205 	}
3206 
3207 	return res;
3208 }
3209 
3210 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3211 				    struct dlm_master_list_entry *mle)
3212 {
3213 	__dlm_mle_detach_hb_events(dlm, mle);
3214 
3215 	spin_lock(&mle->spinlock);
3216 	__dlm_unlink_mle(dlm, mle);
3217 	atomic_set(&mle->woken, 1);
3218 	spin_unlock(&mle->spinlock);
3219 
3220 	wake_up(&mle->wq);
3221 }
3222 
3223 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3224 				struct dlm_master_list_entry *mle, u8 dead_node)
3225 {
3226 	int bit;
3227 
3228 	BUG_ON(mle->type != DLM_MLE_BLOCK);
3229 
3230 	spin_lock(&mle->spinlock);
3231 	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3232 	if (bit != dead_node) {
3233 		mlog(0, "mle found, but dead node %u would not have been "
3234 		     "master\n", dead_node);
3235 		spin_unlock(&mle->spinlock);
3236 	} else {
3237 		/* Must drop the refcount by one since the assert_master will
3238 		 * never arrive. This may result in the mle being unlinked and
3239 		 * freed, but there may still be a process waiting in the
3240 		 * dlmlock path which is fine. */
3241 		mlog(0, "node %u was expected master\n", dead_node);
3242 		atomic_set(&mle->woken, 1);
3243 		spin_unlock(&mle->spinlock);
3244 		wake_up(&mle->wq);
3245 
3246 		/* Do not need events any longer, so detach from heartbeat */
3247 		__dlm_mle_detach_hb_events(dlm, mle);
3248 		__dlm_put_mle(mle);
3249 	}
3250 }
3251 
3252 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3253 {
3254 	struct dlm_master_list_entry *mle;
3255 	struct dlm_lock_resource *res;
3256 	struct hlist_head *bucket;
3257 	struct hlist_node *tmp;
3258 	unsigned int i;
3259 
3260 	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3261 top:
3262 	assert_spin_locked(&dlm->spinlock);
3263 
3264 	/* clean the master list */
3265 	spin_lock(&dlm->master_lock);
3266 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3267 		bucket = dlm_master_hash(dlm, i);
3268 		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3269 			BUG_ON(mle->type != DLM_MLE_BLOCK &&
3270 			       mle->type != DLM_MLE_MASTER &&
3271 			       mle->type != DLM_MLE_MIGRATION);
3272 
3273 			/* MASTER mles are initiated locally. The waiting
3274 			 * process will notice the node map change shortly.
3275 			 * Let that happen as normal. */
3276 			if (mle->type == DLM_MLE_MASTER)
3277 				continue;
3278 
3279 			/* BLOCK mles are initiated by other nodes. Need to
3280 			 * clean up if the dead node would have been the
3281 			 * master. */
3282 			if (mle->type == DLM_MLE_BLOCK) {
3283 				dlm_clean_block_mle(dlm, mle, dead_node);
3284 				continue;
3285 			}
3286 
3287 			/* Everything else is a MIGRATION mle */
3288 
3289 			/* The rule for MIGRATION mles is that the master
3290 			 * becomes UNKNOWN if *either* the original or the new
3291 			 * master dies. All UNKNOWN lockres' are sent to
3292 			 * whichever node becomes the recovery master. The new
3293 			 * master is responsible for determining if there is
3294 			 * still a master for this lockres, or if he needs to
3295 			 * take over mastery. Either way, this node should
3296 			 * expect another message to resolve this. */
3297 
3298 			if (mle->master != dead_node &&
3299 			    mle->new_master != dead_node)
3300 				continue;
3301 
3302 			/* If we have reached this point, this mle needs to be
3303 			 * removed from the list and freed. */
3304 			dlm_clean_migration_mle(dlm, mle);
3305 
3306 			mlog(0, "%s: node %u died during migration from "
3307 			     "%u to %u!\n", dlm->name, dead_node, mle->master,
3308 			     mle->new_master);
3309 
3310 			/* If we find a lockres associated with the mle, we've
3311 			 * hit this rare case that messes up our lock ordering.
3312 			 * If so, we need to drop the master lock so that we can
3313 			 * take the lockres lock, meaning that we will have to
3314 			 * restart from the head of list. */
3315 			res = dlm_reset_mleres_owner(dlm, mle);
3316 			if (res)
3317 				/* restart */
3318 				goto top;
3319 
3320 			/* This may be the last reference */
3321 			__dlm_put_mle(mle);
3322 		}
3323 	}
3324 	spin_unlock(&dlm->master_lock);
3325 }
3326 
3327 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3328 			 u8 old_master)
3329 {
3330 	struct dlm_node_iter iter;
3331 	int ret = 0;
3332 
3333 	spin_lock(&dlm->spinlock);
3334 	dlm_node_iter_init(dlm->domain_map, &iter);
3335 	clear_bit(old_master, iter.node_map);
3336 	clear_bit(dlm->node_num, iter.node_map);
3337 	spin_unlock(&dlm->spinlock);
3338 
3339 	/* ownership of the lockres is changing.  account for the
3340 	 * mastery reference here since old_master will briefly have
3341 	 * a reference after the migration completes */
3342 	spin_lock(&res->spinlock);
3343 	dlm_lockres_set_refmap_bit(dlm, res, old_master);
3344 	spin_unlock(&res->spinlock);
3345 
3346 	mlog(0, "now time to do a migrate request to other nodes\n");
3347 	ret = dlm_do_migrate_request(dlm, res, old_master,
3348 				     dlm->node_num, &iter);
3349 	if (ret < 0) {
3350 		mlog_errno(ret);
3351 		goto leave;
3352 	}
3353 
3354 	mlog(0, "doing assert master of %.*s to all except the original node\n",
3355 	     res->lockname.len, res->lockname.name);
3356 	/* this call now finishes out the nodemap
3357 	 * even if one or more nodes die */
3358 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3359 				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3360 	if (ret < 0) {
3361 		/* no longer need to retry.  all living nodes contacted. */
3362 		mlog_errno(ret);
3363 		ret = 0;
3364 	}
3365 
3366 	memset(iter.node_map, 0, sizeof(iter.node_map));
3367 	set_bit(old_master, iter.node_map);
3368 	mlog(0, "doing assert master of %.*s back to %u\n",
3369 	     res->lockname.len, res->lockname.name, old_master);
3370 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3371 				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3372 	if (ret < 0) {
3373 		mlog(0, "assert master to original master failed "
3374 		     "with %d.\n", ret);
3375 		/* the only nonzero status here would be because of
3376 		 * a dead original node.  we're done. */
3377 		ret = 0;
3378 	}
3379 
3380 	/* all done, set the owner, clear the flag */
3381 	spin_lock(&res->spinlock);
3382 	dlm_set_lockres_owner(dlm, res, dlm->node_num);
3383 	res->state &= ~DLM_LOCK_RES_MIGRATING;
3384 	spin_unlock(&res->spinlock);
3385 	/* re-dirty it on the new master */
3386 	dlm_kick_thread(dlm, res);
3387 	wake_up(&res->wq);
3388 leave:
3389 	return ret;
3390 }
3391 
3392 /*
3393  * LOCKRES AST REFCOUNT
3394  * this is integral to migration
3395  */
3396 
3397 /* for future intent to call an ast, reserve one ahead of time.
3398  * this should be called only after waiting on the lockres
3399  * with dlm_wait_on_lockres, and while still holding the
3400  * spinlock after the call. */
3401 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3402 {
3403 	assert_spin_locked(&res->spinlock);
3404 	if (res->state & DLM_LOCK_RES_MIGRATING) {
3405 		__dlm_print_one_lock_resource(res);
3406 	}
3407 	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3408 
3409 	atomic_inc(&res->asts_reserved);
3410 }
3411 
3412 /*
3413  * used to drop the reserved ast, either because it went unused,
3414  * or because the ast/bast was actually called.
3415  *
3416  * also, if there is a pending migration on this lockres,
3417  * and this was the last pending ast on the lockres,
3418  * atomically set the MIGRATING flag before we drop the lock.
3419  * this is how we ensure that migration can proceed with no
3420  * asts in progress.  note that it is ok if the state of the
3421  * queues is such that a lock should be granted in the future
3422  * or that a bast should be fired, because the new master will
3423  * shuffle the lists on this lockres as soon as it is migrated.
3424  */
3425 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3426 			     struct dlm_lock_resource *res)
3427 {
3428 	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3429 		return;
3430 
3431 	if (!res->migration_pending) {
3432 		spin_unlock(&res->spinlock);
3433 		return;
3434 	}
3435 
3436 	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3437 	res->migration_pending = 0;
3438 	res->state |= DLM_LOCK_RES_MIGRATING;
3439 	spin_unlock(&res->spinlock);
3440 	wake_up(&res->wq);
3441 	wake_up(&dlm->migration_wq);
3442 }
3443 
3444 void dlm_force_free_mles(struct dlm_ctxt *dlm)
3445 {
3446 	int i;
3447 	struct hlist_head *bucket;
3448 	struct dlm_master_list_entry *mle;
3449 	struct hlist_node *tmp;
3450 
3451 	/*
3452 	 * We notified all other nodes that we are exiting the domain and
3453 	 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
3454 	 * around we force free them and wake any processes that are waiting
3455 	 * on the mles
3456 	 */
3457 	spin_lock(&dlm->spinlock);
3458 	spin_lock(&dlm->master_lock);
3459 
3460 	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3461 	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3462 
3463 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3464 		bucket = dlm_master_hash(dlm, i);
3465 		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3466 			if (mle->type != DLM_MLE_BLOCK) {
3467 				mlog(ML_ERROR, "bad mle: %p\n", mle);
3468 				dlm_print_one_mle(mle);
3469 			}
3470 			atomic_set(&mle->woken, 1);
3471 			wake_up(&mle->wq);
3472 
3473 			__dlm_unlink_mle(dlm, mle);
3474 			__dlm_mle_detach_hb_events(dlm, mle);
3475 			__dlm_put_mle(mle);
3476 		}
3477 	}
3478 	spin_unlock(&dlm->master_lock);
3479 	spin_unlock(&dlm->spinlock);
3480 }
3481