xref: /linux/fs/ocfs2/dlm/dlmmaster.c (revision 60e13231561b3a4c5269bfa1ef6c0569ad6f28ec)
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmmaster.c
5  *
6  * standalone DLM module
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26 
27 
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/init.h>
34 #include <linux/sysctl.h>
35 #include <linux/random.h>
36 #include <linux/blkdev.h>
37 #include <linux/socket.h>
38 #include <linux/inet.h>
39 #include <linux/spinlock.h>
40 #include <linux/delay.h>
41 
42 
43 #include "cluster/heartbeat.h"
44 #include "cluster/nodemanager.h"
45 #include "cluster/tcp.h"
46 
47 #include "dlmapi.h"
48 #include "dlmcommon.h"
49 #include "dlmdomain.h"
50 #include "dlmdebug.h"
51 
52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
53 #include "cluster/masklog.h"
54 
55 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
56 			      struct dlm_master_list_entry *mle,
57 			      struct o2nm_node *node,
58 			      int idx);
59 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
60 			    struct dlm_master_list_entry *mle,
61 			    struct o2nm_node *node,
62 			    int idx);
63 
64 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
65 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
66 				struct dlm_lock_resource *res,
67 				void *nodemap, u32 flags);
68 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
69 
70 static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
71 				struct dlm_master_list_entry *mle,
72 				const char *name,
73 				unsigned int namelen)
74 {
75 	if (dlm != mle->dlm)
76 		return 0;
77 
78 	if (namelen != mle->mnamelen ||
79 	    memcmp(name, mle->mname, namelen) != 0)
80 		return 0;
81 
82 	return 1;
83 }
84 
85 static struct kmem_cache *dlm_lockres_cache = NULL;
86 static struct kmem_cache *dlm_lockname_cache = NULL;
87 static struct kmem_cache *dlm_mle_cache = NULL;
88 
89 static void dlm_mle_release(struct kref *kref);
90 static void dlm_init_mle(struct dlm_master_list_entry *mle,
91 			enum dlm_mle_type type,
92 			struct dlm_ctxt *dlm,
93 			struct dlm_lock_resource *res,
94 			const char *name,
95 			unsigned int namelen);
96 static void dlm_put_mle(struct dlm_master_list_entry *mle);
97 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
98 static int dlm_find_mle(struct dlm_ctxt *dlm,
99 			struct dlm_master_list_entry **mle,
100 			char *name, unsigned int namelen);
101 
102 static int dlm_do_master_request(struct dlm_lock_resource *res,
103 				 struct dlm_master_list_entry *mle, int to);
104 
105 
106 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
107 				     struct dlm_lock_resource *res,
108 				     struct dlm_master_list_entry *mle,
109 				     int *blocked);
110 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
111 				    struct dlm_lock_resource *res,
112 				    struct dlm_master_list_entry *mle,
113 				    int blocked);
114 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
115 				 struct dlm_lock_resource *res,
116 				 struct dlm_master_list_entry *mle,
117 				 struct dlm_master_list_entry **oldmle,
118 				 const char *name, unsigned int namelen,
119 				 u8 new_master, u8 master);
120 
121 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
122 				    struct dlm_lock_resource *res);
123 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
124 				      struct dlm_lock_resource *res);
125 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
126 				       struct dlm_lock_resource *res,
127 				       u8 target);
128 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
129 				       struct dlm_lock_resource *res);
130 
131 
132 int dlm_is_host_down(int errno)
133 {
134 	switch (errno) {
135 		case -EBADF:
136 		case -ECONNREFUSED:
137 		case -ENOTCONN:
138 		case -ECONNRESET:
139 		case -EPIPE:
140 		case -EHOSTDOWN:
141 		case -EHOSTUNREACH:
142 		case -ETIMEDOUT:
143 		case -ECONNABORTED:
144 		case -ENETDOWN:
145 		case -ENETUNREACH:
146 		case -ENETRESET:
147 		case -ESHUTDOWN:
148 		case -ENOPROTOOPT:
149 		case -EINVAL:   /* if returned from our tcp code,
150 				   this means there is no socket */
151 			return 1;
152 	}
153 	return 0;
154 }
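/*
 * Illustrative sketch (not code from this file): callers treat any
 * errno for which dlm_is_host_down() returns 1 as node death rather
 * than as a retryable failure, roughly:
 *
 *	ret = o2net_send_message(..., to, &status);
 *	if (ret < 0) {
 *		if (!dlm_is_host_down(ret))
 *			BUG();
 *		mlog(0, "link to %d went down!\n", to);
 *	}
 */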
155 
156 
157 /*
158  * MASTER LIST FUNCTIONS
159  */
160 
161 
162 /*
163  * regarding master list entries and heartbeat callbacks:
164  *
165  * to avoid sleeping or allocating in the heartbeat path,
166  * master list entries are simply attached to the
167  * dlm's established heartbeat callbacks.  the mle is attached
168  * when it is created, and since the dlm->spinlock is held at
169  * that time, any heartbeat event will be properly discovered
170  * by the mle.  the mle needs to be detached from the
171  * dlm->mle_hb_events list as soon as heartbeat events are no
172  * longer useful to the mle, and before the mle is freed.
173  *
174  * as a general rule, heartbeat events are no longer needed by
175  * the mle once an "answer" regarding the lock master has been
176  * received.
177  */
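/*
 * A rough sketch of the resulting mle lifetime, pieced together from
 * the rules above (this exact sequence does not appear verbatim in
 * any one function):
 *
 *	spin_lock(&dlm->spinlock);
 *	dlm_init_mle(mle, ...);			attaches hb events internally
 *	spin_unlock(&dlm->spinlock);
 *	... master request / response traffic ...
 *	dlm_mle_detach_hb_events(dlm, mle);	an "answer" has arrived
 *	dlm_put_mle(mle);			may free the mle
 */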
178 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
179 					      struct dlm_master_list_entry *mle)
180 {
181 	assert_spin_locked(&dlm->spinlock);
182 
183 	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
184 }
185 
186 
187 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
188 					      struct dlm_master_list_entry *mle)
189 {
190 	if (!list_empty(&mle->hb_events))
191 		list_del_init(&mle->hb_events);
192 }
193 
194 
195 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
196 					    struct dlm_master_list_entry *mle)
197 {
198 	spin_lock(&dlm->spinlock);
199 	__dlm_mle_detach_hb_events(dlm, mle);
200 	spin_unlock(&dlm->spinlock);
201 }
202 
203 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
204 {
205 	struct dlm_ctxt *dlm;
206 	dlm = mle->dlm;
207 
208 	assert_spin_locked(&dlm->spinlock);
209 	assert_spin_locked(&dlm->master_lock);
210 	mle->inuse++;
211 	kref_get(&mle->mle_refs);
212 }
213 
214 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
215 {
216 	struct dlm_ctxt *dlm;
217 	dlm = mle->dlm;
218 
219 	spin_lock(&dlm->spinlock);
220 	spin_lock(&dlm->master_lock);
221 	mle->inuse--;
222 	__dlm_put_mle(mle);
223 	spin_unlock(&dlm->master_lock);
224 	spin_unlock(&dlm->spinlock);
225 
226 }
227 
228 /* remove from list and free */
229 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
230 {
231 	struct dlm_ctxt *dlm;
232 	dlm = mle->dlm;
233 
234 	assert_spin_locked(&dlm->spinlock);
235 	assert_spin_locked(&dlm->master_lock);
236 	if (!atomic_read(&mle->mle_refs.refcount)) {
237 		/* this may or may not crash, but who cares.
238 		 * it's a BUG. */
239 		mlog(ML_ERROR, "bad mle: %p\n", mle);
240 		dlm_print_one_mle(mle);
241 		BUG();
242 	} else
243 		kref_put(&mle->mle_refs, dlm_mle_release);
244 }
245 
246 
247 /* must not have any spinlocks coming in */
248 static void dlm_put_mle(struct dlm_master_list_entry *mle)
249 {
250 	struct dlm_ctxt *dlm;
251 	dlm = mle->dlm;
252 
253 	spin_lock(&dlm->spinlock);
254 	spin_lock(&dlm->master_lock);
255 	__dlm_put_mle(mle);
256 	spin_unlock(&dlm->master_lock);
257 	spin_unlock(&dlm->spinlock);
258 }
259 
260 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
261 {
262 	kref_get(&mle->mle_refs);
263 }
264 
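/*
 * Quick reference for the mle bitmaps set up below, summarized from
 * how the rest of this file uses them:
 *
 *	node_map	nodes that were up at creation time, kept
 *			current by the heartbeat callbacks
 *	vote_map	nodes whose master responses are expected
 *	response_map	nodes that have responded to our request
 *	maybe_map	nodes that might be (or become) the master
 */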
265 static void dlm_init_mle(struct dlm_master_list_entry *mle,
266 			enum dlm_mle_type type,
267 			struct dlm_ctxt *dlm,
268 			struct dlm_lock_resource *res,
269 			const char *name,
270 			unsigned int namelen)
271 {
272 	assert_spin_locked(&dlm->spinlock);
273 
274 	mle->dlm = dlm;
275 	mle->type = type;
276 	INIT_HLIST_NODE(&mle->master_hash_node);
277 	INIT_LIST_HEAD(&mle->hb_events);
278 	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
279 	spin_lock_init(&mle->spinlock);
280 	init_waitqueue_head(&mle->wq);
281 	atomic_set(&mle->woken, 0);
282 	kref_init(&mle->mle_refs);
283 	memset(mle->response_map, 0, sizeof(mle->response_map));
284 	mle->master = O2NM_MAX_NODES;
285 	mle->new_master = O2NM_MAX_NODES;
286 	mle->inuse = 0;
287 
288 	BUG_ON(mle->type != DLM_MLE_BLOCK &&
289 	       mle->type != DLM_MLE_MASTER &&
290 	       mle->type != DLM_MLE_MIGRATION);
291 
292 	if (mle->type == DLM_MLE_MASTER) {
293 		BUG_ON(!res);
294 		mle->mleres = res;
295 		memcpy(mle->mname, res->lockname.name, res->lockname.len);
296 		mle->mnamelen = res->lockname.len;
297 		mle->mnamehash = res->lockname.hash;
298 	} else {
299 		BUG_ON(!name);
300 		mle->mleres = NULL;
301 		memcpy(mle->mname, name, namelen);
302 		mle->mnamelen = namelen;
303 		mle->mnamehash = dlm_lockid_hash(name, namelen);
304 	}
305 
306 	atomic_inc(&dlm->mle_tot_count[mle->type]);
307 	atomic_inc(&dlm->mle_cur_count[mle->type]);
308 
309 	/* copy off the node_map and register hb callbacks on our copy */
310 	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
311 	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
312 	clear_bit(dlm->node_num, mle->vote_map);
313 	clear_bit(dlm->node_num, mle->node_map);
314 
315 	/* attach the mle to the domain node up/down events */
316 	__dlm_mle_attach_hb_events(dlm, mle);
317 }
318 
319 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
320 {
321 	assert_spin_locked(&dlm->spinlock);
322 	assert_spin_locked(&dlm->master_lock);
323 
324 	if (!hlist_unhashed(&mle->master_hash_node))
325 		hlist_del_init(&mle->master_hash_node);
326 }
327 
328 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
329 {
330 	struct hlist_head *bucket;
331 
332 	assert_spin_locked(&dlm->master_lock);
333 
334 	bucket = dlm_master_hash(dlm, mle->mnamehash);
335 	hlist_add_head(&mle->master_hash_node, bucket);
336 }
337 
338 /* returns 1 if found, 0 if not */
339 static int dlm_find_mle(struct dlm_ctxt *dlm,
340 			struct dlm_master_list_entry **mle,
341 			char *name, unsigned int namelen)
342 {
343 	struct dlm_master_list_entry *tmpmle;
344 	struct hlist_head *bucket;
345 	struct hlist_node *list;
346 	unsigned int hash;
347 
348 	assert_spin_locked(&dlm->master_lock);
349 
350 	hash = dlm_lockid_hash(name, namelen);
351 	bucket = dlm_master_hash(dlm, hash);
352 	hlist_for_each(list, bucket) {
353 		tmpmle = hlist_entry(list, struct dlm_master_list_entry,
354 				     master_hash_node);
355 		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
356 			continue;
357 		dlm_get_mle(tmpmle);
358 		*mle = tmpmle;
359 		return 1;
360 	}
361 	return 0;
362 }
363 
364 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
365 {
366 	struct dlm_master_list_entry *mle;
367 
368 	assert_spin_locked(&dlm->spinlock);
369 
370 	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
371 		if (node_up)
372 			dlm_mle_node_up(dlm, mle, NULL, idx);
373 		else
374 			dlm_mle_node_down(dlm, mle, NULL, idx);
375 	}
376 }
377 
378 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
379 			      struct dlm_master_list_entry *mle,
380 			      struct o2nm_node *node, int idx)
381 {
382 	spin_lock(&mle->spinlock);
383 
384 	if (!test_bit(idx, mle->node_map))
385 		mlog(0, "node %u already removed from nodemap!\n", idx);
386 	else
387 		clear_bit(idx, mle->node_map);
388 
389 	spin_unlock(&mle->spinlock);
390 }
391 
392 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
393 			    struct dlm_master_list_entry *mle,
394 			    struct o2nm_node *node, int idx)
395 {
396 	spin_lock(&mle->spinlock);
397 
398 	if (test_bit(idx, mle->node_map))
399 		mlog(0, "node %u already in node map!\n", idx);
400 	else
401 		set_bit(idx, mle->node_map);
402 
403 	spin_unlock(&mle->spinlock);
404 }
405 
406 
407 int dlm_init_mle_cache(void)
408 {
409 	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
410 					  sizeof(struct dlm_master_list_entry),
411 					  0, SLAB_HWCACHE_ALIGN,
412 					  NULL);
413 	if (dlm_mle_cache == NULL)
414 		return -ENOMEM;
415 	return 0;
416 }
417 
418 void dlm_destroy_mle_cache(void)
419 {
420 	if (dlm_mle_cache)
421 		kmem_cache_destroy(dlm_mle_cache);
422 }
423 
424 static void dlm_mle_release(struct kref *kref)
425 {
426 	struct dlm_master_list_entry *mle;
427 	struct dlm_ctxt *dlm;
428 
429 	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
430 	dlm = mle->dlm;
431 
432 	assert_spin_locked(&dlm->spinlock);
433 	assert_spin_locked(&dlm->master_lock);
434 
435 	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
436 	     mle->type);
437 
438 	/* remove from list if not already */
439 	__dlm_unlink_mle(dlm, mle);
440 
441 	/* detach the mle from the domain node up/down events */
442 	__dlm_mle_detach_hb_events(dlm, mle);
443 
444 	atomic_dec(&dlm->mle_cur_count[mle->type]);
445 
446 	/* NOTE: slab free under spinlock here.
447 	 * if this is bad, we can move this to a freelist. */
448 	kmem_cache_free(dlm_mle_cache, mle);
449 }
450 
451 
452 /*
453  * LOCK RESOURCE FUNCTIONS
454  */
455 
456 int dlm_init_master_caches(void)
457 {
458 	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
459 					      sizeof(struct dlm_lock_resource),
460 					      0, SLAB_HWCACHE_ALIGN, NULL);
461 	if (!dlm_lockres_cache)
462 		goto bail;
463 
464 	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
465 					       DLM_LOCKID_NAME_MAX, 0,
466 					       SLAB_HWCACHE_ALIGN, NULL);
467 	if (!dlm_lockname_cache)
468 		goto bail;
469 
470 	return 0;
471 bail:
472 	dlm_destroy_master_caches();
473 	return -ENOMEM;
474 }
475 
476 void dlm_destroy_master_caches(void)
477 {
478 	if (dlm_lockname_cache)
479 		kmem_cache_destroy(dlm_lockname_cache);
480 
481 	if (dlm_lockres_cache)
482 		kmem_cache_destroy(dlm_lockres_cache);
483 }
484 
485 static void dlm_lockres_release(struct kref *kref)
486 {
487 	struct dlm_lock_resource *res;
488 	struct dlm_ctxt *dlm;
489 
490 	res = container_of(kref, struct dlm_lock_resource, refs);
491 	dlm = res->dlm;
492 
493 	/* This should not happen -- every lockres has a name
494 	 * associated with it at init time. */
495 	BUG_ON(!res->lockname.name);
496 
497 	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
498 	     res->lockname.name);
499 
500 	spin_lock(&dlm->track_lock);
501 	if (!list_empty(&res->tracking))
502 		list_del_init(&res->tracking);
503 	else {
504 		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
505 		     res->lockname.len, res->lockname.name);
506 		dlm_print_one_lock_resource(res);
507 	}
508 	spin_unlock(&dlm->track_lock);
509 
510 	atomic_dec(&dlm->res_cur_count);
511 
512 	if (!hlist_unhashed(&res->hash_node) ||
513 	    !list_empty(&res->granted) ||
514 	    !list_empty(&res->converting) ||
515 	    !list_empty(&res->blocked) ||
516 	    !list_empty(&res->dirty) ||
517 	    !list_empty(&res->recovering) ||
518 	    !list_empty(&res->purge)) {
519 		mlog(ML_ERROR,
520 		     "Going to BUG for resource %.*s."
521 		     "  We're on a list! [%c%c%c%c%c%c%c]\n",
522 		     res->lockname.len, res->lockname.name,
523 		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
524 		     !list_empty(&res->granted) ? 'G' : ' ',
525 		     !list_empty(&res->converting) ? 'C' : ' ',
526 		     !list_empty(&res->blocked) ? 'B' : ' ',
527 		     !list_empty(&res->dirty) ? 'D' : ' ',
528 		     !list_empty(&res->recovering) ? 'R' : ' ',
529 		     !list_empty(&res->purge) ? 'P' : ' ');
530 
531 		dlm_print_one_lock_resource(res);
532 	}
533 
534 	/* By the time we're ready to blow this guy away, we shouldn't
535 	 * be on any lists. */
536 	BUG_ON(!hlist_unhashed(&res->hash_node));
537 	BUG_ON(!list_empty(&res->granted));
538 	BUG_ON(!list_empty(&res->converting));
539 	BUG_ON(!list_empty(&res->blocked));
540 	BUG_ON(!list_empty(&res->dirty));
541 	BUG_ON(!list_empty(&res->recovering));
542 	BUG_ON(!list_empty(&res->purge));
543 
544 	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
545 
546 	kmem_cache_free(dlm_lockres_cache, res);
547 }
548 
549 void dlm_lockres_put(struct dlm_lock_resource *res)
550 {
551 	kref_put(&res->refs, dlm_lockres_release);
552 }
553 
554 static void dlm_init_lockres(struct dlm_ctxt *dlm,
555 			     struct dlm_lock_resource *res,
556 			     const char *name, unsigned int namelen)
557 {
558 	char *qname;
559 
560 	/* If we memset here, we lose our reference to the kmalloc'd
561 	 * res->lockname.name, so be sure to init every field
562 	 * correctly! */
563 
564 	qname = (char *) res->lockname.name;
565 	memcpy(qname, name, namelen);
566 
567 	res->lockname.len = namelen;
568 	res->lockname.hash = dlm_lockid_hash(name, namelen);
569 
570 	init_waitqueue_head(&res->wq);
571 	spin_lock_init(&res->spinlock);
572 	INIT_HLIST_NODE(&res->hash_node);
573 	INIT_LIST_HEAD(&res->granted);
574 	INIT_LIST_HEAD(&res->converting);
575 	INIT_LIST_HEAD(&res->blocked);
576 	INIT_LIST_HEAD(&res->dirty);
577 	INIT_LIST_HEAD(&res->recovering);
578 	INIT_LIST_HEAD(&res->purge);
579 	INIT_LIST_HEAD(&res->tracking);
580 	atomic_set(&res->asts_reserved, 0);
581 	res->migration_pending = 0;
582 	res->inflight_locks = 0;
583 
584 	res->dlm = dlm;
585 
586 	kref_init(&res->refs);
587 
588 	atomic_inc(&dlm->res_tot_count);
589 	atomic_inc(&dlm->res_cur_count);
590 
591 	/* just for consistency */
592 	spin_lock(&res->spinlock);
593 	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
594 	spin_unlock(&res->spinlock);
595 
596 	res->state = DLM_LOCK_RES_IN_PROGRESS;
597 
598 	res->last_used = 0;
599 
600 	spin_lock(&dlm->spinlock);
601 	list_add_tail(&res->tracking, &dlm->tracking_list);
602 	spin_unlock(&dlm->spinlock);
603 
604 	memset(res->lvb, 0, DLM_LVB_LEN);
605 	memset(res->refmap, 0, sizeof(res->refmap));
606 }
607 
608 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
609 				   const char *name,
610 				   unsigned int namelen)
611 {
612 	struct dlm_lock_resource *res = NULL;
613 
614 	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
615 	if (!res)
616 		goto error;
617 
618 	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
619 	if (!res->lockname.name)
620 		goto error;
621 
622 	dlm_init_lockres(dlm, res, name, namelen);
623 	return res;
624 
625 error:
626 	if (res && res->lockname.name)
627 		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
628 
629 	if (res)
630 		kmem_cache_free(dlm_lockres_cache, res);
631 	return NULL;
632 }
633 
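/*
 * Inflight references, in brief (as implemented below): while this
 * node holds inflight_locks > 0 on a lockres, its bit stays set in
 * res->refmap, which keeps the resource from being purged.  Dropping
 * the last inflight ref clears the bit and wakes any waiters.
 */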
634 void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
635 				   struct dlm_lock_resource *res,
636 				   int new_lockres,
637 				   const char *file,
638 				   int line)
639 {
640 	if (!new_lockres)
641 		assert_spin_locked(&res->spinlock);
642 
643 	if (!test_bit(dlm->node_num, res->refmap)) {
644 		BUG_ON(res->inflight_locks != 0);
645 		dlm_lockres_set_refmap_bit(dlm->node_num, res);
646 	}
647 	res->inflight_locks++;
648 	mlog(0, "%s:%.*s: inflight++: now %u\n",
649 	     dlm->name, res->lockname.len, res->lockname.name,
650 	     res->inflight_locks);
651 }
652 
653 void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
654 				   struct dlm_lock_resource *res,
655 				   const char *file,
656 				   int line)
657 {
658 	assert_spin_locked(&res->spinlock);
659 
660 	BUG_ON(res->inflight_locks == 0);
661 	res->inflight_locks--;
662 	mlog(0, "%s:%.*s: inflight--: now %u\n",
663 	     dlm->name, res->lockname.len, res->lockname.name,
664 	     res->inflight_locks);
665 	if (res->inflight_locks == 0)
666 		dlm_lockres_clear_refmap_bit(dlm->node_num, res);
667 	wake_up(&res->wq);
668 }
669 
670 /*
671  * look up a lock resource by name.
672  * it may already exist in the hashtable.
673  * the lockid is null terminated.
674  *
675  * if not, allocate enough for the lockres and for
676  * the temporary structure used in doing the mastering.
677  *
678  * also, do a lookup in the dlm->master_list to see
679  * if another node has begun mastering the same lock.
680  * if so, there should be a block entry in there
681  * for this name, and we should *not* attempt to master
682  * the lock here.  we need to wait around for that node
683  * to assert_master (or die).
684  *
685  */
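/*
 * Hedged usage sketch (the surrounding lock-call code is not shown in
 * this file; the error handling here is illustrative):
 *
 *	res = dlm_get_lock_resource(dlm, name, namelen, flags);
 *	if (!res)
 *		return DLM_IVLOCKID;
 *	... attach the lock, message the master ...
 *	dlm_lockres_put(res);
 */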
686 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
687 					  const char *lockid,
688 					  int namelen,
689 					  int flags)
690 {
691 	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
692 	struct dlm_master_list_entry *mle = NULL;
693 	struct dlm_master_list_entry *alloc_mle = NULL;
694 	int blocked = 0;
695 	int ret, nodenum;
696 	struct dlm_node_iter iter;
697 	unsigned int hash;
698 	int tries = 0;
699 	int bit, wait_on_recovery = 0;
700 	int drop_inflight_if_nonlocal = 0;
701 
702 	BUG_ON(!lockid);
703 
704 	hash = dlm_lockid_hash(lockid, namelen);
705 
706 	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
707 
708 lookup:
709 	spin_lock(&dlm->spinlock);
710 	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
711 	if (tmpres) {
712 		int dropping_ref = 0;
713 
714 		spin_unlock(&dlm->spinlock);
715 
716 		spin_lock(&tmpres->spinlock);
717 		/* We wait for the other thread that is mastering the resource */
718 		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
719 			__dlm_wait_on_lockres(tmpres);
720 			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
721 		}
722 
723 		if (tmpres->owner == dlm->node_num) {
724 			BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
725 			dlm_lockres_grab_inflight_ref(dlm, tmpres);
726 		} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
727 			dropping_ref = 1;
728 		spin_unlock(&tmpres->spinlock);
729 
730 		/* wait until done messaging the master, drop our ref to allow
731 		 * the lockres to be purged, start over. */
732 		if (dropping_ref) {
733 			spin_lock(&tmpres->spinlock);
734 			__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
735 			spin_unlock(&tmpres->spinlock);
736 			dlm_lockres_put(tmpres);
737 			tmpres = NULL;
738 			goto lookup;
739 		}
740 
741 		mlog(0, "found in hash!\n");
742 		if (res)
743 			dlm_lockres_put(res);
744 		res = tmpres;
745 		goto leave;
746 	}
747 
748 	if (!res) {
749 		spin_unlock(&dlm->spinlock);
750 		mlog(0, "allocating a new resource\n");
751 		/* nothing found and we need to allocate one. */
752 		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
753 		if (!alloc_mle)
754 			goto leave;
755 		res = dlm_new_lockres(dlm, lockid, namelen);
756 		if (!res)
757 			goto leave;
758 		goto lookup;
759 	}
760 
761 	mlog(0, "no lockres found, allocated our own: %p\n", res);
762 
763 	if (flags & LKM_LOCAL) {
764 		/* caller knows it's safe to assume it's not mastered elsewhere
765 		 * DONE!  return right away */
766 		spin_lock(&res->spinlock);
767 		dlm_change_lockres_owner(dlm, res, dlm->node_num);
768 		__dlm_insert_lockres(dlm, res);
769 		dlm_lockres_grab_inflight_ref(dlm, res);
770 		spin_unlock(&res->spinlock);
771 		spin_unlock(&dlm->spinlock);
772 		/* lockres still marked IN_PROGRESS */
773 		goto wake_waiters;
774 	}
775 
776 	/* check master list to see if another node has started mastering it */
777 	spin_lock(&dlm->master_lock);
778 
779 	/* if we found a block, wait for lock to be mastered by another node */
780 	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
781 	if (blocked) {
782 		int mig;
783 		if (mle->type == DLM_MLE_MASTER) {
784 			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
785 			BUG();
786 		}
787 		mig = (mle->type == DLM_MLE_MIGRATION);
788 		/* if there is a migration in progress, let the migration
789 		 * finish before continuing.  we can wait for the absence
790 		 * of the MIGRATION mle: either the migrate finished or
791 		 * one of the nodes died and the mle was cleaned up.
792 		 * if there is a BLOCK here, but it already has a master
793 		 * set, we are too late.  the master does not have a ref
794 		 * for us in the refmap.  detach the mle and drop it.
795 		 * either way, go back to the top and start over. */
796 		if (mig || mle->master != O2NM_MAX_NODES) {
797 			BUG_ON(mig && mle->master == dlm->node_num);
798 			/* we arrived too late.  the master does not
799 			 * have a ref for us. retry. */
800 			mlog(0, "%s:%.*s: late on %s\n",
801 			     dlm->name, namelen, lockid,
802 			     mig ?  "MIGRATION" : "BLOCK");
803 			spin_unlock(&dlm->master_lock);
804 			spin_unlock(&dlm->spinlock);
805 
806 			/* master is known, detach */
807 			if (!mig)
808 				dlm_mle_detach_hb_events(dlm, mle);
809 			dlm_put_mle(mle);
810 			mle = NULL;
811 			/* this is lame, but we can't wait on either
812 			 * the mle or lockres waitqueue here */
813 			if (mig)
814 				msleep(100);
815 			goto lookup;
816 		}
817 	} else {
818 		/* go ahead and try to master lock on this node */
819 		mle = alloc_mle;
820 		/* make sure this does not get freed below */
821 		alloc_mle = NULL;
822 		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
823 		set_bit(dlm->node_num, mle->maybe_map);
824 		__dlm_insert_mle(dlm, mle);
825 
826 		/* still holding the dlm spinlock, check the recovery map
827 		 * to see if there are any nodes that still need to be
828 		 * considered.  these will not appear in the mle nodemap
829 		 * but they might own this lockres.  wait on them. */
830 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
831 		if (bit < O2NM_MAX_NODES) {
832 			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
833 			     "recover before lock mastery can begin\n",
834 			     dlm->name, namelen, (char *)lockid, bit);
835 			wait_on_recovery = 1;
836 		}
837 	}
838 
839 	/* at this point there is either a DLM_MLE_BLOCK or a
840 	 * DLM_MLE_MASTER on the master list, so it's safe to add the
841 	 * lockres to the hashtable.  anyone who finds the lock will
842 	 * still have to wait on the IN_PROGRESS. */
843 
844 	/* finally add the lockres to its hash bucket */
845 	__dlm_insert_lockres(dlm, res);
846 	/* since this lockres is new it doesn't require the spinlock */
847 	dlm_lockres_grab_inflight_ref_new(dlm, res);
848 
849 	/* if this node does not become the master make sure to drop
850 	 * this inflight reference below */
851 	drop_inflight_if_nonlocal = 1;
852 
853 	/* get an extra ref on the mle in case this is a BLOCK
854 	 * if so, the creator of the BLOCK may try to put the last
855 	 * ref at this time in the assert master handler, so we
856 	 * need an extra one to keep from a bad ptr deref. */
857 	dlm_get_mle_inuse(mle);
858 	spin_unlock(&dlm->master_lock);
859 	spin_unlock(&dlm->spinlock);
860 
861 redo_request:
862 	while (wait_on_recovery) {
863 		/* any cluster changes that occurred after dropping the
864 		 * dlm spinlock would be detectable by a change on the mle,
865 		 * so we only need to clear out the recovery map once. */
866 		if (dlm_is_recovery_lock(lockid, namelen)) {
867 			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
868 			     "must master $RECOVERY lock now\n", dlm->name);
869 			if (!dlm_pre_master_reco_lockres(dlm, res))
870 				wait_on_recovery = 0;
871 			else {
872 				mlog(0, "%s: waiting 500ms for heartbeat state "
873 				    "change\n", dlm->name);
874 				msleep(500);
875 			}
876 			continue;
877 		}
878 
879 		dlm_kick_recovery_thread(dlm);
880 		msleep(1000);
881 		dlm_wait_for_recovery(dlm);
882 
883 		spin_lock(&dlm->spinlock);
884 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
885 		if (bit < O2NM_MAX_NODES) {
886 			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
887 			     "recover before lock mastery can begin\n",
888 			     dlm->name, namelen, (char *)lockid, bit);
889 			wait_on_recovery = 1;
890 		} else
891 			wait_on_recovery = 0;
892 		spin_unlock(&dlm->spinlock);
893 
894 		if (wait_on_recovery)
895 			dlm_wait_for_node_recovery(dlm, bit, 10000);
896 	}
897 
898 	/* must wait for lock to be mastered elsewhere */
899 	if (blocked)
900 		goto wait;
901 
902 	ret = -EINVAL;
903 	dlm_node_iter_init(mle->vote_map, &iter);
904 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
905 		ret = dlm_do_master_request(res, mle, nodenum);
906 		if (ret < 0)
907 			mlog_errno(ret);
908 		if (mle->master != O2NM_MAX_NODES) {
909 			/* found a master! */
910 			if (mle->master <= nodenum)
911 				break;
912 			/* if our master request has not reached the master
913 			 * yet, keep going until it does.  this is how the
914 			 * master will know that asserts are needed back to
915 			 * the lower nodes. */
916 			mlog(0, "%s:%.*s: requests only up to %u but master "
917 			     "is %u, keep going\n", dlm->name, namelen,
918 			     lockid, nodenum, mle->master);
919 		}
920 	}
921 
922 wait:
923 	/* keep going until the response map includes all nodes */
924 	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
925 	if (ret < 0) {
926 		wait_on_recovery = 1;
927 		mlog(0, "%s:%.*s: node map changed, redo the "
928 		     "master request now, blocked=%d\n",
929 		     dlm->name, res->lockname.len,
930 		     res->lockname.name, blocked);
931 		if (++tries > 20) {
932 			mlog(ML_ERROR, "%s:%.*s: spinning on "
933 			     "dlm_wait_for_lock_mastery, blocked=%d\n",
934 			     dlm->name, res->lockname.len,
935 			     res->lockname.name, blocked);
936 			dlm_print_one_lock_resource(res);
937 			dlm_print_one_mle(mle);
938 			tries = 0;
939 		}
940 		goto redo_request;
941 	}
942 
943 	mlog(0, "lockres mastered by %u\n", res->owner);
944 	/* make sure we never continue without this */
945 	BUG_ON(res->owner == O2NM_MAX_NODES);
946 
947 	/* master is known, detach if not already detached */
948 	dlm_mle_detach_hb_events(dlm, mle);
949 	dlm_put_mle(mle);
950 	/* put the extra ref */
951 	dlm_put_mle_inuse(mle);
952 
953 wake_waiters:
954 	spin_lock(&res->spinlock);
955 	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
956 		dlm_lockres_drop_inflight_ref(dlm, res);
957 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
958 	spin_unlock(&res->spinlock);
959 	wake_up(&res->wq);
960 
961 leave:
962 	/* need to free the unused mle */
963 	if (alloc_mle)
964 		kmem_cache_free(dlm_mle_cache, alloc_mle);
965 
966 	return res;
967 }
968 
969 
970 #define DLM_MASTERY_TIMEOUT_MS   5000
971 
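/*
 * Mastery voting, roughly, as inferred from the logic below: master
 * requests go to every node in vote_map; responses accumulate in
 * response_map, and "maybe" answers also set maybe_map.  Once all
 * votes are in and no assert has arrived, the node holding the
 * lowest-numbered bit in maybe_map declares itself master and
 * asserts to the rest of the cluster.
 */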
972 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
973 				     struct dlm_lock_resource *res,
974 				     struct dlm_master_list_entry *mle,
975 				     int *blocked)
976 {
977 	u8 m;
978 	int ret, bit;
979 	int map_changed, voting_done;
980 	int assert, sleep;
981 
982 recheck:
983 	ret = 0;
984 	assert = 0;
985 
986 	/* check if another node has already become the owner */
987 	spin_lock(&res->spinlock);
988 	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
989 		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
990 		     res->lockname.len, res->lockname.name, res->owner);
991 		spin_unlock(&res->spinlock);
992 		/* this will cause the master to re-assert across
993 		 * the whole cluster, freeing up mles */
994 		if (res->owner != dlm->node_num) {
995 			ret = dlm_do_master_request(res, mle, res->owner);
996 			if (ret < 0) {
997 				/* give recovery a chance to run */
998 				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
999 				msleep(500);
1000 				goto recheck;
1001 			}
1002 		}
1003 		ret = 0;
1004 		goto leave;
1005 	}
1006 	spin_unlock(&res->spinlock);
1007 
1008 	spin_lock(&mle->spinlock);
1009 	m = mle->master;
1010 	map_changed = (memcmp(mle->vote_map, mle->node_map,
1011 			      sizeof(mle->vote_map)) != 0);
1012 	voting_done = (memcmp(mle->vote_map, mle->response_map,
1013 			     sizeof(mle->vote_map)) == 0);
1014 
1015 	/* restart if the node map changed */
1016 	if (map_changed) {
1017 		int b;
1018 		mlog(0, "%s: %.*s: node map changed, restarting\n",
1019 		     dlm->name, res->lockname.len, res->lockname.name);
1020 		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1021 		b = (mle->type == DLM_MLE_BLOCK);
1022 		if ((*blocked && !b) || (!*blocked && b)) {
1023 			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1024 			     dlm->name, res->lockname.len, res->lockname.name,
1025 			     *blocked, b);
1026 			*blocked = b;
1027 		}
1028 		spin_unlock(&mle->spinlock);
1029 		if (ret < 0) {
1030 			mlog_errno(ret);
1031 			goto leave;
1032 		}
1033 		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1034 		     "rechecking now\n", dlm->name, res->lockname.len,
1035 		     res->lockname.name);
1036 		goto recheck;
1037 	} else {
1038 		if (!voting_done) {
1039 			mlog(0, "map not changed and voting not done "
1040 			     "for %s:%.*s\n", dlm->name, res->lockname.len,
1041 			     res->lockname.name);
1042 		}
1043 	}
1044 
1045 	if (m != O2NM_MAX_NODES) {
1046 		/* another node has done an assert!
1047 		 * all done! */
1048 		sleep = 0;
1049 	} else {
1050 		sleep = 1;
1051 		/* have all nodes responded? */
1052 		if (voting_done && !*blocked) {
1053 			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1054 			if (dlm->node_num <= bit) {
1055 				/* my node number is lowest.
1056 				 * now tell other nodes that I am
1057 				 * mastering this. */
1058 				mle->master = dlm->node_num;
1059 				/* ref was grabbed in get_lock_resource
1060 				 * will be dropped in dlmlock_master */
1061 				assert = 1;
1062 				sleep = 0;
1063 			}
1064 			/* if voting is done, but we have not received
1065 			 * an assert master yet, we must sleep */
1066 		}
1067 	}
1068 
1069 	spin_unlock(&mle->spinlock);
1070 
1071 	/* sleep if we haven't finished voting yet */
1072 	if (sleep) {
1073 		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1074 
1075 		/*
1076 		if (atomic_read(&mle->mle_refs.refcount) < 2)
1077 			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1078 			atomic_read(&mle->mle_refs.refcount),
1079 			res->lockname.len, res->lockname.name);
1080 		*/
1081 		atomic_set(&mle->woken, 0);
1082 		(void)wait_event_timeout(mle->wq,
1083 					 (atomic_read(&mle->woken) == 1),
1084 					 timeo);
1085 		if (res->owner == O2NM_MAX_NODES) {
1086 			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1087 			     res->lockname.len, res->lockname.name);
1088 			goto recheck;
1089 		}
1090 		mlog(0, "done waiting, master is %u\n", res->owner);
1091 		ret = 0;
1092 		goto leave;
1093 	}
1094 
1095 	ret = 0;   /* done */
1096 	if (assert) {
1097 		m = dlm->node_num;
1098 		mlog(0, "about to master %.*s here, this=%u\n",
1099 		     res->lockname.len, res->lockname.name, m);
1100 		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1101 		if (ret) {
1102 			/* This is a failure in the network path,
1103 			 * not in the response to the assert_master
1104 			 * (any nonzero response is a BUG on this node).
1105 			 * Most likely a socket just got disconnected
1106 			 * due to node death. */
1107 			mlog_errno(ret);
1108 		}
1109 		/* no longer need to restart lock mastery.
1110 		 * all living nodes have been contacted. */
1111 		ret = 0;
1112 	}
1113 
1114 	/* set the lockres owner */
1115 	spin_lock(&res->spinlock);
1116 	/* mastery reference obtained either during
1117 	 * assert_master_handler or in get_lock_resource */
1118 	dlm_change_lockres_owner(dlm, res, m);
1119 	spin_unlock(&res->spinlock);
1120 
1121 leave:
1122 	return ret;
1123 }
1124 
1125 struct dlm_bitmap_diff_iter
1126 {
1127 	int curnode;
1128 	unsigned long *orig_bm;
1129 	unsigned long *cur_bm;
1130 	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1131 };
1132 
1133 enum dlm_node_state_change
1134 {
1135 	NODE_DOWN = -1,
1136 	NODE_NO_CHANGE = 0,
1137 	NODE_UP
1138 };
1139 
1140 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1141 				      unsigned long *orig_bm,
1142 				      unsigned long *cur_bm)
1143 {
1144 	unsigned long p1, p2;
1145 	int i;
1146 
1147 	iter->curnode = -1;
1148 	iter->orig_bm = orig_bm;
1149 	iter->cur_bm = cur_bm;
1150 
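	/* diff_bm collects every bit that differs between the two maps;
	 * (p1 & ~p2) | (p2 & ~p1) below is p1 ^ p2 spelled out. */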
1151 	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1152 		p1 = iter->orig_bm[i];
1153 		p2 = iter->cur_bm[i];
1154 		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1155 	}
1156 }
1157 
1158 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1159 				     enum dlm_node_state_change *state)
1160 {
1161 	int bit;
1162 
1163 	if (iter->curnode >= O2NM_MAX_NODES)
1164 		return -ENOENT;
1165 
1166 	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1167 			    iter->curnode+1);
1168 	if (bit >= O2NM_MAX_NODES) {
1169 		iter->curnode = O2NM_MAX_NODES;
1170 		return -ENOENT;
1171 	}
1172 
1173 	/* if it was there in the original then this node died */
1174 	if (test_bit(bit, iter->orig_bm))
1175 		*state = NODE_DOWN;
1176 	else
1177 		*state = NODE_UP;
1178 
1179 	iter->curnode = bit;
1180 	return bit;
1181 }
1182 
1183 
1184 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1185 				    struct dlm_lock_resource *res,
1186 				    struct dlm_master_list_entry *mle,
1187 				    int blocked)
1188 {
1189 	struct dlm_bitmap_diff_iter bdi;
1190 	enum dlm_node_state_change sc;
1191 	int node;
1192 	int ret = 0;
1193 
1194 	mlog(0, "something happened such that the "
1195 	     "master process may need to be restarted!\n");
1196 
1197 	assert_spin_locked(&mle->spinlock);
1198 
1199 	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1200 	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1201 	while (node >= 0) {
1202 		if (sc == NODE_UP) {
1203 			/* a node came up.  clear any old vote from
1204 			 * the response map and set it in the vote map
1205 			 * then restart the mastery. */
1206 			mlog(ML_NOTICE, "node %d up while restarting\n", node);
1207 
1208 			/* redo the master request, but only for the new node */
1209 			mlog(0, "sending request to new node\n");
1210 			clear_bit(node, mle->response_map);
1211 			set_bit(node, mle->vote_map);
1212 		} else {
1213 			mlog(ML_ERROR, "node down! %d\n", node);
1214 			if (blocked) {
1215 				int lowest = find_next_bit(mle->maybe_map,
1216 						       O2NM_MAX_NODES, 0);
1217 
1218 				/* act like it was never there */
1219 				clear_bit(node, mle->maybe_map);
1220 
1221 				if (node == lowest) {
1222 					mlog(0, "expected master %u died"
1223 					    " while this node was blocked "
1224 					    "waiting on it!\n", node);
1225 					lowest = find_next_bit(mle->maybe_map,
1226 						       	O2NM_MAX_NODES,
1227 						       	lowest+1);
1228 					if (lowest < O2NM_MAX_NODES) {
1229 						mlog(0, "%s:%.*s:still "
1230 						     "blocked. waiting on %u "
1231 						     "now\n", dlm->name,
1232 						     res->lockname.len,
1233 						     res->lockname.name,
1234 						     lowest);
1235 					} else {
1236 						/* mle is an MLE_BLOCK, but
1237 						 * there is now nothing left to
1238 						 * block on.  we need to return
1239 						 * all the way back out and try
1240 						 * again with an MLE_MASTER.
1241 						 * dlm_do_local_recovery_cleanup
1242 						 * has already run, so the mle
1243 						 * refcount is ok */
1244 						mlog(0, "%s:%.*s: no "
1245 						     "longer blocking. try to "
1246 						     "master this here\n",
1247 						     dlm->name,
1248 						     res->lockname.len,
1249 						     res->lockname.name);
1250 						mle->type = DLM_MLE_MASTER;
1251 						mle->mleres = res;
1252 					}
1253 				}
1254 			}
1255 
1256 			/* now blank out everything, as if we had never
1257 			 * contacted anyone */
1258 			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1259 			memset(mle->response_map, 0, sizeof(mle->response_map));
1260 			/* reset the vote_map to the current node_map */
1261 			memcpy(mle->vote_map, mle->node_map,
1262 			       sizeof(mle->node_map));
1263 			/* put myself into the maybe map */
1264 			if (mle->type != DLM_MLE_BLOCK)
1265 				set_bit(dlm->node_num, mle->maybe_map);
1266 		}
1267 		ret = -EAGAIN;
1268 		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1269 	}
1270 	return ret;
1271 }
1272 
1273 
1274 /*
1275  * DLM_MASTER_REQUEST_MSG
1276  *
1277  * returns: 0 on success,
1278  *          -errno on a network error
1279  *
1280  * on error, the caller should assume the target node is "dead"
1281  *
1282  */
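/*
 * Sketch of the caller contract stated above (the request loop in
 * dlm_get_lock_resource() is the in-tree example):
 *
 *	ret = dlm_do_master_request(res, mle, nodenum);
 *	if (ret < 0)
 *		mlog_errno(ret);	nodenum is presumed dead; the
 *					heartbeat callbacks update the mle
 */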
1283 
1284 static int dlm_do_master_request(struct dlm_lock_resource *res,
1285 				 struct dlm_master_list_entry *mle, int to)
1286 {
1287 	struct dlm_ctxt *dlm = mle->dlm;
1288 	struct dlm_master_request request;
1289 	int ret, response=0, resend;
1290 
1291 	memset(&request, 0, sizeof(request));
1292 	request.node_idx = dlm->node_num;
1293 
1294 	BUG_ON(mle->type == DLM_MLE_MIGRATION);
1295 
1296 	request.namelen = (u8)mle->mnamelen;
1297 	memcpy(request.name, mle->mname, request.namelen);
1298 
1299 again:
1300 	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1301 				 sizeof(request), to, &response);
1302 	if (ret < 0)  {
1303 		if (ret == -ESRCH) {
1304 			/* should never happen */
1305 			mlog(ML_ERROR, "TCP stack not ready!\n");
1306 			BUG();
1307 		} else if (ret == -EINVAL) {
1308 			mlog(ML_ERROR, "bad args passed to o2net!\n");
1309 			BUG();
1310 		} else if (ret == -ENOMEM) {
1311 			mlog(ML_ERROR, "out of memory while trying to send "
1312 			     "network message!  retrying\n");
1313 			/* this is totally crude */
1314 			msleep(50);
1315 			goto again;
1316 		} else if (!dlm_is_host_down(ret)) {
1317 			/* not a network error. bad. */
1318 			mlog_errno(ret);
1319 			mlog(ML_ERROR, "unhandled error!\n");
1320 			BUG();
1321 		}
1322 		/* all other errors should be network errors,
1323 		 * and likely indicate node death */
1324 		mlog(ML_ERROR, "link to %d went down!\n", to);
1325 		goto out;
1326 	}
1327 
1328 	ret = 0;
1329 	resend = 0;
1330 	spin_lock(&mle->spinlock);
1331 	switch (response) {
1332 		case DLM_MASTER_RESP_YES:
1333 			set_bit(to, mle->response_map);
1334 			mlog(0, "node %u is the master, response=YES\n", to);
1335 			mlog(0, "%s:%.*s: master node %u now knows I have a "
1336 			     "reference\n", dlm->name, res->lockname.len,
1337 			     res->lockname.name, to);
1338 			mle->master = to;
1339 			break;
1340 		case DLM_MASTER_RESP_NO:
1341 			mlog(0, "node %u not master, response=NO\n", to);
1342 			set_bit(to, mle->response_map);
1343 			break;
1344 		case DLM_MASTER_RESP_MAYBE:
1345 			mlog(0, "node %u not master, response=MAYBE\n", to);
1346 			set_bit(to, mle->response_map);
1347 			set_bit(to, mle->maybe_map);
1348 			break;
1349 		case DLM_MASTER_RESP_ERROR:
1350 			mlog(0, "node %u hit an error, resending\n", to);
1351 			resend = 1;
1352 			response = 0;
1353 			break;
1354 		default:
1355 			mlog(ML_ERROR, "bad response! %u\n", response);
1356 			BUG();
1357 	}
1358 	spin_unlock(&mle->spinlock);
1359 	if (resend) {
1360 		/* this is also totally crude */
1361 		msleep(50);
1362 		goto again;
1363 	}
1364 
1365 out:
1366 	return ret;
1367 }
1368 
1369 /*
1370  * locks that can be taken here:
1371  * dlm->spinlock
1372  * res->spinlock
1373  * mle->spinlock
1374  * dlm->master_list
1375  *
1376  * if possible, TRIM THIS DOWN!!!
1377  */
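/*
 * Response semantics, as assigned below: YES means "I am the master"
 * (and the requestor's bit is now set in the refmap); NO means "not
 * me, and I am not contending"; MAYBE means "no master known yet, and
 * I may be contending too"; ERROR asks the requestor to back off and
 * resend (resource recovering/migrating, or allocation failed).
 */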
1378 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1379 			       void **ret_data)
1380 {
1381 	u8 response = DLM_MASTER_RESP_MAYBE;
1382 	struct dlm_ctxt *dlm = data;
1383 	struct dlm_lock_resource *res = NULL;
1384 	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1385 	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1386 	char *name;
1387 	unsigned int namelen, hash;
1388 	int found, ret;
1389 	int set_maybe;
1390 	int dispatch_assert = 0;
1391 
1392 	if (!dlm_grab(dlm))
1393 		return DLM_MASTER_RESP_NO;
1394 
1395 	if (!dlm_domain_fully_joined(dlm)) {
1396 		response = DLM_MASTER_RESP_NO;
1397 		goto send_response;
1398 	}
1399 
1400 	name = request->name;
1401 	namelen = request->namelen;
1402 	hash = dlm_lockid_hash(name, namelen);
1403 
1404 	if (namelen > DLM_LOCKID_NAME_MAX) {
1405 		response = DLM_IVBUFLEN;
1406 		goto send_response;
1407 	}
1408 
1409 way_up_top:
1410 	spin_lock(&dlm->spinlock);
1411 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1412 	if (res) {
1413 		spin_unlock(&dlm->spinlock);
1414 
1415 		/* take care of the easy cases up front */
1416 		spin_lock(&res->spinlock);
1417 		if (res->state & (DLM_LOCK_RES_RECOVERING|
1418 				  DLM_LOCK_RES_MIGRATING)) {
1419 			spin_unlock(&res->spinlock);
1420 			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1421 			     "being recovered/migrated\n");
1422 			response = DLM_MASTER_RESP_ERROR;
1423 			if (mle)
1424 				kmem_cache_free(dlm_mle_cache, mle);
1425 			goto send_response;
1426 		}
1427 
1428 		if (res->owner == dlm->node_num) {
1429 			mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1430 			     dlm->name, namelen, name, request->node_idx);
1431 			dlm_lockres_set_refmap_bit(request->node_idx, res);
1432 			spin_unlock(&res->spinlock);
1433 			response = DLM_MASTER_RESP_YES;
1434 			if (mle)
1435 				kmem_cache_free(dlm_mle_cache, mle);
1436 
1437 			/* this node is the owner.
1438 			 * there is some extra work that needs to
1439 			 * happen now.  the requesting node has
1440 			 * caused all nodes up to this one to
1441 			 * create mles.  this node now needs to
1442 			 * go back and clean those up. */
1443 			dispatch_assert = 1;
1444 			goto send_response;
1445 		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1446 			spin_unlock(&res->spinlock);
1447 			// mlog(0, "node %u is the master\n", res->owner);
1448 			response = DLM_MASTER_RESP_NO;
1449 			if (mle)
1450 				kmem_cache_free(dlm_mle_cache, mle);
1451 			goto send_response;
1452 		}
1453 
1454 		/* ok, there is no owner.  either this node is
1455 		 * being blocked, or it is actively trying to
1456 		 * master this lock. */
1457 		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1458 			mlog(ML_ERROR, "lock with no owner should be "
1459 			     "in-progress!\n");
1460 			BUG();
1461 		}
1462 
1463 		// mlog(0, "lockres is in progress...\n");
1464 		spin_lock(&dlm->master_lock);
1465 		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1466 		if (!found) {
1467 			mlog(ML_ERROR, "no mle found for this lock!\n");
1468 			BUG();
1469 		}
1470 		set_maybe = 1;
1471 		spin_lock(&tmpmle->spinlock);
1472 		if (tmpmle->type == DLM_MLE_BLOCK) {
1473 			// mlog(0, "this node is waiting for "
1474 			// "lockres to be mastered\n");
1475 			response = DLM_MASTER_RESP_NO;
1476 		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
1477 			mlog(0, "node %u is master, but trying to migrate to "
1478 			     "node %u.\n", tmpmle->master, tmpmle->new_master);
1479 			if (tmpmle->master == dlm->node_num) {
1480 				mlog(ML_ERROR, "no owner on lockres, but this "
1481 				     "node is trying to migrate it to %u?!\n",
1482 				     tmpmle->new_master);
1483 				BUG();
1484 			} else {
1485 				/* the real master can respond on its own */
1486 				response = DLM_MASTER_RESP_NO;
1487 			}
1488 		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1489 			set_maybe = 0;
1490 			if (tmpmle->master == dlm->node_num) {
1491 				response = DLM_MASTER_RESP_YES;
1492 				/* this node will be the owner.
1493 				 * go back and clean the mles on any
1494 				 * other nodes */
1495 				dispatch_assert = 1;
1496 				dlm_lockres_set_refmap_bit(request->node_idx, res);
1497 				mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1498 				     dlm->name, namelen, name,
1499 				     request->node_idx);
1500 			} else
1501 				response = DLM_MASTER_RESP_NO;
1502 		} else {
1503 			// mlog(0, "this node is attempting to "
1504 			// "master lockres\n");
1505 			response = DLM_MASTER_RESP_MAYBE;
1506 		}
1507 		if (set_maybe)
1508 			set_bit(request->node_idx, tmpmle->maybe_map);
1509 		spin_unlock(&tmpmle->spinlock);
1510 
1511 		spin_unlock(&dlm->master_lock);
1512 		spin_unlock(&res->spinlock);
1513 
1514 		/* keep the mle attached to heartbeat events */
1515 		dlm_put_mle(tmpmle);
1516 		if (mle)
1517 			kmem_cache_free(dlm_mle_cache, mle);
1518 		goto send_response;
1519 	}
1520 
1521 	/*
1522 	 * lockres doesn't exist on this node
1523 	 * if there is an MLE_BLOCK, return NO
1524 	 * if there is an MLE_MASTER, return MAYBE
1525 	 * otherwise, add an MLE_BLOCK, return NO
1526 	 */
1527 	spin_lock(&dlm->master_lock);
1528 	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1529 	if (!found) {
1530 		/* this lockid has never been seen on this node yet */
1531 		// mlog(0, "no mle found\n");
1532 		if (!mle) {
1533 			spin_unlock(&dlm->master_lock);
1534 			spin_unlock(&dlm->spinlock);
1535 
1536 			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1537 			if (!mle) {
1538 				response = DLM_MASTER_RESP_ERROR;
1539 				mlog_errno(-ENOMEM);
1540 				goto send_response;
1541 			}
1542 			goto way_up_top;
1543 		}
1544 
1545 		// mlog(0, "this is second time thru, already allocated, "
1546 		// "add the block.\n");
1547 		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1548 		set_bit(request->node_idx, mle->maybe_map);
1549 		__dlm_insert_mle(dlm, mle);
1550 		response = DLM_MASTER_RESP_NO;
1551 	} else {
1552 		// mlog(0, "mle was found\n");
1553 		set_maybe = 1;
1554 		spin_lock(&tmpmle->spinlock);
1555 		if (tmpmle->master == dlm->node_num) {
1556 			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1557 			BUG();
1558 		}
1559 		if (tmpmle->type == DLM_MLE_BLOCK)
1560 			response = DLM_MASTER_RESP_NO;
1561 		else if (tmpmle->type == DLM_MLE_MIGRATION) {
1562 			mlog(0, "migration mle was found (%u->%u)\n",
1563 			     tmpmle->master, tmpmle->new_master);
1564 			/* real master can respond on its own */
1565 			response = DLM_MASTER_RESP_NO;
1566 		} else
1567 			response = DLM_MASTER_RESP_MAYBE;
1568 		if (set_maybe)
1569 			set_bit(request->node_idx, tmpmle->maybe_map);
1570 		spin_unlock(&tmpmle->spinlock);
1571 	}
1572 	spin_unlock(&dlm->master_lock);
1573 	spin_unlock(&dlm->spinlock);
1574 
1575 	if (found) {
1576 		/* keep the mle attached to heartbeat events */
1577 		dlm_put_mle(tmpmle);
1578 	}
1579 send_response:
1580 	/*
1581 	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
1582 	 * The reference is released by dlm_assert_master_worker() under
1583 	 * the call to dlm_dispatch_assert_master().  If
1584 	 * dlm_assert_master_worker() isn't called, we drop it here.
1585 	 */
1586 	if (dispatch_assert) {
1587 		if (response != DLM_MASTER_RESP_YES)
1588 			mlog(ML_ERROR, "invalid response %d\n", response);
1589 		if (!res) {
1590 			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1591 			BUG();
1592 		}
1593 		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1594 			     dlm->node_num, res->lockname.len, res->lockname.name);
1595 		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1596 						 DLM_ASSERT_MASTER_MLE_CLEANUP);
1597 		if (ret < 0) {
1598 			mlog(ML_ERROR, "failed to dispatch assert master work\n");
1599 			response = DLM_MASTER_RESP_ERROR;
1600 			dlm_lockres_put(res);
1601 		}
1602 	} else {
1603 		if (res)
1604 			dlm_lockres_put(res);
1605 	}
1606 
1607 	dlm_put(dlm);
1608 	return response;
1609 }
1610 
1611 /*
1612  * DLM_ASSERT_MASTER_MSG
1613  */
1614 
1615 
1616 /*
1617  * NOTE: this can be used for debugging
1618  * can periodically run all locks owned by this node
1619  * and re-assert across the cluster...
1620  */
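/*
 * A minimal sketch of that debugging idea; dlm_reassert_one() is
 * hypothetical and not part of this file:
 *
 *	static void dlm_reassert_one(struct dlm_ctxt *dlm,
 *				     struct dlm_lock_resource *res)
 *	{
 *		unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *
 *		spin_lock(&dlm->spinlock);
 *		memcpy(map, dlm->domain_map, sizeof(map));
 *		spin_unlock(&dlm->spinlock);
 *		clear_bit(dlm->node_num, map);
 *		dlm_do_assert_master(dlm, res, map, 0);
 *	}
 */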
1621 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1622 				struct dlm_lock_resource *res,
1623 				void *nodemap, u32 flags)
1624 {
1625 	struct dlm_assert_master assert;
1626 	int to, tmpret;
1627 	struct dlm_node_iter iter;
1628 	int ret = 0;
1629 	int reassert;
1630 	const char *lockname = res->lockname.name;
1631 	unsigned int namelen = res->lockname.len;
1632 
1633 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1634 
1635 	spin_lock(&res->spinlock);
1636 	res->state |= DLM_LOCK_RES_SETREF_INPROG;
1637 	spin_unlock(&res->spinlock);
1638 
1639 again:
1640 	reassert = 0;
1641 
1642 	/* note that if this nodemap is empty, it returns 0 */
1643 	dlm_node_iter_init(nodemap, &iter);
1644 	while ((to = dlm_node_iter_next(&iter)) >= 0) {
1645 		int r = 0;
1646 		struct dlm_master_list_entry *mle = NULL;
1647 
1648 		mlog(0, "sending assert master to %d (%.*s)\n", to,
1649 		     namelen, lockname);
1650 		memset(&assert, 0, sizeof(assert));
1651 		assert.node_idx = dlm->node_num;
1652 		assert.namelen = namelen;
1653 		memcpy(assert.name, lockname, namelen);
1654 		assert.flags = cpu_to_be32(flags);
1655 
1656 		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1657 					    &assert, sizeof(assert), to, &r);
1658 		if (tmpret < 0) {
1659 			mlog(ML_ERROR, "Error %d when sending message %u (key "
1660 			     "0x%x) to node %u\n", tmpret,
1661 			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
1662 			if (!dlm_is_host_down(tmpret)) {
1663 				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1664 				BUG();
1665 			}
1666 			/* a node died.  finish out the rest of the nodes. */
1667 			mlog(0, "link to %d went down!\n", to);
1668 			/* any nonzero status return will do */
1669 			ret = tmpret;
1670 			r = 0;
1671 		} else if (r < 0) {
1672 			/* ok, something is horribly messed up.  kill thyself. */
1673 			mlog(ML_ERROR,"during assert master of %.*s to %u, "
1674 			     "got %d.\n", namelen, lockname, to, r);
1675 			spin_lock(&dlm->spinlock);
1676 			spin_lock(&dlm->master_lock);
1677 			if (dlm_find_mle(dlm, &mle, (char *)lockname,
1678 					 namelen)) {
1679 				dlm_print_one_mle(mle);
1680 				__dlm_put_mle(mle);
1681 			}
1682 			spin_unlock(&dlm->master_lock);
1683 			spin_unlock(&dlm->spinlock);
1684 			BUG();
1685 		}
1686 
1687 		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1688 		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1689 			mlog(ML_ERROR, "%.*s: very strange, "
1690 			     "master MLE but no lockres on %u\n",
1691 			     namelen, lockname, to);
1692 		}
1693 
1694 		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1695 			mlog(0, "%.*s: node %u create mles on other "
1696 			     "nodes and requests a re-assert\n",
1697 			     namelen, lockname, to);
1698 			reassert = 1;
1699 		}
1700 		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1701 			mlog(0, "%.*s: node %u has a reference to this "
1702 			     "lockres, set the bit in the refmap\n",
1703 			     namelen, lockname, to);
1704 			spin_lock(&res->spinlock);
1705 			dlm_lockres_set_refmap_bit(to, res);
1706 			spin_unlock(&res->spinlock);
1707 		}
1708 	}
1709 
1710 	if (reassert)
1711 		goto again;
1712 
1713 	spin_lock(&res->spinlock);
1714 	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1715 	spin_unlock(&res->spinlock);
1716 	wake_up(&res->wq);
1717 
1718 	return ret;
1719 }
1720 
1721 /*
1722  * locks that can be taken here:
1723  * dlm->spinlock
1724  * res->spinlock
1725  * mle->spinlock
1726  * dlm->master_list
1727  *
1728  * if possible, TRIM THIS DOWN!!!
1729  */
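/*
 * Outcomes below, in brief: a consistent assert falls through to the
 * "ok" path and updates the mle and/or the lockres owner; an assert
 * that contradicts local state jumps to "kill", which returns a
 * negative status that the asserting node treats as fatal (see the
 * r < 0 BUG() in dlm_do_assert_master above).
 */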
1730 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1731 			      void **ret_data)
1732 {
1733 	struct dlm_ctxt *dlm = data;
1734 	struct dlm_master_list_entry *mle = NULL;
1735 	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1736 	struct dlm_lock_resource *res = NULL;
1737 	char *name;
1738 	unsigned int namelen, hash;
1739 	u32 flags;
1740 	int master_request = 0, have_lockres_ref = 0;
1741 	int ret = 0;
1742 
1743 	if (!dlm_grab(dlm))
1744 		return 0;
1745 
1746 	name = assert->name;
1747 	namelen = assert->namelen;
1748 	hash = dlm_lockid_hash(name, namelen);
1749 	flags = be32_to_cpu(assert->flags);
1750 
1751 	if (namelen > DLM_LOCKID_NAME_MAX) {
1752 		mlog(ML_ERROR, "Invalid name length!\n");
1753 		goto done;
1754 	}
1755 
1756 	spin_lock(&dlm->spinlock);
1757 
1758 	if (flags)
1759 		mlog(0, "assert_master with flags: %u\n", flags);
1760 
1761 	/* find the MLE */
1762 	spin_lock(&dlm->master_lock);
1763 	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1764 		/* not an error, could be master just re-asserting */
1765 		mlog(0, "just got an assert_master from %u, but no "
1766 		     "MLE for it! (%.*s)\n", assert->node_idx,
1767 		     namelen, name);
1768 	} else {
1769 		int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1770 		if (bit >= O2NM_MAX_NODES) {
1771 			/* not necessarily an error, though less likely.
1772 			 * could be master just re-asserting. */
1773 			mlog(0, "no bits set in the maybe_map, but %u "
1774 			     "is asserting! (%.*s)\n", assert->node_idx,
1775 			     namelen, name);
1776 		} else if (bit != assert->node_idx) {
1777 			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1778 				mlog(0, "master %u was found, %u should "
1779 				     "back off\n", assert->node_idx, bit);
1780 			} else {
1781 				/* with the fix for bug 569, a higher node
1782 				 * number winning the mastery will respond
1783 				 * YES to mastery requests, but this node
1784 				 * had no way of knowing.  let it pass. */
1785 				mlog(0, "%u is the lowest node, "
1786 				     "%u is asserting. (%.*s)  %u must "
1787 				     "have begun after %u won.\n", bit,
1788 				     assert->node_idx, namelen, name, bit,
1789 				     assert->node_idx);
1790 			}
1791 		}
1792 		if (mle->type == DLM_MLE_MIGRATION) {
1793 			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1794 				mlog(0, "%s:%.*s: got cleanup assert"
1795 				     " from %u for migration\n",
1796 				     dlm->name, namelen, name,
1797 				     assert->node_idx);
1798 			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1799 				mlog(0, "%s:%.*s: got unrelated assert"
1800 				     " from %u for migration, ignoring\n",
1801 				     dlm->name, namelen, name,
1802 				     assert->node_idx);
1803 				__dlm_put_mle(mle);
1804 				spin_unlock(&dlm->master_lock);
1805 				spin_unlock(&dlm->spinlock);
1806 				goto done;
1807 			}
1808 		}
1809 	}
1810 	spin_unlock(&dlm->master_lock);
1811 
1812 	/* ok, everything checks out with the MLE.
1813 	 * now check to see if there is a lockres */
1814 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1815 	if (res) {
1816 		spin_lock(&res->spinlock);
1817 		if (res->state & DLM_LOCK_RES_RECOVERING)  {
1818 			mlog(ML_ERROR, "%u asserting but %.*s is "
1819 			     "RECOVERING!\n", assert->node_idx, namelen, name);
1820 			goto kill;
1821 		}
1822 		if (!mle) {
1823 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1824 			    res->owner != assert->node_idx) {
1825 				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1826 				     "but current owner is %u! (%.*s)\n",
1827 				     assert->node_idx, res->owner, namelen,
1828 				     name);
1829 				__dlm_print_one_lock_resource(res);
1830 				BUG();
1831 			}
1832 		} else if (mle->type != DLM_MLE_MIGRATION) {
1833 			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1834 				/* owner is just re-asserting */
1835 				if (res->owner == assert->node_idx) {
1836 					mlog(0, "owner %u re-asserting on "
1837 					     "lock %.*s\n", assert->node_idx,
1838 					     namelen, name);
1839 					goto ok;
1840 				}
1841 				mlog(ML_ERROR, "got assert_master from "
1842 				     "node %u, but %u is the owner! "
1843 				     "(%.*s)\n", assert->node_idx,
1844 				     res->owner, namelen, name);
1845 				goto kill;
1846 			}
1847 			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1848 				mlog(ML_ERROR, "got assert from %u, but lock "
1849 				     "with no owner should be "
1850 				     "in-progress! (%.*s)\n",
1851 				     assert->node_idx,
1852 				     namelen, name);
1853 				goto kill;
1854 			}
1855 		} else /* mle->type == DLM_MLE_MIGRATION */ {
1856 			/* should only be getting an assert from new master */
1857 			if (assert->node_idx != mle->new_master) {
1858 				mlog(ML_ERROR, "got assert from %u, but "
1859 				     "new master is %u, and old master "
1860 				     "was %u (%.*s)\n",
1861 				     assert->node_idx, mle->new_master,
1862 				     mle->master, namelen, name);
1863 				goto kill;
1864 			}
1865 
1866 		}
1867 ok:
1868 		spin_unlock(&res->spinlock);
1869 	}
1870 
1873 	if (mle) {
1874 		int extra_ref = 0;
1875 		int nn = -1;
1876 		int rr, err = 0;
1877 
1878 		spin_lock(&mle->spinlock);
1879 		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1880 			extra_ref = 1;
1881 		else {
1882 			/* MASTER mle: if any bits set in the response map
1883 			 * then the calling node needs to re-assert to clear
1884 			 * up nodes that this node contacted */
1885 			while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
1886 						   nn+1)) < O2NM_MAX_NODES) {
1887 				if (nn != dlm->node_num && nn != assert->node_idx)
1888 					master_request = 1;
1889 			}
1890 		}
1891 		mle->master = assert->node_idx;
1892 		atomic_set(&mle->woken, 1);
1893 		wake_up(&mle->wq);
1894 		spin_unlock(&mle->spinlock);
1895 
1896 		if (res) {
1897 			int wake = 0;
1898 			spin_lock(&res->spinlock);
1899 			if (mle->type == DLM_MLE_MIGRATION) {
1900 				mlog(0, "finishing off migration of lockres %.*s, "
1901 				     "from %u to %u\n",
1902 				     res->lockname.len, res->lockname.name,
1903 				     dlm->node_num, mle->new_master);
1904 				res->state &= ~DLM_LOCK_RES_MIGRATING;
1905 				wake = 1;
1906 				dlm_change_lockres_owner(dlm, res, mle->new_master);
1907 				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1908 			} else {
1909 				dlm_change_lockres_owner(dlm, res, mle->master);
1910 			}
1911 			spin_unlock(&res->spinlock);
1912 			have_lockres_ref = 1;
1913 			if (wake)
1914 				wake_up(&res->wq);
1915 		}
1916 
1917 		/* master is known, detach if not already detached.
1918 		 * ensures that only one assert_master call will happen
1919 		 * on this mle. */
1920 		spin_lock(&dlm->master_lock);
1921 
1922 		rr = atomic_read(&mle->mle_refs.refcount);
1923 		if (mle->inuse > 0) {
1924 			if (extra_ref && rr < 3)
1925 				err = 1;
1926 			else if (!extra_ref && rr < 2)
1927 				err = 1;
1928 		} else {
1929 			if (extra_ref && rr < 2)
1930 				err = 1;
1931 			else if (!extra_ref && rr < 1)
1932 				err = 1;
1933 		}
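		/*
		 * Minimum refcounts implied by the checks above: with
		 * inuse > 0, a sane mle needs >= 3 refs when extra_ref
		 * is set and >= 2 otherwise; with inuse == 0, >= 2 and
		 * >= 1 respectively.  Anything lower means a reference
		 * we are about to drop was already dropped elsewhere.
		 */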
1934 		if (err) {
1935 			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1936 			     "that will mess up this node, refs=%d, extra=%d, "
1937 			     "inuse=%d\n", dlm->name, namelen, name,
1938 			     assert->node_idx, rr, extra_ref, mle->inuse);
1939 			dlm_print_one_mle(mle);
1940 		}
1941 		__dlm_unlink_mle(dlm, mle);
1942 		__dlm_mle_detach_hb_events(dlm, mle);
1943 		__dlm_put_mle(mle);
1944 		if (extra_ref) {
1945 			/* the assert master message now balances the extra
1946 			 * ref given by the master / migration request message.
1947 			 * if this is the last put, it will be removed
1948 			 * from the list. */
1949 			__dlm_put_mle(mle);
1950 		}
1951 		spin_unlock(&dlm->master_lock);
1952 	} else if (res) {
1953 		if (res->owner != assert->node_idx) {
1954 			mlog(0, "assert_master from %u, but current "
1955 			     "owner is %u (%.*s), no mle\n", assert->node_idx,
1956 			     res->owner, namelen, name);
1957 		}
1958 	}
1959 	spin_unlock(&dlm->spinlock);
1960 
1961 done:
1962 	ret = 0;
1963 	if (res) {
1964 		spin_lock(&res->spinlock);
1965 		res->state |= DLM_LOCK_RES_SETREF_INPROG;
1966 		spin_unlock(&res->spinlock);
1967 		*ret_data = (void *)res;
1968 	}
1969 	dlm_put(dlm);
1970 	if (master_request) {
1971 		mlog(0, "need to tell master to reassert\n");
1972 		/* positive. negative would shoot down the node. */
1973 		ret |= DLM_ASSERT_RESPONSE_REASSERT;
1974 		if (!have_lockres_ref) {
1975 			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1976 			     "mle present here for %s:%.*s, but no lockres!\n",
1977 			     assert->node_idx, dlm->name, namelen, name);
1978 		}
1979 	}
1980 	if (have_lockres_ref) {
1981 		/* let the master know we have a reference to the lockres */
1982 		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
1983 		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
1984 		     dlm->name, namelen, name, assert->node_idx);
1985 	}
1986 	return ret;
1987 
1988 kill:
1989 	/* kill the caller! */
1990 	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
1991 	     "and killing the other node now!  This node is OK and can continue.\n");
1992 	__dlm_print_one_lock_resource(res);
1993 	spin_unlock(&res->spinlock);
1994 	spin_unlock(&dlm->spinlock);
1995 	*ret_data = (void *)res;
1996 	dlm_put(dlm);
1997 	return -EINVAL;
1998 }
1999 
2000 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2001 {
2002 	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2003 
2004 	if (ret_data) {
2005 		spin_lock(&res->spinlock);
2006 		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2007 		spin_unlock(&res->spinlock);
2008 		wake_up(&res->wq);
2009 		dlm_lockres_put(res);
2010 	}
2012 }
2013 
2014 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2015 			       struct dlm_lock_resource *res,
2016 			       int ignore_higher, u8 request_from, u32 flags)
2017 {
2018 	struct dlm_work_item *item;
2019 	item = kzalloc(sizeof(*item), GFP_NOFS);
2020 	if (!item)
2021 		return -ENOMEM;
2022 
2023 
2024 	/* queue up work for dlm_assert_master_worker */
2025 	dlm_grab(dlm);  /* get an extra ref for the work item */
2026 	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2027 	item->u.am.lockres = res; /* already have a ref */
2028 	/* can optionally ignore node numbers higher than this node */
2029 	item->u.am.ignore_higher = ignore_higher;
2030 	item->u.am.request_from = request_from;
2031 	item->u.am.flags = flags;
2032 
2033 	if (ignore_higher)
2034 		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2035 		     res->lockname.name);
2036 
2037 	spin_lock(&dlm->work_lock);
2038 	list_add_tail(&item->list, &dlm->work_list);
2039 	spin_unlock(&dlm->work_lock);
2040 
2041 	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2042 	return 0;
2043 }
2044 
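/*
 * Usage sketch (hypothetical caller, mirroring how the handlers in
 * this file use it): the caller must already hold a reference on res,
 * which the queued work item inherits.
 *
 *	ret = dlm_dispatch_assert_master(dlm, res, 0, request_from, 0);
 *	if (ret < 0) {
 *		// -ENOMEM: nothing was queued, caller keeps its ref
 *	}
 */
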
2045 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2046 {
2047 	struct dlm_ctxt *dlm = data;
2048 	int ret = 0;
2049 	struct dlm_lock_resource *res;
2050 	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2051 	int ignore_higher;
2052 	int bit;
2053 	u8 request_from;
2054 	u32 flags;
2055 
2056 	dlm = item->dlm;
2057 	res = item->u.am.lockres;
2058 	ignore_higher = item->u.am.ignore_higher;
2059 	request_from = item->u.am.request_from;
2060 	flags = item->u.am.flags;
2061 
2062 	spin_lock(&dlm->spinlock);
2063 	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2064 	spin_unlock(&dlm->spinlock);
2065 
2066 	clear_bit(dlm->node_num, nodemap);
2067 	if (ignore_higher) {
2068 		/* if this is just to clear up mles for nodes below
2069 		 * this node, do not send the message to the original
2070 		 * caller or any node number higher than this */
2071 		clear_bit(request_from, nodemap);
2072 		bit = dlm->node_num;
2073 		while (1) {
2074 			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2075 					    bit+1);
2076 			if (bit >= O2NM_MAX_NODES)
2077 				break;
2078 			clear_bit(bit, nodemap);
2079 		}
2080 	}
2081 
2082 	/*
2083 	 * If we're migrating this lock to someone else, we are no
2084 	 * longer allowed to assert our own mastery.  OTOH, we need to
2085 	 * prevent migration from starting while we're still asserting
2086 	 * our dominance.  The reserved ast delays migration.
2087 	 */
2088 	spin_lock(&res->spinlock);
2089 	if (res->state & DLM_LOCK_RES_MIGRATING) {
2090 		mlog(0, "Someone asked us to assert mastery, but we're "
2091 		     "in the middle of migration.  Skipping assert, "
2092 		     "the new master will handle that.\n");
2093 		spin_unlock(&res->spinlock);
2094 		goto put;
2095 	}
2096 	__dlm_lockres_reserve_ast(res);
2097 	spin_unlock(&res->spinlock);
2098 
2099 	/* this call now finishes out the nodemap
2100 	 * even if one or more nodes die */
2101 	mlog(0, "worker about to master %.*s here, this=%u\n",
2102 	     res->lockname.len, res->lockname.name, dlm->node_num);
2103 	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2104 	if (ret < 0) {
2105 		/* no need to restart, we are done */
2106 		if (!dlm_is_host_down(ret))
2107 			mlog_errno(ret);
2108 	}
2109 
2110 	/* Ok, we've asserted ourselves.  Let's let migration start. */
2111 	dlm_lockres_release_ast(dlm, res);
2112 
2113 put:
2114 	dlm_lockres_put(res);
2115 
2116 	mlog(0, "finished with dlm_assert_master_worker\n");
2117 }
2118 
2119 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2120  * We cannot wait for node recovery to complete to begin mastering this
2121  * lockres because this lockres is used to kick off recovery! ;-)
2122  * So, do a pre-check on all living nodes to see if any of those nodes
2123  * think that $RECOVERY is currently mastered by a dead node.  If so,
2124  * we wait a short time to allow that node to get notified by its own
2125  * heartbeat stack, then check again.  All $RECOVERY lock resources
2126  * mastered by dead nodes are purged when the heartbeat callback is
2127  * fired, so we can know for sure that it is safe to continue once
2128  * the query returns a live node or no node.  */
2129 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2130 				       struct dlm_lock_resource *res)
2131 {
2132 	struct dlm_node_iter iter;
2133 	int nodenum;
2134 	int ret = 0;
2135 	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2136 
2137 	spin_lock(&dlm->spinlock);
2138 	dlm_node_iter_init(dlm->domain_map, &iter);
2139 	spin_unlock(&dlm->spinlock);
2140 
2141 	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2142 		/* do not send to self */
2143 		if (nodenum == dlm->node_num)
2144 			continue;
2145 		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2146 		if (ret < 0) {
2147 			mlog_errno(ret);
2148 			if (!dlm_is_host_down(ret))
2149 				BUG();
2150 			/* host is down, so answer for that node would be
2151 			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2152 			ret = 0;
2153 		}
2154 
2155 		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2156 			/* check to see if this master is in the recovery map */
2157 			spin_lock(&dlm->spinlock);
2158 			if (test_bit(master, dlm->recovery_map)) {
2159 				mlog(ML_NOTICE, "%s: node %u has not seen "
2160 				     "node %u go down yet, and thinks the "
2161 				     "dead node is mastering the recovery "
2162 				     "lock.  must wait.\n", dlm->name,
2163 				     nodenum, master);
2164 				ret = -EAGAIN;
2165 			}
2166 			spin_unlock(&dlm->spinlock);
2167 			mlog(0, "%s: reco lock master is %u\n", dlm->name,
2168 			     master);
2169 			break;
2170 		}
2171 	}
2172 	return ret;
2173 }
2174 
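/*
 * A caller is expected to loop on -EAGAIN from the pre-check above,
 * giving the slow node time to observe the death via its own heartbeat
 * callbacks, e.g. (sketch, not the literal caller):
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);
 */
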
2175 /*
2176  * DLM_DEREF_LOCKRES_MSG
2177  */
2178 
2179 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2180 {
2181 	struct dlm_deref_lockres deref;
2182 	int ret = 0, r;
2183 	const char *lockname;
2184 	unsigned int namelen;
2185 
2186 	lockname = res->lockname.name;
2187 	namelen = res->lockname.len;
2188 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2189 
2190 	mlog(0, "%s:%.*s: sending deref to %d\n",
2191 	     dlm->name, namelen, lockname, res->owner);
2192 	memset(&deref, 0, sizeof(deref));
2193 	deref.node_idx = dlm->node_num;
2194 	deref.namelen = namelen;
2195 	memcpy(deref.name, lockname, namelen);
2196 
2197 	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2198 				 &deref, sizeof(deref), res->owner, &r);
2199 	if (ret < 0)
2200 		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
2201 		     "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
2202 		     res->owner);
2203 	else if (r < 0) {
2204 		/* BAD.  other node says I did not have a ref. */
2205 		mlog(ML_ERROR, "while dropping ref on %s:%.*s "
2206 		     "(master=%u) got %d.\n", dlm->name, namelen,
2207 		     lockname, res->owner, r);
2208 		dlm_print_one_lock_resource(res);
2209 		BUG();
2210 	}
2211 	return ret;
2212 }
2213 
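/*
 * On the wire, a deref carries only the sender's node number and the
 * lock name (struct dlm_deref_lockres, built above); the master is
 * expected to clear that node's bit in the refmap, either immediately
 * or via the dispatched worker below.
 */
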
2214 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2215 			      void **ret_data)
2216 {
2217 	struct dlm_ctxt *dlm = data;
2218 	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2219 	struct dlm_lock_resource *res = NULL;
2220 	char *name;
2221 	unsigned int namelen;
2222 	int ret = -EINVAL;
2223 	u8 node;
2224 	unsigned int hash;
2225 	struct dlm_work_item *item;
2226 	int cleared = 0;
2227 	int dispatch = 0;
2228 
2229 	if (!dlm_grab(dlm))
2230 		return 0;
2231 
2232 	name = deref->name;
2233 	namelen = deref->namelen;
2234 	node = deref->node_idx;
2235 
2236 	if (namelen > DLM_LOCKID_NAME_MAX) {
2237 		mlog(ML_ERROR, "Invalid name length!\n");
2238 		goto done;
2239 	}
2240 	if (deref->node_idx >= O2NM_MAX_NODES) {
2241 		mlog(ML_ERROR, "Invalid node number: %u\n", node);
2242 		goto done;
2243 	}
2244 
2245 	hash = dlm_lockid_hash(name, namelen);
2246 
2247 	spin_lock(&dlm->spinlock);
2248 	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2249 	if (!res) {
2250 		spin_unlock(&dlm->spinlock);
2251 		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2252 		     dlm->name, namelen, name);
2253 		goto done;
2254 	}
2255 	spin_unlock(&dlm->spinlock);
2256 
2257 	spin_lock(&res->spinlock);
2258 	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2259 		dispatch = 1;
2260 	else {
2261 		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2262 		if (test_bit(node, res->refmap)) {
2263 			dlm_lockres_clear_refmap_bit(node, res);
2264 			cleared = 1;
2265 		}
2266 	}
2267 	spin_unlock(&res->spinlock);
2268 
2269 	if (!dispatch) {
2270 		if (cleared)
2271 			dlm_lockres_calc_usage(dlm, res);
2272 		else {
2273 			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2274 			     "but it is already dropped!\n", dlm->name,
2275 			     res->lockname.len, res->lockname.name, node);
2276 			dlm_print_one_lock_resource(res);
2277 		}
2278 		ret = 0;
2279 		goto done;
2280 	}
2281 
2282 	item = kzalloc(sizeof(*item), GFP_NOFS);
2283 	if (!item) {
2284 		ret = -ENOMEM;
2285 		mlog_errno(ret);
2286 		goto done;
2287 	}
2288 
2289 	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2290 	item->u.dl.deref_res = res;
2291 	item->u.dl.deref_node = node;
2292 
2293 	spin_lock(&dlm->work_lock);
2294 	list_add_tail(&item->list, &dlm->work_list);
2295 	spin_unlock(&dlm->work_lock);
2296 
2297 	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2298 	return 0;
2299 
2300 done:
2301 	if (res)
2302 		dlm_lockres_put(res);
2303 	dlm_put(dlm);
2304 
2305 	return ret;
2306 }
2307 
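/*
 * Note the ordering rule enforced above: while an assert_master for
 * this lockres is still being processed (DLM_LOCK_RES_SETREF_INPROG),
 * the deref cannot be applied directly; the worker below waits for the
 * flag to clear before touching the refmap.
 */
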
2308 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2309 {
2310 	struct dlm_ctxt *dlm;
2311 	struct dlm_lock_resource *res;
2312 	u8 node;
2313 	u8 cleared = 0;
2314 
2315 	dlm = item->dlm;
2316 	res = item->u.dl.deref_res;
2317 	node = item->u.dl.deref_node;
2318 
2319 	spin_lock(&res->spinlock);
2320 	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2321 	if (test_bit(node, res->refmap)) {
2322 		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2323 		dlm_lockres_clear_refmap_bit(node, res);
2324 		cleared = 1;
2325 	}
2326 	spin_unlock(&res->spinlock);
2327 
2328 	if (cleared) {
2329 		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2330 		     dlm->name, res->lockname.len, res->lockname.name, node);
2331 		dlm_lockres_calc_usage(dlm, res);
2332 	} else {
2333 		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2334 		     "but it is already dropped!\n", dlm->name,
2335 		     res->lockname.len, res->lockname.name, node);
2336 		dlm_print_one_lock_resource(res);
2337 	}
2338 
2339 	dlm_lockres_put(res);
2340 }
2341 
2342 /*
2343  * A migrateable resource is one that is:
2344  * 1. locally mastered, and,
2345  * 2. zero local locks, and,
2346  * 3. one or more non-local locks, or, one or more references
2347  * Returns 1 if yes, 0 if not.
2348  */
2349 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2350 				      struct dlm_lock_resource *res)
2351 {
2352 	enum dlm_lockres_list idx;
2353 	int nonlocal = 0, node_ref;
2354 	struct list_head *queue;
2355 	struct dlm_lock *lock;
2356 	u64 cookie;
2357 
2358 	assert_spin_locked(&res->spinlock);
2359 
2360 	if (res->owner != dlm->node_num)
2361 		return 0;
2362 
2363 	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2364 		queue = dlm_list_idx_to_ptr(res, idx);
2365 		list_for_each_entry(lock, queue, list) {
2366 			if (lock->ml.node != dlm->node_num) {
2367 				nonlocal++;
2368 				continue;
2369 			}
2370 			cookie = be64_to_cpu(lock->ml.cookie);
2371 			mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
2372 			     "%s list\n", dlm->name, res->lockname.len,
2373 			     res->lockname.name,
2374 			     dlm_get_lock_cookie_node(cookie),
2375 			     dlm_get_lock_cookie_seq(cookie),
2376 			     dlm_list_in_text(idx));
2377 			return 0;
2378 		}
2379 	}
2380 
2381 	if (!nonlocal) {
2382 		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2383 		if (node_ref >= O2NM_MAX_NODES)
2384 			return 0;
2385 	}
2386 
2387 	mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
2388 	     res->lockname.name);
2389 
2390 	return 1;
2391 }
2392 
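/*
 * Example of the rules above: a locally mastered lockres holding one
 * remote lock and no local locks is migrateable; any local lock makes
 * it non-migrateable, and with no remote locks and an empty refmap
 * there is no remote interest, so migration would be pointless.
 */
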
2393 /*
2394  * DLM_MIGRATE_LOCKRES
2395  */
2396 
2397 
2398 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2399 			       struct dlm_lock_resource *res, u8 target)
2400 {
2401 	struct dlm_master_list_entry *mle = NULL;
2402 	struct dlm_master_list_entry *oldmle = NULL;
2403 	struct dlm_migratable_lockres *mres = NULL;
2404 	int ret = 0;
2405 	const char *name;
2406 	unsigned int namelen;
2407 	int mle_added = 0;
2408 	int wake = 0;
2409 
2410 	if (!dlm_grab(dlm))
2411 		return -EINVAL;
2412 
2413 	BUG_ON(target == O2NM_MAX_NODES);
2414 
2415 	name = res->lockname.name;
2416 	namelen = res->lockname.len;
2417 
2418 	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2419 	     target);
2420 
2421 	/* preallocate up front. if this fails, abort */
2422 	ret = -ENOMEM;
2423 	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2424 	if (!mres) {
2425 		mlog_errno(ret);
2426 		goto leave;
2427 	}
2428 
2429 	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2430 	if (!mle) {
2431 		mlog_errno(ret);
2432 		goto leave;
2433 	}
2434 	ret = 0;
2435 
2436 	/*
2437 	 * clear any existing master requests and
2438 	 * add the migration mle to the list
2439 	 */
2440 	spin_lock(&dlm->spinlock);
2441 	spin_lock(&dlm->master_lock);
2442 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2443 				    namelen, target, dlm->node_num);
2444 	spin_unlock(&dlm->master_lock);
2445 	spin_unlock(&dlm->spinlock);
2446 
2447 	if (ret == -EEXIST) {
2448 		mlog(0, "another process is already migrating it\n");
2449 		goto fail;
2450 	}
2451 	mle_added = 1;
2452 
2453 	/*
2454 	 * set the MIGRATING flag and flush asts
2455 	 * if we fail after this we need to re-dirty the lockres
2456 	 */
2457 	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2458 		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2459 		     "the target went down.\n", res->lockname.len,
2460 		     res->lockname.name, target);
2461 		spin_lock(&res->spinlock);
2462 		res->state &= ~DLM_LOCK_RES_MIGRATING;
2463 		wake = 1;
2464 		spin_unlock(&res->spinlock);
2465 		ret = -EINVAL;
2466 	}
2467 
2468 fail:
2469 	if (oldmle) {
2470 		/* master is known, detach if not already detached */
2471 		dlm_mle_detach_hb_events(dlm, oldmle);
2472 		dlm_put_mle(oldmle);
2473 	}
2474 
2475 	if (ret < 0) {
2476 		if (mle_added) {
2477 			dlm_mle_detach_hb_events(dlm, mle);
2478 			dlm_put_mle(mle);
2479 		} else if (mle) {
2480 			kmem_cache_free(dlm_mle_cache, mle);
2481 			mle = NULL;
2482 		}
2483 		goto leave;
2484 	}
2485 
2486 	/*
2487 	 * at this point, we have a migration target, an mle
2488 	 * in the master list, and the MIGRATING flag set on
2489 	 * the lockres
2490 	 */
2491 
2492 	/* now that remote nodes are spinning on the MIGRATING flag,
2493 	 * ensure that all assert_master work is flushed. */
2494 	flush_workqueue(dlm->dlm_worker);
2495 
2496 	/* get an extra reference on the mle.
2497 	 * otherwise the assert_master from the new
2498 	 * master will destroy this.
2499 	 * also, make sure that all callers of dlm_get_mle
2500 	 * take both dlm->spinlock and dlm->master_lock */
2501 	spin_lock(&dlm->spinlock);
2502 	spin_lock(&dlm->master_lock);
2503 	dlm_get_mle_inuse(mle);
2504 	spin_unlock(&dlm->master_lock);
2505 	spin_unlock(&dlm->spinlock);
2506 
2507 	/* notify new node and send all lock state */
2508 	/* call send_one_lockres with migration flag.
2509 	 * this serves as notice to the target node that a
2510 	 * migration is starting. */
2511 	ret = dlm_send_one_lockres(dlm, res, mres, target,
2512 				   DLM_MRES_MIGRATION);
2513 
2514 	if (ret < 0) {
2515 		mlog(0, "migration to node %u failed with %d\n",
2516 		     target, ret);
2517 		/* migration failed, detach and clean up mle */
2518 		dlm_mle_detach_hb_events(dlm, mle);
2519 		dlm_put_mle(mle);
2520 		dlm_put_mle_inuse(mle);
2521 		spin_lock(&res->spinlock);
2522 		res->state &= ~DLM_LOCK_RES_MIGRATING;
2523 		wake = 1;
2524 		spin_unlock(&res->spinlock);
2525 		if (dlm_is_host_down(ret))
2526 			dlm_wait_for_node_death(dlm, target,
2527 						DLM_NODE_DEATH_WAIT_MAX);
2528 		goto leave;
2529 	}
2530 
2531 	/* at this point, the target sends a message to all nodes,
2532 	 * (using dlm_do_migrate_request).  this node is skipped since
2533 	 * we had to put an mle in the list to begin the process.  this
2534 	 * node now waits for target to do an assert master.  this node
2535 	 * will be the last one notified, ensuring that the migration
2536 	 * is complete everywhere.  if the target dies while this is
2537 	 * going on, some nodes could potentially see the target as the
2538 	 * master, so it is important that my recovery finds the migration
2539 	 * mle and sets the master to UNKNOWN. */
2540 
2541 
2542 	/* wait for new node to assert master */
2543 	while (1) {
2544 		ret = wait_event_interruptible_timeout(mle->wq,
2545 					(atomic_read(&mle->woken) == 1),
2546 					msecs_to_jiffies(5000));
2547 
2548 		if (ret >= 0) {
2549 			if (atomic_read(&mle->woken) == 1 ||
2550 			    res->owner == target)
2551 				break;
2552 
2553 			mlog(0, "%s:%.*s: timed out during migration\n",
2554 			     dlm->name, res->lockname.len, res->lockname.name);
2555 			/* avoid hang during shutdown when migrating lockres
2556 			 * to a node which also goes down */
2557 			if (dlm_is_node_dead(dlm, target)) {
2558 				mlog(0, "%s:%.*s: expected migration "
2559 				     "target %u is no longer up, restarting\n",
2560 				     dlm->name, res->lockname.len,
2561 				     res->lockname.name, target);
2562 				ret = -EINVAL;
2563 				/* migration failed, detach and clean up mle */
2564 				dlm_mle_detach_hb_events(dlm, mle);
2565 				dlm_put_mle(mle);
2566 				dlm_put_mle_inuse(mle);
2567 				spin_lock(&res->spinlock);
2568 				res->state &= ~DLM_LOCK_RES_MIGRATING;
2569 				wake = 1;
2570 				spin_unlock(&res->spinlock);
2571 				goto leave;
2572 			}
2573 		} else
2574 			mlog(0, "%s:%.*s: caught signal during migration\n",
2575 			     dlm->name, res->lockname.len, res->lockname.name);
2576 	}
2577 
2578 	/* all done, set the owner, clear the flag */
2579 	spin_lock(&res->spinlock);
2580 	dlm_set_lockres_owner(dlm, res, target);
2581 	res->state &= ~DLM_LOCK_RES_MIGRATING;
2582 	dlm_remove_nonlocal_locks(dlm, res);
2583 	spin_unlock(&res->spinlock);
2584 	wake_up(&res->wq);
2585 
2586 	/* master is known, detach if not already detached */
2587 	dlm_mle_detach_hb_events(dlm, mle);
2588 	dlm_put_mle_inuse(mle);
2589 	ret = 0;
2590 
2591 	dlm_lockres_calc_usage(dlm, res);
2592 
2593 leave:
2594 	/* re-dirty the lockres if we failed */
2595 	if (ret < 0)
2596 		dlm_kick_thread(dlm, res);
2597 
2598 	/* wake up waiters if the MIGRATING flag got set
2599 	 * but migration failed */
2600 	if (wake)
2601 		wake_up(&res->wq);
2602 
2603 	if (mres)
2604 		free_page((unsigned long)mres);
2605 
2606 	dlm_put(dlm);
2607 
2608 	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2609 	     name, target, ret);
2610 	return ret;
2611 }
2612 
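/*
 * Condensed view of the migration sequence implemented above:
 *  1. preallocate the mres page and mle, add the DLM_MLE_MIGRATION mle
 *  2. dlm_mark_lockres_migrating(): flush asts, set MIGRATING
 *  3. dlm_send_one_lockres(..., DLM_MRES_MIGRATION) to the target
 *  4. the target notifies all other nodes and asserts mastery;
 *     this node is notified last
 *  5. wake on the mle, set the new owner, clear MIGRATING and free
 *     the now-nonlocal lock structures
 */
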
2613 #define DLM_MIGRATION_RETRY_MS  100
2614 
2615 /*
2616  * Should be called only after beginning the domain leave process.
2617  * There should not be any remaining locks on nonlocal lock resources,
2618  * and there should be no local locks left on locally mastered resources.
2619  *
2620  * Called with the dlm spinlock held, may drop it to do migration, but
2621  * will re-acquire before exit.
2622  *
2623  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2624  */
2625 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2626 {
2627 	int ret;
2628 	int lock_dropped = 0;
2629 	u8 target = O2NM_MAX_NODES;
2630 
2631 	assert_spin_locked(&dlm->spinlock);
2632 
2633 	spin_lock(&res->spinlock);
2634 	if (dlm_is_lockres_migrateable(dlm, res))
2635 		target = dlm_pick_migration_target(dlm, res);
2636 	spin_unlock(&res->spinlock);
2637 
2638 	if (target == O2NM_MAX_NODES)
2639 		goto leave;
2640 
2641 	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2642 	spin_unlock(&dlm->spinlock);
2643 	lock_dropped = 1;
2644 	ret = dlm_migrate_lockres(dlm, res, target);
2645 	if (ret)
2646 		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2647 		     dlm->name, res->lockname.len, res->lockname.name,
2648 		     target, ret);
2649 	spin_lock(&dlm->spinlock);
2650 leave:
2651 	return lock_dropped;
2652 }
2653 
2654 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2655 {
2656 	int ret;
2657 	spin_lock(&dlm->ast_lock);
2658 	spin_lock(&lock->spinlock);
2659 	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2660 	spin_unlock(&lock->spinlock);
2661 	spin_unlock(&dlm->ast_lock);
2662 	return ret;
2663 }
2664 
2665 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2666 				     struct dlm_lock_resource *res,
2667 				     u8 mig_target)
2668 {
2669 	int can_proceed;
2670 	spin_lock(&res->spinlock);
2671 	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2672 	spin_unlock(&res->spinlock);
2673 
2674 	/* target has died, so make the caller break out of the
2675 	 * wait_event, but caller must recheck the domain_map */
2676 	spin_lock(&dlm->spinlock);
2677 	if (!test_bit(mig_target, dlm->domain_map))
2678 		can_proceed = 1;
2679 	spin_unlock(&dlm->spinlock);
2680 	return can_proceed;
2681 }
2682 
2683 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2684 				struct dlm_lock_resource *res)
2685 {
2686 	int ret;
2687 	spin_lock(&res->spinlock);
2688 	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2689 	spin_unlock(&res->spinlock);
2690 	return ret;
2691 }
2692 
2693 
2694 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2695 				       struct dlm_lock_resource *res,
2696 				       u8 target)
2697 {
2698 	int ret = 0;
2699 
2700 	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2701 	       res->lockname.len, res->lockname.name, dlm->node_num,
2702 	       target);
2703 	/* need to set MIGRATING flag on lockres.  this is done by
2704 	 * ensuring that all asts have been flushed for this lockres. */
2705 	spin_lock(&res->spinlock);
2706 	BUG_ON(res->migration_pending);
2707 	res->migration_pending = 1;
2708 	/* strategy is to reserve an extra ast then release
2709 	 * it below, letting the release do all of the work */
2710 	__dlm_lockres_reserve_ast(res);
2711 	spin_unlock(&res->spinlock);
2712 
2713 	/* now flush all the pending asts */
2714 	dlm_kick_thread(dlm, res);
2715 	/* before waiting on DIRTY, block processes which may
2716 	 * try to dirty the lockres before MIGRATING is set */
2717 	spin_lock(&res->spinlock);
2718 	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2719 	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2720 	spin_unlock(&res->spinlock);
2721 	/* now wait on any pending asts and the DIRTY state */
2722 	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2723 	dlm_lockres_release_ast(dlm, res);
2724 
2725 	mlog(0, "about to wait on migration_wq, dirty=%s\n",
2726 	       res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2727 	/* if the extra ref we just put was the final one, this
2728 	 * will pass thru immediately.  otherwise, we need to wait
2729 	 * for the last ast to finish. */
2730 again:
2731 	ret = wait_event_interruptible_timeout(dlm->migration_wq,
2732 		   dlm_migration_can_proceed(dlm, res, target),
2733 		   msecs_to_jiffies(1000));
2734 	if (ret < 0) {
2735 		mlog(0, "woken again: migrating? %s, dead? %s\n",
2736 		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2737 		       test_bit(target, dlm->domain_map) ? "no":"yes");
2738 	} else {
2739 		mlog(0, "all is well: migrating? %s, dead? %s\n",
2740 		       res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2741 		       test_bit(target, dlm->domain_map) ? "no":"yes");
2742 	}
2743 	if (!dlm_migration_can_proceed(dlm, res, target)) {
2744 		mlog(0, "trying again...\n");
2745 		goto again;
2746 	}
2747 
2748 	ret = 0;
2749 	/* did the target go down or die? */
2750 	spin_lock(&dlm->spinlock);
2751 	if (!test_bit(target, dlm->domain_map)) {
2752 		mlog(ML_ERROR, "aha. migration target %u just went down\n",
2753 		     target);
2754 		ret = -EHOSTDOWN;
2755 	}
2756 	spin_unlock(&dlm->spinlock);
2757 
2758 	/*
2759 	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2760 	 * another try; otherwise, we are sure the MIGRATING state is there,
2761 	 * drop the unneeded state which blocked threads trying to DIRTY
2762 	 */
2763 	spin_lock(&res->spinlock);
2764 	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2765 	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2766 	if (!ret)
2767 		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2768 	spin_unlock(&res->spinlock);
2769 
2770 	/*
2771 	 * at this point:
2772 	 *
2773 	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2774 	 *   o there are no pending asts on this lockres
2775 	 *   o all processes trying to reserve an ast on this
2776 	 *     lockres must wait for the MIGRATING flag to clear
2777 	 */
2778 	return ret;
2779 }
2780 
2781 /* last step in the migration process.
2782  * original master calls this to free all of the dlm_lock
2783  * structures that used to be for other nodes. */
2784 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2785 				      struct dlm_lock_resource *res)
2786 {
2787 	struct list_head *queue = &res->granted;
2788 	int i, bit;
2789 	struct dlm_lock *lock, *next;
2790 
2791 	assert_spin_locked(&res->spinlock);
2792 
2793 	BUG_ON(res->owner == dlm->node_num);
2794 
2795 	for (i=0; i<3; i++) {
2796 		list_for_each_entry_safe(lock, next, queue, list) {
2797 			if (lock->ml.node != dlm->node_num) {
2798 				mlog(0, "putting lock for node %u\n",
2799 				     lock->ml.node);
2800 				/* be extra careful */
2801 				BUG_ON(!list_empty(&lock->ast_list));
2802 				BUG_ON(!list_empty(&lock->bast_list));
2803 				BUG_ON(lock->ast_pending);
2804 				BUG_ON(lock->bast_pending);
2805 				dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2806 				list_del_init(&lock->list);
2807 				dlm_lock_put(lock);
2808 				/* In a normal unlock, we would have added a
2809 				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2810 				dlm_lock_put(lock);
2811 			}
2812 		}
2813 		queue++;
2814 	}
2815 	bit = 0;
2816 	while (1) {
2817 		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2818 		if (bit >= O2NM_MAX_NODES)
2819 			break;
2820 		/* do not clear the local node reference, if there is a
2821 		 * process holding this, let it drop the ref itself */
2822 		if (bit != dlm->node_num) {
2823 			mlog(0, "%s:%.*s: node %u had a ref to this "
2824 			     "migrating lockres, clearing\n", dlm->name,
2825 			     res->lockname.len, res->lockname.name, bit);
2826 			dlm_lockres_clear_refmap_bit(bit, res);
2827 		}
2828 		bit++;
2829 	}
2830 }
2831 
2832 /*
2833  * Pick a node to migrate the lock resource to. This function selects a
2834  * potential target based first on the locks and then on refmap. It skips
2835  * nodes that are in the process of exiting the domain.
2836  */
2837 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2838 				    struct dlm_lock_resource *res)
2839 {
2840 	enum dlm_lockres_list idx;
2841 	struct list_head *queue = &res->granted;
2842 	struct dlm_lock *lock;
2843 	int noderef;
2844 	u8 nodenum = O2NM_MAX_NODES;
2845 
2846 	assert_spin_locked(&dlm->spinlock);
2847 	assert_spin_locked(&res->spinlock);
2848 
2849 	/* Go through all the locks */
2850 	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2851 		queue = dlm_list_idx_to_ptr(res, idx);
2852 		list_for_each_entry(lock, queue, list) {
2853 			if (lock->ml.node == dlm->node_num)
2854 				continue;
2855 			if (test_bit(lock->ml.node, dlm->exit_domain_map))
2856 				continue;
2857 			nodenum = lock->ml.node;
2858 			goto bail;
2859 		}
2860 	}
2861 
2862 	/* Go thru the refmap */
2863 	noderef = -1;
2864 	while (1) {
2865 		noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
2866 					noderef + 1);
2867 		if (noderef >= O2NM_MAX_NODES)
2868 			break;
2869 		if (noderef == dlm->node_num)
2870 			continue;
2871 		if (test_bit(noderef, dlm->exit_domain_map))
2872 			continue;
2873 		nodenum = noderef;
2874 		goto bail;
2875 	}
2876 
2877 bail:
2878 	return nodenum;
2879 }
2880 
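/*
 * Example of the selection order above: with remote locks from nodes
 * {3, 7}, refmap bits {2, 7}, and node 7 exiting the domain, node 3
 * wins from the lock queues; only if every lock is local or on an
 * exiting node does the refmap scan run, picking node 2 here.
 */
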
2881 /* this is called by the new master once all lockres
2882  * data has been received */
2883 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2884 				  struct dlm_lock_resource *res,
2885 				  u8 master, u8 new_master,
2886 				  struct dlm_node_iter *iter)
2887 {
2888 	struct dlm_migrate_request migrate;
2889 	int ret, skip, status = 0;
2890 	int nodenum;
2891 
2892 	memset(&migrate, 0, sizeof(migrate));
2893 	migrate.namelen = res->lockname.len;
2894 	memcpy(migrate.name, res->lockname.name, migrate.namelen);
2895 	migrate.new_master = new_master;
2896 	migrate.master = master;
2897 
2898 	ret = 0;
2899 
2900 	/* send message to all nodes, except the master and myself */
2901 	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2902 		if (nodenum == master ||
2903 		    nodenum == new_master)
2904 			continue;
2905 
2906 		/* We could race with a node exiting the domain.  If it has, skip it. */
2907 		spin_lock(&dlm->spinlock);
2908 		skip = (!test_bit(nodenum, dlm->domain_map));
2909 		spin_unlock(&dlm->spinlock);
2910 		if (skip) {
2911 			clear_bit(nodenum, iter->node_map);
2912 			continue;
2913 		}
2914 
2915 		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2916 					 &migrate, sizeof(migrate), nodenum,
2917 					 &status);
2918 		if (ret < 0) {
2919 			mlog(ML_ERROR, "Error %d when sending message %u (key "
2920 			     "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
2921 			     dlm->key, nodenum);
2922 			if (!dlm_is_host_down(ret)) {
2923 				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2924 				BUG();
2925 			}
2926 			clear_bit(nodenum, iter->node_map);
2927 			ret = 0;
2928 		} else if (status < 0) {
2929 			mlog(0, "migrate request (node %u) returned %d!\n",
2930 			     nodenum, status);
2931 			ret = status;
2932 		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
2933 			/* during the migration request we short-circuited
2934 			 * the mastery of the lockres.  make sure we have
2935 			 * a mastery ref for nodenum */
2936 			mlog(0, "%s:%.*s: need ref for node %u\n",
2937 			     dlm->name, res->lockname.len, res->lockname.name,
2938 			     nodenum);
2939 			spin_lock(&res->spinlock);
2940 			dlm_lockres_set_refmap_bit(nodenum, res);
2941 			spin_unlock(&res->spinlock);
2942 		}
2943 	}
2944 
2945 	if (ret < 0)
2946 		mlog_errno(ret);
2947 
2948 	mlog(0, "returning ret=%d\n", ret);
2949 	return ret;
2950 }
2951 
2952 
2953 /* if there is an existing mle for this lockres, we now know who the master is.
2954  * (the one who sent us *this* message) we can clear it up right away.
2955  * since the process that put the mle on the list still has a reference to it,
2956  * we can unhash it now, set the master and wake the process.  as a result,
2957  * we will have no mle in the list to start with.  now we can add an mle for
2958  * the migration and this should be the only one found for those scanning the
2959  * list.  */
2960 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
2961 				void **ret_data)
2962 {
2963 	struct dlm_ctxt *dlm = data;
2964 	struct dlm_lock_resource *res = NULL;
2965 	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
2966 	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
2967 	const char *name;
2968 	unsigned int namelen, hash;
2969 	int ret = 0;
2970 
2971 	if (!dlm_grab(dlm))
2972 		return -EINVAL;
2973 
2974 	name = migrate->name;
2975 	namelen = migrate->namelen;
2976 	hash = dlm_lockid_hash(name, namelen);
2977 
2978 	/* preallocate.. if this fails, abort */
2979 	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2980 
2981 	if (!mle) {
2982 		ret = -ENOMEM;
2983 		goto leave;
2984 	}
2985 
2986 	/* check for pre-existing lock */
2987 	spin_lock(&dlm->spinlock);
2988 	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
2989 	if (res) {
2990 		spin_lock(&res->spinlock);
2991 		if (res->state & DLM_LOCK_RES_RECOVERING) {
2992 			/* if all is working ok, this can only mean that we got
2993 			 * a migrate request from a node that we now see as
2994 			 * dead.  what can we do here?  drop it to the floor? */
2995 			spin_unlock(&res->spinlock);
2996 			mlog(ML_ERROR, "Got a migrate request, but the "
2997 			     "lockres is marked as recovering!\n");
2998 			kmem_cache_free(dlm_mle_cache, mle);
2999 			ret = -EINVAL; /* need a better solution */
3000 			goto unlock;
3001 		}
3002 		res->state |= DLM_LOCK_RES_MIGRATING;
3003 		spin_unlock(&res->spinlock);
3004 	}
3005 
3006 	spin_lock(&dlm->master_lock);
3007 	/* ignore status.  only nonzero status would BUG. */
3008 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3009 				    name, namelen,
3010 				    migrate->new_master,
3011 				    migrate->master);
3012 
3013 	spin_unlock(&dlm->master_lock);
3014 unlock:
3015 	spin_unlock(&dlm->spinlock);
3016 
3017 	if (oldmle) {
3018 		/* master is known, detach if not already detached */
3019 		dlm_mle_detach_hb_events(dlm, oldmle);
3020 		dlm_put_mle(oldmle);
3021 	}
3022 
3023 	if (res)
3024 		dlm_lockres_put(res);
3025 leave:
3026 	dlm_put(dlm);
3027 	return ret;
3028 }
3029 
3030 /* must be holding dlm->spinlock and dlm->master_lock
3031  * when adding a migration mle, we can clear any other mles
3032  * in the master list because we know with certainty that
3033  * the master is "master".  so we remove any old mle from
3034  * the list after setting its master field, and then add
3035  * the new migration mle.  this way we can hold with the rule
3036  * of having only one mle for a given lock name at all times. */
3037 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3038 				 struct dlm_lock_resource *res,
3039 				 struct dlm_master_list_entry *mle,
3040 				 struct dlm_master_list_entry **oldmle,
3041 				 const char *name, unsigned int namelen,
3042 				 u8 new_master, u8 master)
3043 {
3044 	int found;
3045 	int ret = 0;
3046 
3047 	*oldmle = NULL;
3048 
3049 	assert_spin_locked(&dlm->spinlock);
3050 	assert_spin_locked(&dlm->master_lock);
3051 
3052 	/* caller is responsible for any ref taken here on oldmle */
3053 	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3054 	if (found) {
3055 		struct dlm_master_list_entry *tmp = *oldmle;
3056 		spin_lock(&tmp->spinlock);
3057 		if (tmp->type == DLM_MLE_MIGRATION) {
3058 			if (master == dlm->node_num) {
3059 				/* ah another process raced me to it */
3060 				mlog(0, "tried to migrate %.*s, but some "
3061 				     "process beat me to it\n",
3062 				     namelen, name);
3063 				ret = -EEXIST;
3064 			} else {
3065 				/* bad.  2 NODES are trying to migrate! */
3066 				mlog(ML_ERROR, "migration error: mle: "
3067 				     "master=%u new_master=%u // request: "
3068 				     "master=%u new_master=%u // "
3069 				     "lockres=%.*s\n",
3070 				     tmp->master, tmp->new_master,
3071 				     master, new_master,
3072 				     namelen, name);
3073 				BUG();
3074 			}
3075 		} else {
3076 			/* this is essentially what assert_master does */
3077 			tmp->master = master;
3078 			atomic_set(&tmp->woken, 1);
3079 			wake_up(&tmp->wq);
3080 			/* remove it so that only one mle will be found */
3081 			__dlm_unlink_mle(dlm, tmp);
3082 			__dlm_mle_detach_hb_events(dlm, tmp);
3083 			ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3084 			mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3085 			    "telling master to get ref for cleared out mle "
3086 			    "during migration\n", dlm->name, namelen, name,
3087 			    master, new_master);
3088 		}
3089 		spin_unlock(&tmp->spinlock);
3090 	}
3091 
3092 	/* now add a migration mle to the tail of the list */
3093 	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3094 	mle->new_master = new_master;
3095 	/* the new master will be sending an assert master for this.
3096 	 * at that point we will get the refmap reference */
3097 	mle->master = master;
3098 	/* do this for consistency with other mle types */
3099 	set_bit(new_master, mle->maybe_map);
3100 	__dlm_insert_mle(dlm, mle);
3101 
3102 	return ret;
3103 }
3104 
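/*
 * Return values produced by the logic above: 0 on a clean insert,
 * -EEXIST when another local process already added a migration mle for
 * this name, and DLM_MIGRATE_RESPONSE_MASTERY_REF when an existing
 * mastery mle was short-circuited, in which case the master must take
 * a refmap reference for the sending node.
 */
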
3105 /*
3106  * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3107  */
3108 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3109 					struct dlm_master_list_entry *mle)
3110 {
3111 	struct dlm_lock_resource *res;
3112 
3113 	/* Find the lockres associated to the mle and set its owner to UNK */
3114 	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3115 				   mle->mnamehash);
3116 	if (res) {
3117 		spin_unlock(&dlm->master_lock);
3118 
3119 		/* move lockres onto recovery list */
3120 		spin_lock(&res->spinlock);
3121 		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3122 		dlm_move_lockres_to_recovery_list(dlm, res);
3123 		spin_unlock(&res->spinlock);
3124 		dlm_lockres_put(res);
3125 
3126 		/* about to get rid of mle, detach from heartbeat */
3127 		__dlm_mle_detach_hb_events(dlm, mle);
3128 
3129 		/* dump the mle */
3130 		spin_lock(&dlm->master_lock);
3131 		__dlm_put_mle(mle);
3132 		spin_unlock(&dlm->master_lock);
3133 	}
3134 
3135 	return res;
3136 }
3137 
3138 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3139 				    struct dlm_master_list_entry *mle)
3140 {
3141 	__dlm_mle_detach_hb_events(dlm, mle);
3142 
3143 	spin_lock(&mle->spinlock);
3144 	__dlm_unlink_mle(dlm, mle);
3145 	atomic_set(&mle->woken, 1);
3146 	spin_unlock(&mle->spinlock);
3147 
3148 	wake_up(&mle->wq);
3149 }
3150 
3151 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3152 				struct dlm_master_list_entry *mle, u8 dead_node)
3153 {
3154 	int bit;
3155 
3156 	BUG_ON(mle->type != DLM_MLE_BLOCK);
3157 
3158 	spin_lock(&mle->spinlock);
3159 	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3160 	if (bit != dead_node) {
3161 		mlog(0, "mle found, but dead node %u would not have been "
3162 		     "master\n", dead_node);
3163 		spin_unlock(&mle->spinlock);
3164 	} else {
3165 		/* Must drop the refcount by one since the assert_master will
3166 		 * never arrive. This may result in the mle being unlinked and
3167 		 * freed, but there may still be a process waiting in the
3168 		 * dlmlock path which is fine. */
3169 		mlog(0, "node %u was expected master\n", dead_node);
3170 		atomic_set(&mle->woken, 1);
3171 		spin_unlock(&mle->spinlock);
3172 		wake_up(&mle->wq);
3173 
3174 		/* Do not need events any longer, so detach from heartbeat */
3175 		__dlm_mle_detach_hb_events(dlm, mle);
3176 		__dlm_put_mle(mle);
3177 	}
3178 }
3179 
3180 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3181 {
3182 	struct dlm_master_list_entry *mle;
3183 	struct dlm_lock_resource *res;
3184 	struct hlist_head *bucket;
3185 	struct hlist_node *list;
3186 	unsigned int i;
3187 
3188 	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3189 top:
3190 	assert_spin_locked(&dlm->spinlock);
3191 
3192 	/* clean the master list */
3193 	spin_lock(&dlm->master_lock);
3194 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3195 		bucket = dlm_master_hash(dlm, i);
3196 		hlist_for_each(list, bucket) {
3197 			mle = hlist_entry(list, struct dlm_master_list_entry,
3198 					  master_hash_node);
3199 
3200 			BUG_ON(mle->type != DLM_MLE_BLOCK &&
3201 			       mle->type != DLM_MLE_MASTER &&
3202 			       mle->type != DLM_MLE_MIGRATION);
3203 
3204 			/* MASTER mles are initiated locally. The waiting
3205 			 * process will notice the node map change shortly.
3206 			 * Let that happen as normal. */
3207 			if (mle->type == DLM_MLE_MASTER)
3208 				continue;
3209 
3210 			/* BLOCK mles are initiated by other nodes. Need to
3211 			 * clean up if the dead node would have been the
3212 			 * master. */
3213 			if (mle->type == DLM_MLE_BLOCK) {
3214 				dlm_clean_block_mle(dlm, mle, dead_node);
3215 				continue;
3216 			}
3217 
3218 			/* Everything else is a MIGRATION mle */
3219 
3220 			/* The rule for MIGRATION mles is that the master
3221 			 * becomes UNKNOWN if *either* the original or the new
3222 			 * master dies. All UNKNOWN lockres' are sent to
3223 			 * whichever node becomes the recovery master. The new
3224 			 * master is responsible for determining if there is
3225 			 * still a master for this lockres, or if he needs to
3226 			 * take over mastery. Either way, this node should
3227 			 * expect another message to resolve this. */
3228 
3229 			if (mle->master != dead_node &&
3230 			    mle->new_master != dead_node)
3231 				continue;
3232 
3233 			/* If we have reached this point, this mle needs to be
3234 			 * removed from the list and freed. */
3235 			dlm_clean_migration_mle(dlm, mle);
3236 
3237 			mlog(0, "%s: node %u died during migration from "
3238 			     "%u to %u!\n", dlm->name, dead_node, mle->master,
3239 			     mle->new_master);
3240 
3241 			/* If we find a lockres associated with the mle, we've
3242 			 * hit this rare case that messes up our lock ordering.
3243 			 * If so, we need to drop the master lock so that we can
3244 			 * take the lockres lock, meaning that we will have to
3245 			 * restart from the head of list. */
3246 			res = dlm_reset_mleres_owner(dlm, mle);
3247 			if (res)
3248 				/* restart */
3249 				goto top;
3250 
3251 			/* This may be the last reference */
3252 			__dlm_put_mle(mle);
3253 		}
3254 	}
3255 	spin_unlock(&dlm->master_lock);
3256 }
3257 
3258 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3259 			 u8 old_master)
3260 {
3261 	struct dlm_node_iter iter;
3262 	int ret = 0;
3263 
3264 	spin_lock(&dlm->spinlock);
3265 	dlm_node_iter_init(dlm->domain_map, &iter);
3266 	clear_bit(old_master, iter.node_map);
3267 	clear_bit(dlm->node_num, iter.node_map);
3268 	spin_unlock(&dlm->spinlock);
3269 
3270 	/* ownership of the lockres is changing.  account for the
3271 	 * mastery reference here since old_master will briefly have
3272 	 * a reference after the migration completes */
3273 	spin_lock(&res->spinlock);
3274 	dlm_lockres_set_refmap_bit(old_master, res);
3275 	spin_unlock(&res->spinlock);
3276 
3277 	mlog(0, "now time to do a migrate request to other nodes\n");
3278 	ret = dlm_do_migrate_request(dlm, res, old_master,
3279 				     dlm->node_num, &iter);
3280 	if (ret < 0) {
3281 		mlog_errno(ret);
3282 		goto leave;
3283 	}
3284 
3285 	mlog(0, "doing assert master of %.*s to all except the original node\n",
3286 	     res->lockname.len, res->lockname.name);
3287 	/* this call now finishes out the nodemap
3288 	 * even if one or more nodes die */
3289 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3290 				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3291 	if (ret < 0) {
3292 		/* no longer need to retry.  all living nodes contacted. */
3293 		mlog_errno(ret);
3294 		ret = 0;
3295 	}
3296 
3297 	memset(iter.node_map, 0, sizeof(iter.node_map));
3298 	set_bit(old_master, iter.node_map);
3299 	mlog(0, "doing assert master of %.*s back to %u\n",
3300 	     res->lockname.len, res->lockname.name, old_master);
3301 	ret = dlm_do_assert_master(dlm, res, iter.node_map,
3302 				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
3303 	if (ret < 0) {
3304 		mlog(0, "assert master to original master failed "
3305 		     "with %d.\n", ret);
3306 		/* the only nonzero status here would be because of
3307 		 * a dead original node.  we're done. */
3308 		ret = 0;
3309 	}
3310 
3311 	/* all done, set the owner, clear the flag */
3312 	spin_lock(&res->spinlock);
3313 	dlm_set_lockres_owner(dlm, res, dlm->node_num);
3314 	res->state &= ~DLM_LOCK_RES_MIGRATING;
3315 	spin_unlock(&res->spinlock);
3316 	/* re-dirty it on the new master */
3317 	dlm_kick_thread(dlm, res);
3318 	wake_up(&res->wq);
3319 leave:
3320 	return ret;
3321 }
3322 
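/*
 * Note the two assert_master rounds above: the first goes to every
 * live node except old_master (still spinning on MIGRATING), the
 * second to old_master alone so it can clean up its migration state
 * and account for its brief mastery reference.
 */
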
3323 /*
3324  * LOCKRES AST REFCOUNT
3325  * this is integral to migration
3326  */
3327 
3328 /* for future intent to call an ast, reserve one ahead of time.
3329  * this should be called only after waiting on the lockres
3330  * with dlm_wait_on_lockres, and while still holding the
3331  * spinlock after the call. */
3332 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3333 {
3334 	assert_spin_locked(&res->spinlock);
3335 	if (res->state & DLM_LOCK_RES_MIGRATING) {
3336 		__dlm_print_one_lock_resource(res);
3337 	}
3338 	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3339 
3340 	atomic_inc(&res->asts_reserved);
3341 }
3342 
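/*
 * Reservations always pair with a release (sketch of the pattern used
 * by dlm_assert_master_worker() and dlm_mark_lockres_migrating()):
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	...
 *	dlm_lockres_release_ast(dlm, res);
 */
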
3343 /*
3344  * used to drop the reserved ast, either because it went unused,
3345  * or because the ast/bast was actually called.
3346  *
3347  * also, if there is a pending migration on this lockres,
3348  * and this was the last pending ast on the lockres,
3349  * atomically set the MIGRATING flag before we drop the lock.
3350  * this is how we ensure that migration can proceed with no
3351  * asts in progress.  note that it is ok if the state of the
3352  * queues is such that a lock should be granted in the future
3353  * or that a bast should be fired, because the new master will
3354  * shuffle the lists on this lockres as soon as it is migrated.
3355  */
3356 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3357 			     struct dlm_lock_resource *res)
3358 {
3359 	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3360 		return;
3361 
3362 	if (!res->migration_pending) {
3363 		spin_unlock(&res->spinlock);
3364 		return;
3365 	}
3366 
3367 	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3368 	res->migration_pending = 0;
3369 	res->state |= DLM_LOCK_RES_MIGRATING;
3370 	spin_unlock(&res->spinlock);
3371 	wake_up(&res->wq);
3372 	wake_up(&dlm->migration_wq);
3373 }
3374 
3375 void dlm_force_free_mles(struct dlm_ctxt *dlm)
3376 {
3377 	int i;
3378 	struct hlist_head *bucket;
3379 	struct dlm_master_list_entry *mle;
3380 	struct hlist_node *tmp, *list;
3381 
3382 	/*
3383 	 * We notified all other nodes that we are exiting the domain and
3384 	 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
3385 	 * around we force free them and wake any processes that are waiting
3386 	 * on the mles
3387 	 */
3388 	spin_lock(&dlm->spinlock);
3389 	spin_lock(&dlm->master_lock);
3390 
3391 	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3392 	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3393 
3394 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3395 		bucket = dlm_master_hash(dlm, i);
3396 		hlist_for_each_safe(list, tmp, bucket) {
3397 			mle = hlist_entry(list, struct dlm_master_list_entry,
3398 					  master_hash_node);
3399 			if (mle->type != DLM_MLE_BLOCK) {
3400 				mlog(ML_ERROR, "bad mle: %p\n", mle);
3401 				dlm_print_one_mle(mle);
3402 			}
3403 			atomic_set(&mle->woken, 1);
3404 			wake_up(&mle->wq);
3405 
3406 			__dlm_unlink_mle(dlm, mle);
3407 			__dlm_mle_detach_hb_events(dlm, mle);
3408 			__dlm_put_mle(mle);
3409 		}
3410 	}
3411 	spin_unlock(&dlm->master_lock);
3412 	spin_unlock(&dlm->spinlock);
3413 }
3414