xref: /linux/fs/ocfs2/dlm/dlmthread.c (revision 48a7afe314bfc4d7f50e1608632f503dbba7e013)
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

#define dlm_lock_is_remote(dlm, lock)     ((lock)->ml.node != (dlm)->node_num)

/* Waits until the given flags are cleared on res->state.  Exits holding
 * res->spinlock, but may drop and retake it while waiting. */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}
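
/*
 * Illustrative caller pattern (a sketch, not a real call site): the
 * caller enters and leaves with res->spinlock held, and the given
 * flags are guaranteed clear on return.
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
 *	... res->state now has DLM_LOCK_RES_SETREF_INPROG cleared ...
 *	spin_unlock(&res->spinlock);
 *
 * dlm_purge_lockres() below uses exactly this shape to order the
 * refmap clear after any in-progress set.
 */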

int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	if (!__dlm_lockres_has_locks(res) &&
	    (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
		/* try not to scan the bitmap unless the first two
		 * conditions are already true */
		int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* since the bit for dlm->node_num is not
			 * set, inflight_locks better be zero */
			BUG_ON(res->inflight_locks != 0);
			return 1;
		}
	}
	return 0;
}
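
/*
 * Both helpers above must be called under res->spinlock.  A minimal
 * sketch of the intended use (mirroring dlm_run_purge_list() below):
 *
 *	spin_lock(&res->spinlock);
 *	unused = __dlm_lockres_unused(res);
 *	spin_unlock(&res->spinlock);
 *	if (unused)
 *		... res is a candidate for purging ...
 */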


/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "putting lockres %.*s:%p onto purge list\n",
			     res->lockname.len, res->lockname.name, res);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
		     res->lockname.len, res->lockname.name, res, res->owner);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}
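
/*
 * A hedged usage sketch: code that has just added or removed a lock
 * and holds no spinlocks calls the locking wrapper, e.g. at the end
 * of an unlock path:
 *
 *	... detach the lock from its queue ...
 *	dlm_lockres_calc_usage(dlm, res);
 *
 * Callers already holding both dlm->spinlock and res->spinlock use
 * __dlm_lockres_calc_usage() directly instead.
 */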

static int dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	spin_lock(&res->spinlock);
	if (!__dlm_lockres_unused(res)) {
		spin_unlock(&res->spinlock);
		mlog(0, "%s:%.*s: tried to purge but not unused\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		return -ENOTEMPTY;
	}
	master = (res->owner == dlm->node_num);
	if (!master)
		res->state |= DLM_LOCK_RES_DROPPING_REF;
	spin_unlock(&res->spinlock);

	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
	     res->lockname.name, master);

	if (!master) {
		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);
		/* drop spinlock to do messaging, retake below */
		spin_unlock(&dlm->spinlock);
		/* clear our bit from the master's refmap; only host-down
		 * errors are tolerated */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
		}
		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret);
		spin_lock(&dlm->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purgelist, "
		     "master = %d\n", res->lockname.len, res->lockname.name,
		     res, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
	__dlm_unhash_lockres(res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}
	return 0;
}
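
/*
 * Locking contract, summarized: dlm_purge_lockres() is entered with
 * dlm->spinlock held and returns with it held, but (for non-master
 * resources) drops it around the dlm_drop_lockres_ref() message.
 * dlm_run_purge_list() below depends on this when it walks the list.
 */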

static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it -- there's no need to keep the lockres
		 * spinlock. */
		spin_lock(&lockres->spinlock);
		unused = __dlm_lockres_unused(lockres);
		spin_unlock(&lockres->spinlock);

		if (!unused)
			continue;

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anyone added after
			 * him will have a greater last_used value */
			break;
		}

		dlm_lockres_get(lockres);

		/* This may drop and reacquire the dlm spinlock if it
		 * has to send a deref message to the master. */
		if (dlm_purge_lockres(dlm, lockres))
			BUG();

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}
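
/*
 * Aging illustration (assuming DLM_PURGE_INTERVAL_MS is 8000 in
 * dlmcommon.h): a lockres with last_used == jiffies must sit unused
 * on the purge list for ~8 seconds before a normal pass will purge
 * it; purge_now (passed at shutdown) skips the age check entirely.
 */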

static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	struct list_head *iter;
	struct list_head *head;
	int can_grant = 1;

	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
	//	  res->lockname.name);

	/* because this function is called with the lockres
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them all throughout */
	assert_spin_locked(&res->spinlock);
	BUG_ON(res->state & (DLM_LOCK_RES_MIGRATING|
			     DLM_LOCK_RES_RECOVERING|
			     DLM_LOCK_RES_IN_PROGRESS));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
	     res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%.*s: converting a lock with no "
		     "convert_type!\n", res->lockname.len, res->lockname.name);
		BUG();
	}
	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}
	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
		     "granting: %d, node: %u\n", res->lockname.len,
		     res->lockname.name, target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
		     "node: %u\n", res->lockname.len, res->lockname.name,
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}
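
/*
 * Worked example (illustrative): suppose res->granted holds a PR lock
 * and res->converting holds a PR->EX convert.  EX is incompatible with
 * the granted PR, so can_grant drops to 0: a BAST is queued against
 * the PR holder and its highest_blocked is raised to EX.  When the PR
 * is later dropped and the lockres is re-shuffled, no conflicting lock
 * remains, so the convert moves to the granted queue, lksb->status
 * becomes DLM_NORMAL and the AST is queued.
 */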

/* must have NO locks held when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}

void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}
}
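
/*
 * Note on references: the dirty_list entry pins the lockres via the
 * dlm_lockres_get() above; dlm_thread() drops that reference when it
 * peels the resource off the list.
 */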


/* Launch the DLM thread for this domain */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}
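
/*
 * Illustrative pairing (a sketch; the real call sites live in the
 * domain join/leave paths):
 *
 *	if (dlm_launch_thread(dlm))
 *		... fail the domain join ...
 *	...
 *	dlm_complete_thread(dlm);	// stops and reaps the kthread
 */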

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "delivering an ast for this lockres\n");

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "aha another ast got queued while "
			     "we were finishing the last one.  will "
			     "keep the ast_pending flag set.\n");
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "delivering a bast for this lockres "
		     "(blocked = %d)\n", hi);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "aha another bast got queued while "
			     "we were finishing the last one.  will "
			     "keep the bast_pending flag set.\n");
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}
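
/*
 * Reference pattern above, summarized: each queued (b)ast holds one
 * ast reservation on the lockres and one ref on the lock.  Delivery
 * takes a temporary extra lock ref so the entry can be unlinked and
 * dlm->ast_lock dropped for messaging without the lock disappearing;
 * dlm_lockres_release_ast() then drops the reservation.
 */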


#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
#define DLM_THREAD_MAX_ASTS   10
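
/*
 * Main-loop tunables: even when idle the thread wakes at least every
 * DLM_THREAD_TIMEOUT_MS to re-run the purge list, and it shuffles at
 * most DLM_THREAD_MAX_DIRTY dirty lockres per pass before yielding.
 * (DLM_THREAD_MAX_ASTS appears unused in this revision.)
 */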

static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we
			 * shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				mlog(0, "delaying list shuffling for in-"
				     "progress lockres %.*s, state=%d\n",
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
			     "res=%.*s\n", dlm->name,
			     res->lockname.len, res->lockname.name);

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "throttling dlm_thread\n");
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}