xref: /linux/fs/gfs2/glock.c (revision 092e0e7e520a1fca03e13c9f2d157432a8657ff2)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The incore superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
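	/*
	 * Fold in the superblock pointer so that identical lock numbers
	 * on different filesystems hash to different buckets.
	 */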
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct kmem_cache *cachep = gfs2_glock_cachep;

	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	if (mapping)
		cachep = gfs2_glock_aspace_cachep;
	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	int may_reclaim;
	may_reclaim = (demote_ok(gl) &&
		       (atomic_read(&gl->gl_ref) == 1 ||
			(gl->gl_name.ln_type == LM_TYPE_INODE &&
			 atomic_read(&gl->gl_ref) <= 2)));
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && may_reclaim) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(gl_lock_addr(gl->gl_hash));
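	/*
	 * atomic_dec_and_lock() takes lru_lock only when the count drops
	 * to zero, so the final put removes the glock from the hash table
	 * and the LRU in one atomic step.
	 */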
	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		hlist_del(&gl->gl_list);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		write_unlock(gl_lock_addr(gl->gl_hash));
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	spin_lock(&gl->gl_spin);
	gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
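	/*
	 * An exclusive request, or any request made while the holder at
	 * the head of the queue wants exclusive, must wait until it
	 * reaches the head of the queue itself.
	 */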
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type-specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
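			/*
			 * Only the first holder on the list gets the
			 * type-specific go_lock callback, which runs
			 * outside gl_spin and may sleep.
			 */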
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

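	/*
	 * A glock pins itself while it is held: take a reference on the
	 * unlocked->locked transition and drop it on the way back.
	 */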
	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
				 unsigned int req_state,
				 unsigned int flags)
{
	int ret = LM_OUT_ERROR;

	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
		return req_state == LM_ST_UNLOCKED ? 0 : req_state;

	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
							 req_state, flags);
	return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	BUG_ON(gl->gl_state == target);
	BUG_ON(gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
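	/*
	 * Conversions from SH or DF while still holding the lock are made
	 * as single-callback "try" requests, so a conversion which cannot
	 * be granted immediately fails instead of deadlocking in the DLM;
	 * finish_xmote() then unlocks and retries.
	 */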
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;
	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);

	if (!(ret & LM_OUT_ASYNC)) {
		finish_xmote(gl, ret);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	} else {
		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
	}
	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
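	/*
	 * We must not block here, so hand the work off to the glock
	 * workqueue; the extra reference is dropped by the work item.
	 */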
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = NULL;
	struct inode *inode;
	u64 no_addr = 0;

	spin_lock(&gl->gl_spin);
	ip = (struct gfs2_inode *)gl->gl_object;
	if (ip)
		no_addr = ip->i_no_addr;
	spin_unlock(&gl->gl_spin);
	if (ip) {
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
		if (inode) {
			d_prune_aliases(inode);
			iput(inode);
		}
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
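		/*
		 * Honour the minimum hold time: if the glock changed state
		 * recently, postpone the demote instead of bouncing the
		 * lock straight back to the cluster.
		 */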
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
	else
		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	write_lock(gl_lock_addr(hash));
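	/*
	 * Another thread may have raced us and inserted the same glock
	 * while we were allocating; if so, free ours and use theirs.
	 */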
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

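	/*
	 * gl_demote_state == LM_ST_EXCLUSIVE means no demote is pending.
	 * Two conflicting demote requests collapse to LM_ST_UNLOCKED,
	 * which satisfies both of them.
	 */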
	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound the write to the scratch buffer and don't let the
		   formatted result be re-parsed as a format string */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		printk(KERN_ERR " ");
		vprintk(fmt, args);
	}
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
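	/*
	 * LM_FLAG_PRIORITY holders jump the queue: insert_pt points at the
	 * first waiting holder, so a priority request goes in ahead of all
	 * other waiters and may cancel the request currently in progress.
	 */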
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
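	/*
	 * A NOEXP request may arrive while the glock is frozen for
	 * recovery; thaw it so the pending DLM reply gets processed.
	 */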
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_holder structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		spin_lock(&gl->gl_spin);
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
		spin_unlock(&gl->gl_spin);
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
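	/*
	 * Tell the VM roughly how much is left on the LRU, scaled by
	 * vfs_cache_pressure, so it can decide whether to call us again.
	 */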
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket index
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
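	/*
	 * Hold a reference across each examiner call and keep the previous
	 * glock pinned until we have advanced, so our list position stays
	 * valid while the bucket lock is dropped.
	 */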
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(thaw_glock, sdp, x);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(clear_glock, sdp, x);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	gfs2_dump_lockstate(sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char buffer[KSYM_SYMBOL_LEN];
	char flags_buf[32];

	sprint_symbol(buffer, gh->gh_ip);
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
		  state2str(gh->gh_state),
		  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		  gh->gh_error,
		  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		  gh_owner ? gh_owner->comm : "(ended)", buffer);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, &gl->gl_flags),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock belonging to @sdp to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_RESCUER |
					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
	if (IS_ERR(glock_workqueue))
		return PTR_ERR(glock_workqueue);
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_RESCUER |
						WQ_FREEZEABLE, 0);
	if (IS_ERR(gfs2_delete_workqueue)) {
		destroy_workqueue(glock_workqueue);
		return PTR_ERR(gfs2_delete_workqueue);
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

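	/*
	 * Advance to the glock after gi->gl, moving on to the next hash
	 * bucket when the current chain is exhausted; entries belonging
	 * to other superblocks are skipped by restarting.
	 */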
restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
	} else {
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
	}
	if (gi->gl)
		gfs2_glock_hold(gi->gl);
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	while (gi->gl == NULL) {
		gi->hash++;
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}

static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	gi->gl = NULL;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;

	do {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}
1961