/* xref: /linux/fs/gfs2/glock.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb) */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct kmem_cache *cachep = gfs2_glock_cachep;

	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	if (mapping)
		cachep = gfs2_glock_aspace_cachep;
	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	int may_reclaim;
	may_reclaim = (demote_ok(gl) &&
		       (atomic_read(&gl->gl_ref) == 1 ||
			(gl->gl_name.ln_type == LM_TYPE_INODE &&
			 atomic_read(&gl->gl_ref) <= 2)));
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && may_reclaim) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

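	/* On the final put, unhash the glock, remove it from the LRU and free it */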
	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		hlist_del(&gl->gl_list);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		write_unlock(gl_lock_addr(gl->gl_hash));
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	spin_lock(&gl->gl_spin);
	gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket number to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
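 *
 * Grant rules, as implemented below: if either this request or the head
 * of the queue wants EX, only the head may be granted; an exact state
 * match is grantable; GL_EXACT forbids anything else; an EX glock may
 * also grant SH or DF requests when the head of the queue wants the same
 * mode; LM_FLAG_ANY accepts any state other than unlocked.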
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The DLM status, or 0 to fail only queued "try" requests
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
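 *
 * The holder at the head of the queue may require a type specific
 * go_lock operation, which runs with gl_spin dropped; hence the restart
 * after it completes.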
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

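	/* A glock holds an extra reference to itself whenever it is locked */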
	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
				 unsigned int req_state,
				 unsigned int flags)
{
	int ret = LM_OUT_ERROR;

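	/* With no lock module (lock_nolock), requests are granted immediately */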
	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
		return req_state == LM_ST_UNLOCKED ? 0 : req_state;

	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
							 req_state, flags);
	return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	BUG_ON(gl->gl_state == target);
	BUG_ON(gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
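	/*
	 * Lock conversions from SH or DF can deadlock against other
	 * converting nodes in the DLM, so issue them as "try with one
	 * callback" (TRY_1CB) requests; a cancelled conversion is
	 * retried through the LM_ST_UNLOCKED path in finish_xmote().
	 */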
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;
	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);

	if (!(ret & LM_OUT_ASYNC)) {
		finish_xmote(gl, ret);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	} else {
		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
	}
	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = NULL;
	struct inode *inode;
	u64 no_addr = 0;

	spin_lock(&gl->gl_spin);
	ip = (struct gfs2_inode *)gl->gl_object;
	if (ip)
		no_addr = ip->i_no_addr;
	spin_unlock(&gl->gl_spin);
	if (ip) {
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
		if (inode) {
			d_prune_aliases(inode);
			iput(inode);
		}
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
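 *
 * Example (illustrative only; glops and lock number depend on the caller):
 *
 *   struct gfs2_glock *gl;
 *   int error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *   if (error)
 *           return error;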
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
	else
		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
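 *
 * If a second, conflicting demote request arrives while another is
 * still pending, the demote target widens to LM_ST_UNLOCKED, which
 * satisfies both requests.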
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_printf(seq, "%s", gi->string);
	} else {
		printk(KERN_ERR " ");
		vprintk(fmt, args);
	}
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

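	/*
	 * A "try" request must never block: note whether the glock is
	 * busy, and fail at once if an invalidation is in progress.
	 */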
	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
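 *
 * Typical usage (sketch):
 *
 *   gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *   error = gfs2_glock_nq(&gh);
 *   if (error)
 *           return error;
 *   ...
 *   gfs2_glock_dq_uninit(&gh);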
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (time_before(now, holdtime))
		delay = holdtime - now;
	if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	gl->gl_reply = ret;

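	/* DFL_BLOCK_LOCKS is set while DLM recovery is in progress */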
	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		spin_lock(&gl->gl_spin);
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
		spin_unlock(&gl->gl_spin);
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}


static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
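	/* Report an estimate of freeable glocks, scaled by vfs_cache_pressure */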
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}


/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(thaw_glock, sdp, x);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(clear_glock, sdp, x);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	gfs2_dump_lockstate(sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char buffer[KSYM_SYMBOL_LEN];
	char flags_buf[32];

	sprint_symbol(buffer, gh->gh_ip);
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
		  state2str(gh->gh_state),
		  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		  gh->gh_error,
		  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		  gh_owner ? gh_owner->comm : "(ended)", buffer);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, &gl->gl_flags),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lockstate to the console.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}


	return error;
}


int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	/* create_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = create_workqueue("glock_workqueue");
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = create_workqueue("delete_workqueue");
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

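/*
 * Iterator helper for the debugfs "glocks" file: advance to the next
 * glock, walking each hash chain in turn. Glocks belonging to other
 * superblocks are skipped via the restart path, and a reference is
 * held on the glock currently pointed to.
 */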
static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
	} else {
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
	}
	if (gi->gl)
		gfs2_glock_hold(gi->gl);
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	while (gi->gl == NULL) {
		gi->hash++;
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}

static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	gi->gl = NULL;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;

	do {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}