xref: /linux/fs/gfs2/glock.c (revision 25aee3debe0464f6c680173041fa3de30ec9ff54)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/buffer_head.h>
14 #include <linux/delay.h>
15 #include <linux/sort.h>
16 #include <linux/jhash.h>
17 #include <linux/kallsyms.h>
18 #include <linux/gfs2_ondisk.h>
19 #include <linux/list.h>
20 #include <linux/wait.h>
21 #include <linux/module.h>
22 #include <asm/uaccess.h>
23 #include <linux/seq_file.h>
24 #include <linux/debugfs.h>
25 #include <linux/kthread.h>
26 #include <linux/freezer.h>
27 #include <linux/workqueue.h>
28 #include <linux/jiffies.h>
29 #include <linux/rcupdate.h>
30 #include <linux/rculist_bl.h>
31 #include <linux/bit_spinlock.h>
32 #include <linux/percpu.h>
33 
34 #include "gfs2.h"
35 #include "incore.h"
36 #include "glock.h"
37 #include "glops.h"
38 #include "inode.h"
39 #include "lops.h"
40 #include "meta_io.h"
41 #include "quota.h"
42 #include "super.h"
43 #include "util.h"
44 #include "bmap.h"
45 #define CREATE_TRACE_POINTS
46 #include "trace_gfs2.h"
47 
48 struct gfs2_glock_iter {
49 	int hash;			/* hash bucket index           */
50 	unsigned nhash;			/* Index within current bucket */
51 	struct gfs2_sbd *sdp;		/* incore superblock           */
52 	struct gfs2_glock *gl;		/* current glock struct        */
53 	loff_t last_pos;		/* last position               */
54 };
55 
56 typedef void (*glock_examiner) (struct gfs2_glock * gl);
57 
58 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
59 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
61 
62 static struct dentry *gfs2_root;
63 static struct workqueue_struct *glock_workqueue;
64 struct workqueue_struct *gfs2_delete_workqueue;
65 static LIST_HEAD(lru_list);
66 static atomic_t lru_count = ATOMIC_INIT(0);
67 static DEFINE_SPINLOCK(lru_lock);
68 
69 #define GFS2_GL_HASH_SHIFT      15
70 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
71 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
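/* GFS2_GL_HASH_SHIFT of 15 gives 1 << 15 = 32768 hash buckets; the mask
 * (0x7fff) reduces a 32-bit jhash value to a bucket index. */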
72 
73 static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
75 
76 /**
77  * gl_hash() - Turn glock number into hash bucket number
78  * @sdp: The incore superblock
79  * @name: The lock name
80  * Returns: The number of the corresponding hash bucket
81  */
82 
83 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
84 			    const struct lm_lockname *name)
85 {
86 	unsigned int h;
87 
88 	h = jhash(&name->ln_number, sizeof(u64), 0);
89 	h = jhash(&name->ln_type, sizeof(unsigned int), h);
90 	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
91 	h &= GFS2_GL_HASH_MASK;
92 
93 	return h;
94 }
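
/*
 * Example (illustrative sketch, not part of the original file): how a
 * caller would combine a lock name and superblock to pick a bucket.
 * "sdp" and "no_addr" are assumed to be in scope:
 *
 *	struct lm_lockname name = { .ln_number = no_addr,
 *				    .ln_type = LM_TYPE_INODE };
 *	unsigned int bucket = gl_hash(sdp, &name);
 *	struct hlist_bl_head *head = &gl_hash_table[bucket];
 */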
95 
96 static inline void spin_lock_bucket(unsigned int hash)
97 {
98 	hlist_bl_lock(&gl_hash_table[hash]);
99 }
100 
101 static inline void spin_unlock_bucket(unsigned int hash)
102 {
103 	hlist_bl_unlock(&gl_hash_table[hash]);
104 }
105 
106 static void gfs2_glock_dealloc(struct rcu_head *rcu)
107 {
108 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
109 
110 	if (gl->gl_ops->go_flags & GLOF_ASPACE)
111 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
112 	else
113 		kmem_cache_free(gfs2_glock_cachep, gl);
114 }
115 
116 void gfs2_glock_free(struct gfs2_glock *gl)
117 {
118 	struct gfs2_sbd *sdp = gl->gl_sbd;
119 
120 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
121 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
122 		wake_up(&sdp->sd_glock_wait);
123 }
124 
125 /**
126  * gfs2_glock_hold() - increment reference count on glock
127  * @gl: The glock to hold
128  *
129  */
130 
131 void gfs2_glock_hold(struct gfs2_glock *gl)
132 {
133 	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
134 	atomic_inc(&gl->gl_ref);
135 }
136 
137 /**
138  * demote_ok - Check to see if it's ok to unlock a glock
139  * @gl: the glock
140  *
141  * Returns: 1 if it's ok
142  */
143 
144 static int demote_ok(const struct gfs2_glock *gl)
145 {
146 	const struct gfs2_glock_operations *glops = gl->gl_ops;
147 
148 	if (gl->gl_state == LM_ST_UNLOCKED)
149 		return 0;
150 	if (!list_empty(&gl->gl_holders))
151 		return 0;
152 	if (glops->go_demote_ok)
153 		return glops->go_demote_ok(gl);
154 	return 1;
155 }
156 
157 
158 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
159 {
160 	spin_lock(&lru_lock);
161 
162 	if (!list_empty(&gl->gl_lru))
163 		list_del_init(&gl->gl_lru);
164 	else
165 		atomic_inc(&lru_count);
166 
167 	list_add_tail(&gl->gl_lru, &lru_list);
168 	set_bit(GLF_LRU, &gl->gl_flags);
169 	spin_unlock(&lru_lock);
170 }
171 
172 static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
173 {
174 	if (!list_empty(&gl->gl_lru)) {
175 		list_del_init(&gl->gl_lru);
176 		atomic_dec(&lru_count);
177 		clear_bit(GLF_LRU, &gl->gl_flags);
178 	}
179 }
180 
181 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
182 {
183 	spin_lock(&lru_lock);
184 	__gfs2_glock_remove_from_lru(gl);
185 	spin_unlock(&lru_lock);
186 }
187 
188 /**
189  * __gfs2_glock_schedule_for_reclaim - Add a glock to the LRU list
190  * @gl: the glock
191  *
192  * If the glock is demotable, then we add it (or move it) to the end
193  * of the glock LRU list.
194  */
195 
196 static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
197 {
198 	if (demote_ok(gl))
199 		gfs2_glock_add_to_lru(gl);
200 }
201 
202 /**
203  * gfs2_glock_put_nolock() - Decrement reference count on glock
204  * @gl: The glock to put
205  *
206  * This function should only be used if the caller has its own reference
207  * to the glock, in addition to the one it is dropping.
208  */
209 
210 void gfs2_glock_put_nolock(struct gfs2_glock *gl)
211 {
212 	if (atomic_dec_and_test(&gl->gl_ref))
213 		GLOCK_BUG_ON(gl, 1);
214 }
215 
216 /**
217  * gfs2_glock_put() - Decrement reference count on glock
218  * @gl: The glock to put
219  *
220  */
221 
222 void gfs2_glock_put(struct gfs2_glock *gl)
223 {
224 	struct gfs2_sbd *sdp = gl->gl_sbd;
225 	struct address_space *mapping = gfs2_glock2aspace(gl);
226 
227 	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
228 		__gfs2_glock_remove_from_lru(gl);
229 		spin_unlock(&lru_lock);
230 		spin_lock_bucket(gl->gl_hash);
231 		hlist_bl_del_rcu(&gl->gl_list);
232 		spin_unlock_bucket(gl->gl_hash);
233 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
234 		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
235 		trace_gfs2_glock_put(gl);
236 		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
237 	}
238 }
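
/*
 * Example (sketch): references must always pair up. A caller that hands
 * a glock to delayed work takes an extra reference first and drops it
 * again if the work was already queued, as several callers below do:
 *
 *	gfs2_glock_hold(gl);
 *	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 *		gfs2_glock_put(gl);
 */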
239 
240 /**
241  * search_bucket() - Find struct gfs2_glock by lock number
242  * @hash: the hash bucket to search
243  * @sdp: the filesystem
244  * @name: The lock name
245  * Returns: NULL, or the struct gfs2_glock with the requested number
246  */
247 
248 static struct gfs2_glock *search_bucket(unsigned int hash,
249 					const struct gfs2_sbd *sdp,
250 					const struct lm_lockname *name)
251 {
252 	struct gfs2_glock *gl;
253 	struct hlist_bl_node *h;
254 
255 	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
256 		if (!lm_name_equal(&gl->gl_name, name))
257 			continue;
258 		if (gl->gl_sbd != sdp)
259 			continue;
260 		if (atomic_inc_not_zero(&gl->gl_ref))
261 			return gl;
262 	}
263 
264 	return NULL;
265 }
266 
267 /**
268  * may_grant - check if it's ok to grant a new lock
269  * @gl: The glock
270  * @gh: The lock request which we wish to grant
271  *
272  * Returns: true if it's ok to grant the lock
273  */
274 
275 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
276 {
277 	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
278 	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
279 	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
280 		return 0;
281 	if (gl->gl_state == gh->gh_state)
282 		return 1;
283 	if (gh->gh_flags & GL_EXACT)
284 		return 0;
285 	if (gl->gl_state == LM_ST_EXCLUSIVE) {
286 		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
287 			return 1;
288 		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
289 			return 1;
290 	}
291 	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
292 		return 1;
293 	return 0;
294 }
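
/*
 * Illustrative outcomes of may_grant(), assuming the request is not at
 * the head of the queue and carries no GL_EXACT or LM_FLAG_ANY flags:
 *
 *	gl_state  head gh_state  new gh_state  granted?
 *	SH        SH             SH            yes (states match)
 *	EX        SH             SH            yes (head also wants SH)
 *	SH        SH             EX            no  (EX only granted at head)
 *	UN        -              any           no  (must ask the DLM first)
 */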
295 
296 static void gfs2_holder_wake(struct gfs2_holder *gh)
297 {
298 	clear_bit(HIF_WAIT, &gh->gh_iflags);
299 	smp_mb__after_clear_bit();
300 	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
301 }
302 
303 /**
304  * do_error - Something unexpected has happened during a lock request
305  *
306  */
307 
308 static inline void do_error(struct gfs2_glock *gl, const int ret)
309 {
310 	struct gfs2_holder *gh, *tmp;
311 
312 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
313 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
314 			continue;
315 		if (ret & LM_OUT_ERROR)
316 			gh->gh_error = -EIO;
317 		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
318 			gh->gh_error = GLR_TRYFAILED;
319 		else
320 			continue;
321 		list_del_init(&gh->gh_list);
322 		trace_gfs2_glock_queue(gh, 0);
323 		gfs2_holder_wake(gh);
324 	}
325 }
326 
327 /**
328  * do_promote - promote as many requests as possible on the current queue
329  * @gl: The glock
330  *
331  * Returns: 1 if there is a blocked holder at the head of the list, or 2
332  *          if a type-specific operation is underway.
333  */
334 
335 static int do_promote(struct gfs2_glock *gl)
336 __releases(&gl->gl_spin)
337 __acquires(&gl->gl_spin)
338 {
339 	const struct gfs2_glock_operations *glops = gl->gl_ops;
340 	struct gfs2_holder *gh, *tmp;
341 	int ret;
342 
343 restart:
344 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
345 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
346 			continue;
347 		if (may_grant(gl, gh)) {
348 			if (gh->gh_list.prev == &gl->gl_holders &&
349 			    glops->go_lock) {
350 				spin_unlock(&gl->gl_spin);
351 				/* FIXME: eliminate this eventually */
352 				ret = glops->go_lock(gh);
353 				spin_lock(&gl->gl_spin);
354 				if (ret) {
355 					if (ret == 1)
356 						return 2;
357 					gh->gh_error = ret;
358 					list_del_init(&gh->gh_list);
359 					trace_gfs2_glock_queue(gh, 0);
360 					gfs2_holder_wake(gh);
361 					goto restart;
362 				}
363 				set_bit(HIF_HOLDER, &gh->gh_iflags);
364 				trace_gfs2_promote(gh, 1);
365 				gfs2_holder_wake(gh);
366 				goto restart;
367 			}
368 			set_bit(HIF_HOLDER, &gh->gh_iflags);
369 			trace_gfs2_promote(gh, 0);
370 			gfs2_holder_wake(gh);
371 			continue;
372 		}
373 		if (gh->gh_list.prev == &gl->gl_holders)
374 			return 1;
375 		do_error(gl, 0);
376 		break;
377 	}
378 	return 0;
379 }
380 
381 /**
382  * find_first_waiter - find the first gh that's waiting for the glock
383  * @gl: the glock
384  */
385 
386 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
387 {
388 	struct gfs2_holder *gh;
389 
390 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
391 		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
392 			return gh;
393 	}
394 	return NULL;
395 }
396 
397 /**
398  * state_change - record that the glock is now in a different state
399  * @gl: the glock
400  * @new_state: the new state
401  *
402  */
403 
404 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
405 {
406 	int held1, held2;
407 
408 	held1 = (gl->gl_state != LM_ST_UNLOCKED);
409 	held2 = (new_state != LM_ST_UNLOCKED);
410 
411 	if (held1 != held2) {
412 		if (held2)
413 			gfs2_glock_hold(gl);
414 		else
415 			gfs2_glock_put_nolock(gl);
416 	}
417 	if (held1 && held2 && list_empty(&gl->gl_holders))
418 		clear_bit(GLF_QUEUED, &gl->gl_flags);
419 
420 	if (new_state != gl->gl_target)
421 		/* shorten our minimum hold time */
422 		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
423 				       GL_GLOCK_MIN_HOLD);
424 	gl->gl_state = new_state;
425 	gl->gl_tchange = jiffies;
426 }
427 
428 static void gfs2_demote_wake(struct gfs2_glock *gl)
429 {
430 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
431 	clear_bit(GLF_DEMOTE, &gl->gl_flags);
432 	smp_mb__after_clear_bit();
433 	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
434 }
435 
436 /**
437  * finish_xmote - The DLM has replied to one of our lock requests
438  * @gl: The glock
439  * @ret: The status from the DLM
440  *
441  */
442 
443 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
444 {
445 	const struct gfs2_glock_operations *glops = gl->gl_ops;
446 	struct gfs2_holder *gh;
447 	unsigned state = ret & LM_OUT_ST_MASK;
448 	int rv;
449 
450 	spin_lock(&gl->gl_spin);
451 	trace_gfs2_glock_state_change(gl, state);
452 	state_change(gl, state);
453 	gh = find_first_waiter(gl);
454 
455 	/* Demote to UN request arrived during demote to SH or DF */
456 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
457 	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
458 		gl->gl_target = LM_ST_UNLOCKED;
459 
460 	/* Check for state != intended state */
461 	if (unlikely(state != gl->gl_target)) {
462 		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
463 			/* move to back of queue and try next entry */
464 			if (ret & LM_OUT_CANCELED) {
465 				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
466 					list_move_tail(&gh->gh_list, &gl->gl_holders);
467 				gh = find_first_waiter(gl);
468 				gl->gl_target = gh->gh_state;
469 				goto retry;
470 			}
471 			/* Some error or failed "try lock" - report it */
472 			if ((ret & LM_OUT_ERROR) ||
473 			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
474 				gl->gl_target = gl->gl_state;
475 				do_error(gl, ret);
476 				goto out;
477 			}
478 		}
479 		switch(state) {
480 		/* Unlocked due to conversion deadlock, try again */
481 		case LM_ST_UNLOCKED:
482 retry:
483 			do_xmote(gl, gh, gl->gl_target);
484 			break;
485 		/* Conversion fails, unlock and try again */
486 		case LM_ST_SHARED:
487 		case LM_ST_DEFERRED:
488 			do_xmote(gl, gh, LM_ST_UNLOCKED);
489 			break;
490 		default: /* Everything else */
491 			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
492 			GLOCK_BUG_ON(gl, 1);
493 		}
494 		spin_unlock(&gl->gl_spin);
495 		return;
496 	}
497 
498 	/* Fast path - we got what we asked for */
499 	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
500 		gfs2_demote_wake(gl);
501 	if (state != LM_ST_UNLOCKED) {
502 		if (glops->go_xmote_bh) {
503 			spin_unlock(&gl->gl_spin);
504 			rv = glops->go_xmote_bh(gl, gh);
505 			spin_lock(&gl->gl_spin);
506 			if (rv) {
507 				do_error(gl, rv);
508 				goto out;
509 			}
510 		}
511 		rv = do_promote(gl);
512 		if (rv == 2)
513 			goto out_locked;
514 	}
515 out:
516 	clear_bit(GLF_LOCK, &gl->gl_flags);
517 out_locked:
518 	spin_unlock(&gl->gl_spin);
519 }
520 
521 /**
522  * do_xmote - Calls the DLM to change the state of a lock
523  * @gl: The lock state
524  * @gh: The holder (only for promotes)
525  * @target: The target lock state
526  *
527  */
528 
529 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
530 __releases(&gl->gl_spin)
531 __acquires(&gl->gl_spin)
532 {
533 	const struct gfs2_glock_operations *glops = gl->gl_ops;
534 	struct gfs2_sbd *sdp = gl->gl_sbd;
535 	unsigned int lck_flags = gh ? gh->gh_flags : 0;
536 	int ret;
537 
538 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
539 		      LM_FLAG_PRIORITY);
540 	GLOCK_BUG_ON(gl, gl->gl_state == target);
541 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
542 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
543 	    glops->go_inval) {
544 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
545 		do_error(gl, 0); /* Fail queued try locks */
546 	}
547 	gl->gl_req = target;
548 	set_bit(GLF_BLOCKING, &gl->gl_flags);
549 	if ((gl->gl_req == LM_ST_UNLOCKED) ||
550 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
551 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
552 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
553 	spin_unlock(&gl->gl_spin);
554 	if (glops->go_xmote_th)
555 		glops->go_xmote_th(gl);
556 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
557 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
558 	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
559 
560 	gfs2_glock_hold(gl);
561 	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
562 		/* lock_dlm */
563 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
564 		GLOCK_BUG_ON(gl, ret);
565 	} else { /* lock_nolock */
566 		finish_xmote(gl, target);
567 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
568 			gfs2_glock_put(gl);
569 	}
570 
571 	spin_lock(&gl->gl_spin);
572 }
573 
574 /**
575  * find_first_holder - find the first "holder" gh
576  * @gl: the glock
577  */
578 
579 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
580 {
581 	struct gfs2_holder *gh;
582 
583 	if (!list_empty(&gl->gl_holders)) {
584 		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
585 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
586 			return gh;
587 	}
588 	return NULL;
589 }
590 
591 /**
592  * run_queue - do all outstanding tasks related to a glock
593  * @gl: The glock in question
594  * @nonblock: True if we must not block in run_queue
595  *
596  */
597 
598 static void run_queue(struct gfs2_glock *gl, const int nonblock)
599 __releases(&gl->gl_spin)
600 __acquires(&gl->gl_spin)
601 {
602 	struct gfs2_holder *gh = NULL;
603 	int ret;
604 
605 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
606 		return;
607 
608 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
609 
610 	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
611 	    gl->gl_demote_state != gl->gl_state) {
612 		if (find_first_holder(gl))
613 			goto out_unlock;
614 		if (nonblock)
615 			goto out_sched;
616 		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
617 		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
618 		gl->gl_target = gl->gl_demote_state;
619 	} else {
620 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
621 			gfs2_demote_wake(gl);
622 		ret = do_promote(gl);
623 		if (ret == 0)
624 			goto out_unlock;
625 		if (ret == 2)
626 			goto out;
627 		gh = find_first_waiter(gl);
628 		gl->gl_target = gh->gh_state;
629 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
630 			do_error(gl, 0); /* Fail queued try locks */
631 	}
632 	do_xmote(gl, gh, gl->gl_target);
633 out:
634 	return;
635 
636 out_sched:
637 	clear_bit(GLF_LOCK, &gl->gl_flags);
638 	smp_mb__after_clear_bit();
639 	gfs2_glock_hold(gl);
640 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
641 		gfs2_glock_put_nolock(gl);
642 	return;
643 
644 out_unlock:
645 	clear_bit(GLF_LOCK, &gl->gl_flags);
646 	smp_mb__after_clear_bit();
647 	return;
648 }
649 
650 static void delete_work_func(struct work_struct *work)
651 {
652 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
653 	struct gfs2_sbd *sdp = gl->gl_sbd;
654 	struct gfs2_inode *ip;
655 	struct inode *inode;
656 	u64 no_addr = gl->gl_name.ln_number;
657 
658 	ip = gl->gl_object;
659 	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */
660 
661 	if (ip)
662 		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
663 	else
664 		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
665 	if (inode && !IS_ERR(inode)) {
666 		d_prune_aliases(inode);
667 		iput(inode);
668 	}
669 	gfs2_glock_put(gl);
670 }
671 
672 static void glock_work_func(struct work_struct *work)
673 {
674 	unsigned long delay = 0;
675 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
676 	int drop_ref = 0;
677 
678 	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
679 		finish_xmote(gl, gl->gl_reply);
680 		drop_ref = 1;
681 	}
682 	spin_lock(&gl->gl_spin);
683 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
684 	    gl->gl_state != LM_ST_UNLOCKED &&
685 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
686 		unsigned long holdtime, now = jiffies;
687 
688 		holdtime = gl->gl_tchange + gl->gl_hold_time;
689 		if (time_before(now, holdtime))
690 			delay = holdtime - now;
691 
692 		if (!delay) {
693 			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
694 			set_bit(GLF_DEMOTE, &gl->gl_flags);
695 		}
696 	}
697 	run_queue(gl, 0);
698 	spin_unlock(&gl->gl_spin);
699 	if (!delay)
700 		gfs2_glock_put(gl);
701 	else {
702 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
703 			delay = 0;
704 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
705 			gfs2_glock_put(gl);
706 	}
707 	if (drop_ref)
708 		gfs2_glock_put(gl);
709 }
710 
711 /**
712  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
713  * @sdp: The GFS2 superblock
714  * @number: the lock number
715  * @glops: The glock_operations to use
716  * @create: If 0, don't create the glock if it doesn't exist
717  * @glp: the glock is returned here
718  *
719  * This does not lock a glock, just finds/creates structures for one.
720  *
721  * Returns: errno
722  */
723 
724 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
725 		   const struct gfs2_glock_operations *glops, int create,
726 		   struct gfs2_glock **glp)
727 {
728 	struct super_block *s = sdp->sd_vfs;
729 	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
730 	struct gfs2_glock *gl, *tmp;
731 	unsigned int hash = gl_hash(sdp, &name);
732 	struct address_space *mapping;
733 	struct kmem_cache *cachep;
734 
735 	rcu_read_lock();
736 	gl = search_bucket(hash, sdp, &name);
737 	rcu_read_unlock();
738 
739 	*glp = gl;
740 	if (gl)
741 		return 0;
742 	if (!create)
743 		return -ENOENT;
744 
745 	if (glops->go_flags & GLOF_ASPACE)
746 		cachep = gfs2_glock_aspace_cachep;
747 	else
748 		cachep = gfs2_glock_cachep;
749 	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
750 	if (!gl)
751 		return -ENOMEM;
752 
753 	atomic_inc(&sdp->sd_glock_disposal);
754 	gl->gl_sbd = sdp;
755 	gl->gl_flags = 0;
756 	gl->gl_name = name;
757 	atomic_set(&gl->gl_ref, 1);
758 	gl->gl_state = LM_ST_UNLOCKED;
759 	gl->gl_target = LM_ST_UNLOCKED;
760 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
761 	gl->gl_hash = hash;
762 	gl->gl_ops = glops;
763 	gl->gl_dstamp = ktime_set(0, 0);
764 	preempt_disable();
765 	/* We use the global stats to estimate the initial per-glock stats */
766 	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
767 	preempt_enable();
768 	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
769 	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
770 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
771 	memset(gl->gl_lvb, 0, 32 * sizeof(char));
772 	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
773 	gl->gl_tchange = jiffies;
774 	gl->gl_object = NULL;
775 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
776 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
777 	INIT_WORK(&gl->gl_delete, delete_work_func);
778 
779 	mapping = gfs2_glock2aspace(gl);
780 	if (mapping) {
781 		mapping->a_ops = &gfs2_meta_aops;
782 		mapping->host = s->s_bdev->bd_inode;
783 		mapping->flags = 0;
784 		mapping_set_gfp_mask(mapping, GFP_NOFS);
785 		mapping->assoc_mapping = NULL;
786 		mapping->backing_dev_info = s->s_bdi;
787 		mapping->writeback_index = 0;
788 	}
789 
790 	spin_lock_bucket(hash);
791 	tmp = search_bucket(hash, sdp, &name);
792 	if (tmp) {
793 		spin_unlock_bucket(hash);
794 		kmem_cache_free(cachep, gl);
795 		atomic_dec(&sdp->sd_glock_disposal);
796 		gl = tmp;
797 	} else {
798 		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
799 		spin_unlock_bucket(hash);
800 	}
801 
802 	*glp = gl;
803 
804 	return 0;
805 }
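
/*
 * Example (sketch, with "sdp" and "no_addr" assumed from the caller):
 * find or create the glock for an on-disk inode and drop the lookup
 * reference once it has been attached elsewhere:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	... attach gl to an inode or queue a holder on it ...
 *	gfs2_glock_put(gl);
 */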
806 
807 /**
808  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
809  * @gl: the glock
810  * @state: the state we're requesting
811  * @flags: the modifier flags
812  * @gh: the holder structure
813  *
814  */
815 
816 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
817 		      struct gfs2_holder *gh)
818 {
819 	INIT_LIST_HEAD(&gh->gh_list);
820 	gh->gh_gl = gl;
821 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
822 	gh->gh_owner_pid = get_pid(task_pid(current));
823 	gh->gh_state = state;
824 	gh->gh_flags = flags;
825 	gh->gh_error = 0;
826 	gh->gh_iflags = 0;
827 	gfs2_glock_hold(gl);
828 }
829 
830 /**
831  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
832  * @state: the state we're requesting
833  * @flags: the modifier flags
834  * @gh: the holder structure
835  *
836  * Don't mess with the glock.
837  *
838  */
839 
840 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
841 {
842 	gh->gh_state = state;
843 	gh->gh_flags = flags;
844 	gh->gh_iflags = 0;
845 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
846 	if (gh->gh_owner_pid)
847 		put_pid(gh->gh_owner_pid);
848 	gh->gh_owner_pid = get_pid(task_pid(current));
849 }
850 
851 /**
852  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
853  * @gh: the holder structure
854  *
855  */
856 
857 void gfs2_holder_uninit(struct gfs2_holder *gh)
858 {
859 	put_pid(gh->gh_owner_pid);
860 	gfs2_glock_put(gh->gh_gl);
861 	gh->gh_gl = NULL;
862 	gh->gh_ip = 0;
863 }
864 
865 /**
866  * gfs2_glock_holder_wait
867  * @word: unused
868  *
869  * This function and gfs2_glock_demote_wait both show up in the WCHAN
870  * field. Thus I've separated these otherwise identical functions in
871  * order to be more informative to the user.
872  */
873 
874 static int gfs2_glock_holder_wait(void *word)
875 {
876 	schedule();
877 	return 0;
878 }
879 
880 static int gfs2_glock_demote_wait(void *word)
881 {
882 	schedule();
883 	return 0;
884 }
885 
886 static void wait_on_holder(struct gfs2_holder *gh)
887 {
888 	unsigned long time1 = jiffies;
889 
890 	might_sleep();
891 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
892 	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
893 		/* Lengthen the minimum hold time. */
894 		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
895 					      GL_GLOCK_HOLD_INCR,
896 					      GL_GLOCK_MAX_HOLD);
897 }
898 
899 static void wait_on_demote(struct gfs2_glock *gl)
900 {
901 	might_sleep();
902 	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
903 }
904 
905 /**
906  * handle_callback - process a demote request
907  * @gl: the glock
908  * @state: the state the caller wants us to change to
909  * @delay: if nonzero, mark the demote as pending rather than immediate
910  * There are only two requests that we are going to see in actual
911  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
912  */
913 
914 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
915 			    unsigned long delay)
916 {
917 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
918 
919 	set_bit(bit, &gl->gl_flags);
920 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
921 		gl->gl_demote_state = state;
922 		gl->gl_demote_time = jiffies;
923 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
924 			gl->gl_demote_state != state) {
925 		gl->gl_demote_state = LM_ST_UNLOCKED;
926 	}
927 	if (gl->gl_ops->go_callback)
928 		gl->gl_ops->go_callback(gl);
929 	trace_gfs2_demote_rq(gl);
930 }
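
/*
 * Illustrative merging of successive demote requests (gl_demote_state
 * of EX means "no demote pending"):
 *
 *	pending state	incoming state	resulting gl_demote_state
 *	EX (none)	SH		SH
 *	SH		SH		SH
 *	SH		UN		UN (conflict, demote fully)
 *	UN		anything	UN
 */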
931 
932 /**
933  * gfs2_glock_wait - wait on a glock acquisition
934  * @gh: the glock holder
935  *
936  * Returns: 0 on success
937  */
938 
939 int gfs2_glock_wait(struct gfs2_holder *gh)
940 {
941 	wait_on_holder(gh);
942 	return gh->gh_error;
943 }
944 
945 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
946 {
947 	struct va_format vaf;
948 	va_list args;
949 
950 	va_start(args, fmt);
951 
952 	if (seq) {
953 		seq_vprintf(seq, fmt, args);
954 	} else {
955 		vaf.fmt = fmt;
956 		vaf.va = &args;
957 
958 		printk(KERN_ERR " %pV", &vaf);
959 	}
960 
961 	va_end(args);
962 }
963 
964 /**
965  * add_to_queue - Add a holder to the wait queue (but look for recursion)
966  * @gh: the holder structure to add
967  *
968  * Eventually we should move the recursive locking trap to a
969  * debugging option or something like that. This is the fast
970  * path and needs to have the minimum number of distractions.
971  *
972  */
973 
974 static inline void add_to_queue(struct gfs2_holder *gh)
975 __releases(&gl->gl_spin)
976 __acquires(&gl->gl_spin)
977 {
978 	struct gfs2_glock *gl = gh->gh_gl;
979 	struct gfs2_sbd *sdp = gl->gl_sbd;
980 	struct list_head *insert_pt = NULL;
981 	struct gfs2_holder *gh2;
982 	int try_lock = 0;
983 
984 	BUG_ON(gh->gh_owner_pid == NULL);
985 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
986 		BUG();
987 
988 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
989 		if (test_bit(GLF_LOCK, &gl->gl_flags))
990 			try_lock = 1;
991 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
992 			goto fail;
993 	}
994 
995 	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
996 		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
997 		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
998 			goto trap_recursive;
999 		if (try_lock &&
1000 		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
1001 		    !may_grant(gl, gh)) {
1002 fail:
1003 			gh->gh_error = GLR_TRYFAILED;
1004 			gfs2_holder_wake(gh);
1005 			return;
1006 		}
1007 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1008 			continue;
1009 		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1010 			insert_pt = &gh2->gh_list;
1011 	}
1012 	set_bit(GLF_QUEUED, &gl->gl_flags);
1013 	trace_gfs2_glock_queue(gh, 1);
1014 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1015 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1016 	if (likely(insert_pt == NULL)) {
1017 		list_add_tail(&gh->gh_list, &gl->gl_holders);
1018 		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1019 			goto do_cancel;
1020 		return;
1021 	}
1022 	list_add_tail(&gh->gh_list, insert_pt);
1023 do_cancel:
1024 	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
1025 	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1026 		spin_unlock(&gl->gl_spin);
1027 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1028 			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1029 		spin_lock(&gl->gl_spin);
1030 	}
1031 	return;
1032 
1033 trap_recursive:
1034 	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
1035 	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1036 	printk(KERN_ERR "lock type: %d req lock state : %d\n",
1037 	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1038 	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
1039 	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
1040 	printk(KERN_ERR "lock type: %d req lock state : %d\n",
1041 	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
1042 	__dump_glock(NULL, gl);
1043 	BUG();
1044 }
1045 
1046 /**
1047  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1048  * @gh: the holder structure
1049  *
1050  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1051  *
1052  * Returns: 0, GLR_TRYFAILED, or errno on failure
1053  */
1054 
1055 int gfs2_glock_nq(struct gfs2_holder *gh)
1056 {
1057 	struct gfs2_glock *gl = gh->gh_gl;
1058 	struct gfs2_sbd *sdp = gl->gl_sbd;
1059 	int error = 0;
1060 
1061 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1062 		return -EIO;
1063 
1064 	if (test_bit(GLF_LRU, &gl->gl_flags))
1065 		gfs2_glock_remove_from_lru(gl);
1066 
1067 	spin_lock(&gl->gl_spin);
1068 	add_to_queue(gh);
1069 	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
1070 	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1071 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1072 	run_queue(gl, 1);
1073 	spin_unlock(&gl->gl_spin);
1074 
1075 	if (!(gh->gh_flags & GL_ASYNC))
1076 		error = gfs2_glock_wait(gh);
1077 
1078 	return error;
1079 }
1080 
1081 /**
1082  * gfs2_glock_poll - poll to see if an async request has been completed
1083  * @gh: the holder
1084  *
1085  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1086  */
1087 
1088 int gfs2_glock_poll(struct gfs2_holder *gh)
1089 {
1090 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1091 }
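
/*
 * Example (sketch): the typical holder life cycle around the functions
 * above. Synchronous callers usually use the gfs2_glock_nq_init()
 * wrapper from glock.h; asynchronous callers pass GL_ASYNC and collect
 * the result later:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);	(returns 0 immediately for GL_ASYNC)
 *	... overlap other work, optionally checking gfs2_glock_poll(&gh) ...
 *	error = gfs2_glock_wait(&gh);
 *	if (error == 0)
 *		gfs2_glock_dq(&gh);
 *	gfs2_holder_uninit(&gh);
 */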
1092 
1093 /**
1094  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1095  * @gh: the glock holder
1096  *
1097  */
1098 
1099 void gfs2_glock_dq(struct gfs2_holder *gh)
1100 {
1101 	struct gfs2_glock *gl = gh->gh_gl;
1102 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1103 	unsigned delay = 0;
1104 	int fast_path = 0;
1105 
1106 	spin_lock(&gl->gl_spin);
1107 	if (gh->gh_flags & GL_NOCACHE)
1108 		handle_callback(gl, LM_ST_UNLOCKED, 0);
1109 
1110 	list_del_init(&gh->gh_list);
1111 	if (find_first_holder(gl) == NULL) {
1112 		if (glops->go_unlock) {
1113 			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1114 			spin_unlock(&gl->gl_spin);
1115 			glops->go_unlock(gh);
1116 			spin_lock(&gl->gl_spin);
1117 			clear_bit(GLF_LOCK, &gl->gl_flags);
1118 		}
1119 		if (list_empty(&gl->gl_holders) &&
1120 		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1121 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1122 			fast_path = 1;
1123 	}
1124 	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
1125 		__gfs2_glock_schedule_for_reclaim(gl);
1126 	trace_gfs2_glock_queue(gh, 0);
1127 	spin_unlock(&gl->gl_spin);
1128 	if (likely(fast_path))
1129 		return;
1130 
1131 	gfs2_glock_hold(gl);
1132 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1133 	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1134 	    gl->gl_name.ln_type == LM_TYPE_INODE)
1135 		delay = gl->gl_hold_time;
1136 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1137 		gfs2_glock_put(gl);
1138 }
1139 
1140 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1141 {
1142 	struct gfs2_glock *gl = gh->gh_gl;
1143 	gfs2_glock_dq(gh);
1144 	wait_on_demote(gl);
1145 }
1146 
1147 /**
1148  * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1149  * @gh: the holder structure
1150  *
1151  */
1152 
1153 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1154 {
1155 	gfs2_glock_dq(gh);
1156 	gfs2_holder_uninit(gh);
1157 }
1158 
1159 /**
1160  * gfs2_glock_nq_num - acquire a glock based on lock number
1161  * @sdp: the filesystem
1162  * @number: the lock number
1163  * @glops: the glock operations for the type of glock
1164  * @state: the state to acquire the glock in
1165  * @flags: modifier flags for the acquisition
1166  * @gh: the struct gfs2_holder
1167  *
1168  * Returns: errno
1169  */
1170 
1171 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1172 		      const struct gfs2_glock_operations *glops,
1173 		      unsigned int state, int flags, struct gfs2_holder *gh)
1174 {
1175 	struct gfs2_glock *gl;
1176 	int error;
1177 
1178 	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1179 	if (!error) {
1180 		error = gfs2_glock_nq_init(gl, state, flags, gh);
1181 		gfs2_glock_put(gl);
1182 	}
1183 
1184 	return error;
1185 }
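
/*
 * Example (sketch, modelled on the mount-time "live" lock): acquiring a
 * glock directly by number when no inode or rgrp structure exists:
 *
 *	error = gfs2_glock_nq_num(sdp, GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
 *				  LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT,
 *				  &sdp->sd_live_gh);
 */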
1186 
1187 /**
1188  * glock_compare - Compare two struct gfs2_glock structures for sorting
1189  * @arg_a: the first structure
1190  * @arg_b: the second structure
1191  *
1192  */
1193 
1194 static int glock_compare(const void *arg_a, const void *arg_b)
1195 {
1196 	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1197 	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1198 	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1199 	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1200 
1201 	if (a->ln_number > b->ln_number)
1202 		return 1;
1203 	if (a->ln_number < b->ln_number)
1204 		return -1;
1205 	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1206 	return 0;
1207 }
1208 
1209 /**
1210  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1211  * @num_gh: the number of structures
1212  * @ghs: an array of struct gfs2_holder structures
1213  *
1214  * Returns: 0 on success (all glocks acquired),
1215  *          errno on failure (no glocks acquired)
1216  */
1217 
1218 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1219 		     struct gfs2_holder **p)
1220 {
1221 	unsigned int x;
1222 	int error = 0;
1223 
1224 	for (x = 0; x < num_gh; x++)
1225 		p[x] = &ghs[x];
1226 
1227 	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1228 
1229 	for (x = 0; x < num_gh; x++) {
1230 		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1231 
1232 		error = gfs2_glock_nq(p[x]);
1233 		if (error) {
1234 			while (x--)
1235 				gfs2_glock_dq(p[x]);
1236 			break;
1237 		}
1238 	}
1239 
1240 	return error;
1241 }
1242 
1243 /**
1244  * gfs2_glock_nq_m - acquire multiple glocks
1245  * @num_gh: the number of structures
1246  * @ghs: an array of struct gfs2_holder structures
1247  *
1248  *
1249  * Returns: 0 on success (all glocks acquired),
1250  *          errno on failure (no glocks acquired)
1251  */
1252 
1253 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1254 {
1255 	struct gfs2_holder *tmp[4];
1256 	struct gfs2_holder **pph = tmp;
1257 	int error = 0;
1258 
1259 	switch(num_gh) {
1260 	case 0:
1261 		return 0;
1262 	case 1:
1263 		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1264 		return gfs2_glock_nq(ghs);
1265 	default:
1266 		if (num_gh <= 4)
1267 			break;
1268 		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1269 		if (!pph)
1270 			return -ENOMEM;
1271 	}
1272 
1273 	error = nq_m_sync(num_gh, ghs, pph);
1274 
1275 	if (pph != tmp)
1276 		kfree(pph);
1277 
1278 	return error;
1279 }
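
/*
 * Example (sketch, "ip1"/"ip2" are hypothetical inodes): locking two
 * glocks in deadlock-free order; each holder is initialised first,
 * then all are queued with one call:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_uninit_m(2, ghs);
 */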
1280 
1281 /**
1282  * gfs2_glock_dq_m - release multiple glocks
1283  * @num_gh: the number of structures
1284  * @ghs: an array of struct gfs2_holder structures
1285  *
1286  */
1287 
1288 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1289 {
1290 	while (num_gh--)
1291 		gfs2_glock_dq(&ghs[num_gh]);
1292 }
1293 
1294 /**
1295  * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
1296  * @num_gh: the number of structures
1297  * @ghs: an array of struct gfs2_holder structures
1298  *
1299  */
1300 
1301 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1302 {
1303 	while (num_gh--)
1304 		gfs2_glock_dq_uninit(&ghs[num_gh]);
1305 }
1306 
1307 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1308 {
1309 	unsigned long delay = 0;
1310 	unsigned long holdtime;
1311 	unsigned long now = jiffies;
1312 
1313 	gfs2_glock_hold(gl);
1314 	holdtime = gl->gl_tchange + gl->gl_hold_time;
1315 	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1316 	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1317 		if (time_before(now, holdtime))
1318 			delay = holdtime - now;
1319 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1320 			delay = gl->gl_hold_time;
1321 	}
1322 
1323 	spin_lock(&gl->gl_spin);
1324 	handle_callback(gl, state, delay);
1325 	spin_unlock(&gl->gl_spin);
1326 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1327 		gfs2_glock_put(gl);
1328 }
1329 
1330 /**
1331  * gfs2_should_freeze - Figure out if glock should be frozen
1332  * @gl: The glock in question
1333  *
1334  * Glocks are not frozen if (a) the result of the dlm operation is
1335  * an error, (b) the locking operation was an unlock operation or
1336  * (c) if there is a "noexp" flagged request anywhere in the queue
1337  *
1338  * Returns: 1 if freezing should occur, 0 otherwise
1339  */
1340 
1341 static int gfs2_should_freeze(const struct gfs2_glock *gl)
1342 {
1343 	const struct gfs2_holder *gh;
1344 
1345 	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1346 		return 0;
1347 	if (gl->gl_target == LM_ST_UNLOCKED)
1348 		return 0;
1349 
1350 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1351 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1352 			continue;
1353 		if (LM_FLAG_NOEXP & gh->gh_flags)
1354 			return 0;
1355 	}
1356 
1357 	return 1;
1358 }
1359 
1360 /**
1361  * gfs2_glock_complete - Callback used by locking
1362  * @gl: Pointer to the glock
1363  * @ret: The return value from the dlm
1364  *
1365  * The gl_reply field is under the gl_spin lock so that it is ok
1366  * to use a bitfield shared with other glock state fields.
1367  */
1368 
1369 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1370 {
1371 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1372 
1373 	spin_lock(&gl->gl_spin);
1374 	gl->gl_reply = ret;
1375 
1376 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1377 		if (gfs2_should_freeze(gl)) {
1378 			set_bit(GLF_FROZEN, &gl->gl_flags);
1379 			spin_unlock(&gl->gl_spin);
1380 			return;
1381 		}
1382 	}
1383 
1384 	spin_unlock(&gl->gl_spin);
1385 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1386 	smp_wmb();
1387 	gfs2_glock_hold(gl);
1388 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1389 		gfs2_glock_put(gl);
1390 }
1391 
1392 
1393 static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1394 				    struct shrink_control *sc)
1395 {
1396 	struct gfs2_glock *gl;
1397 	int may_demote;
1398 	int nr_skipped = 0;
1399 	int nr = sc->nr_to_scan;
1400 	gfp_t gfp_mask = sc->gfp_mask;
1401 	LIST_HEAD(skipped);
1402 
1403 	if (nr == 0)
1404 		goto out;
1405 
1406 	if (!(gfp_mask & __GFP_FS))
1407 		return -1;
1408 
1409 	spin_lock(&lru_lock);
1410 	while(nr && !list_empty(&lru_list)) {
1411 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1412 		list_del_init(&gl->gl_lru);
1413 		clear_bit(GLF_LRU, &gl->gl_flags);
1414 		atomic_dec(&lru_count);
1415 
1416 		/* Test for being demotable */
1417 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1418 			gfs2_glock_hold(gl);
1419 			spin_unlock(&lru_lock);
1420 			spin_lock(&gl->gl_spin);
1421 			may_demote = demote_ok(gl);
1422 			if (may_demote) {
1423 				handle_callback(gl, LM_ST_UNLOCKED, 0);
1424 				nr--;
1425 			}
1426 			clear_bit(GLF_LOCK, &gl->gl_flags);
1427 			smp_mb__after_clear_bit();
1428 			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1429 				gfs2_glock_put_nolock(gl);
1430 			spin_unlock(&gl->gl_spin);
1431 			spin_lock(&lru_lock);
1432 			continue;
1433 		}
1434 		nr_skipped++;
1435 		list_add(&gl->gl_lru, &skipped);
1436 		set_bit(GLF_LRU, &gl->gl_flags);
1437 	}
1438 	list_splice(&skipped, &lru_list);
1439 	atomic_add(nr_skipped, &lru_count);
1440 	spin_unlock(&lru_lock);
1441 out:
1442 	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
1443 }
1444 
1445 static struct shrinker glock_shrinker = {
1446 	.shrink = gfs2_shrink_glock_memory,
1447 	.seeks = DEFAULT_SEEKS,
1448 };
1449 
1450 /**
1451  * examine_bucket - Call a function for each glock in a hash bucket
1452  * @examiner: the function
1453  * @sdp: the filesystem
1454  * @bucket: the bucket
1455  *
1456  */
1457 
1458 static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
1459 			  unsigned int hash)
1460 {
1461 	struct gfs2_glock *gl;
1462 	struct hlist_bl_head *head = &gl_hash_table[hash];
1463 	struct hlist_bl_node *pos;
1464 
1465 	rcu_read_lock();
1466 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1467 		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
1468 			examiner(gl);
1469 	}
1470 	rcu_read_unlock();
1471 	cond_resched();
1472 }
1473 
1474 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1475 {
1476 	unsigned x;
1477 
1478 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1479 		examine_bucket(examiner, sdp, x);
1480 }
1481 
1482 
1483 /**
1484  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1485  * @gl: The glock to thaw
1486  *
1487  * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
1488  * so this has to result in the ref count being dropped by one.
1489  */
1490 
1491 static void thaw_glock(struct gfs2_glock *gl)
1492 {
1493 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1494 		return;
1495 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1496 	gfs2_glock_hold(gl);
1497 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1498 		gfs2_glock_put(gl);
1499 }
1500 
1501 /**
1502  * clear_glock - look at a glock and see if we can free it from glock cache
1503  * @gl: the glock to look at
1504  *
1505  */
1506 
1507 static void clear_glock(struct gfs2_glock *gl)
1508 {
1509 	gfs2_glock_remove_from_lru(gl);
1510 
1511 	spin_lock(&gl->gl_spin);
1512 	if (gl->gl_state != LM_ST_UNLOCKED)
1513 		handle_callback(gl, LM_ST_UNLOCKED, 0);
1514 	spin_unlock(&gl->gl_spin);
1515 	gfs2_glock_hold(gl);
1516 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1517 		gfs2_glock_put(gl);
1518 }
1519 
1520 /**
1521  * gfs2_glock_thaw - Thaw any frozen glocks
1522  * @sdp: The super block
1523  *
1524  */
1525 
1526 void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1527 {
1528 	glock_hash_walk(thaw_glock, sdp);
1529 }
1530 
1531 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1532 {
1533 	int ret;
1534 	spin_lock(&gl->gl_spin);
1535 	ret = __dump_glock(seq, gl);
1536 	spin_unlock(&gl->gl_spin);
1537 	return ret;
1538 }
1539 
1540 static void dump_glock_func(struct gfs2_glock *gl)
1541 {
1542 	dump_glock(NULL, gl);
1543 }
1544 
1545 /**
1546  * gfs2_gl_hash_clear - Empty out the glock hash table
1547  * @sdp: the filesystem
1548  * @wait: wait until it's all gone
1549  *
1550  * Called when unmounting the filesystem.
1551  */
1552 
1553 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1554 {
1555 	glock_hash_walk(clear_glock, sdp);
1556 	flush_workqueue(glock_workqueue);
1557 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1558 	glock_hash_walk(dump_glock_func, sdp);
1559 }
1560 
1561 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1562 {
1563 	struct gfs2_glock *gl = ip->i_gl;
1564 	int ret;
1565 
1566 	ret = gfs2_truncatei_resume(ip);
1567 	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1568 
1569 	spin_lock(&gl->gl_spin);
1570 	clear_bit(GLF_LOCK, &gl->gl_flags);
1571 	run_queue(gl, 1);
1572 	spin_unlock(&gl->gl_spin);
1573 }
1574 
1575 static const char *state2str(unsigned state)
1576 {
1577 	switch(state) {
1578 	case LM_ST_UNLOCKED:
1579 		return "UN";
1580 	case LM_ST_SHARED:
1581 		return "SH";
1582 	case LM_ST_DEFERRED:
1583 		return "DF";
1584 	case LM_ST_EXCLUSIVE:
1585 		return "EX";
1586 	}
1587 	return "??";
1588 }
1589 
1590 static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1591 {
1592 	char *p = buf;
1593 	if (flags & LM_FLAG_TRY)
1594 		*p++ = 't';
1595 	if (flags & LM_FLAG_TRY_1CB)
1596 		*p++ = 'T';
1597 	if (flags & LM_FLAG_NOEXP)
1598 		*p++ = 'e';
1599 	if (flags & LM_FLAG_ANY)
1600 		*p++ = 'A';
1601 	if (flags & LM_FLAG_PRIORITY)
1602 		*p++ = 'p';
1603 	if (flags & GL_ASYNC)
1604 		*p++ = 'a';
1605 	if (flags & GL_EXACT)
1606 		*p++ = 'E';
1607 	if (flags & GL_NOCACHE)
1608 		*p++ = 'c';
1609 	if (test_bit(HIF_HOLDER, &iflags))
1610 		*p++ = 'H';
1611 	if (test_bit(HIF_WAIT, &iflags))
1612 		*p++ = 'W';
1613 	if (test_bit(HIF_FIRST, &iflags))
1614 		*p++ = 'F';
1615 	*p = 0;
1616 	return buf;
1617 }
1618 
1619 /**
1620  * dump_holder - print information about a glock holder
1621  * @seq: the seq_file struct
1622  * @gh: the glock holder
1623  *
1624  * Returns: 0 on success, -ENOBUFS when we run out of space
1625  */
1626 
1627 static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1628 {
1629 	struct task_struct *gh_owner = NULL;
1630 	char flags_buf[32];
1631 
1632 	if (gh->gh_owner_pid)
1633 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1634 	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1635 		       state2str(gh->gh_state),
1636 		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1637 		       gh->gh_error,
1638 		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1639 		       gh_owner ? gh_owner->comm : "(ended)",
1640 		       (void *)gh->gh_ip);
1641 	return 0;
1642 }
1643 
1644 static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1645 {
1646 	const unsigned long *gflags = &gl->gl_flags;
1647 	char *p = buf;
1648 
1649 	if (test_bit(GLF_LOCK, gflags))
1650 		*p++ = 'l';
1651 	if (test_bit(GLF_DEMOTE, gflags))
1652 		*p++ = 'D';
1653 	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1654 		*p++ = 'd';
1655 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1656 		*p++ = 'p';
1657 	if (test_bit(GLF_DIRTY, gflags))
1658 		*p++ = 'y';
1659 	if (test_bit(GLF_LFLUSH, gflags))
1660 		*p++ = 'f';
1661 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1662 		*p++ = 'i';
1663 	if (test_bit(GLF_REPLY_PENDING, gflags))
1664 		*p++ = 'r';
1665 	if (test_bit(GLF_INITIAL, gflags))
1666 		*p++ = 'I';
1667 	if (test_bit(GLF_FROZEN, gflags))
1668 		*p++ = 'F';
1669 	if (test_bit(GLF_QUEUED, gflags))
1670 		*p++ = 'q';
1671 	if (test_bit(GLF_LRU, gflags))
1672 		*p++ = 'L';
1673 	if (gl->gl_object)
1674 		*p++ = 'o';
1675 	if (test_bit(GLF_BLOCKING, gflags))
1676 		*p++ = 'b';
1677 	*p = 0;
1678 	return buf;
1679 }
1680 
1681 /**
1682  * __dump_glock - print information about a glock
1683  * @seq: The seq_file struct
1684  * @gl: the glock
1685  *
1686  * The file format is as follows:
1687  * One line per object, capital letters are used to indicate objects
1688  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1689  * other objects are indented by a single space and follow the glock to
1690  * which they are related. Fields are indicated by lower case letters
1691  * followed by a colon and the field value, except for strings which are in
1692  * [] so that it's possible to see if they are composed of spaces for
1693  * example. The fields are n = number (id of the object), f = flags,
1694  * t = type, s = state, r = refcount, e = error, p = pid.
1695  *
1696  * Returns: 0 on success, -ENOBUFS when we run out of space
1697  */
1698 
1699 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1700 {
1701 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1702 	unsigned long long dtime;
1703 	const struct gfs2_holder *gh;
1704 	char gflags_buf[32];
1705 	int error = 0;
1706 
1707 	dtime = jiffies - gl->gl_demote_time;
1708 	dtime *= 1000000/HZ; /* demote time in uSec */
1709 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1710 		dtime = 0;
1711 	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1712 		  state2str(gl->gl_state),
1713 		  gl->gl_name.ln_type,
1714 		  (unsigned long long)gl->gl_name.ln_number,
1715 		  gflags2str(gflags_buf, gl),
1716 		  state2str(gl->gl_target),
1717 		  state2str(gl->gl_demote_state), dtime,
1718 		  atomic_read(&gl->gl_ail_count),
1719 		  atomic_read(&gl->gl_revokes),
1720 		  atomic_read(&gl->gl_ref), gl->gl_hold_time);
1721 
1722 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1723 		error = dump_holder(seq, gh);
1724 		if (error)
1725 			goto out;
1726 	}
1727 	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1728 		error = glops->go_dump(seq, gl);
1729 out:
1730 	return error;
1731 }
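
/*
 * Example line from the resulting "glocks" debugfs file (hypothetical
 * values):
 *
 *	G:  s:SH n:2/27bc f:qL t:SH d:EX/0 a:0 v:0 r:3 m:200
 *
 * This is a shared (s:SH) inode glock (type 2) for block 0x27bc, queued
 * and on the LRU (f:qL), target state SH, no demote pending (d:EX), no
 * AIL buffers or revokes, three references and a 200 jiffy minimum hold
 * time.
 */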
1732 
1733 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1734 {
1735 	struct gfs2_glock *gl = iter_ptr;
1736 
1737 	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
1738 		   gl->gl_name.ln_type,
1739 		   (unsigned long long)gl->gl_name.ln_number,
1740 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1741 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1742 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1743 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1744 		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1745 		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1746 		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1747 		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1748 	return 0;
1749 }
1750 
1751 static const char *gfs2_gltype[] = {
1752 	"type",
1753 	"reserved",
1754 	"nondisk",
1755 	"inode",
1756 	"rgrp",
1757 	"meta",
1758 	"iopen",
1759 	"flock",
1760 	"plock",
1761 	"quota",
1762 	"journal",
1763 };
1764 
1765 static const char *gfs2_stype[] = {
1766 	[GFS2_LKS_SRTT]		= "srtt",
1767 	[GFS2_LKS_SRTTVAR]	= "srttvar",
1768 	[GFS2_LKS_SRTTB]	= "srttb",
1769 	[GFS2_LKS_SRTTVARB]	= "srttvarb",
1770 	[GFS2_LKS_SIRT]		= "sirt",
1771 	[GFS2_LKS_SIRTVAR]	= "sirtvar",
1772 	[GFS2_LKS_DCOUNT]	= "dlm",
1773 	[GFS2_LKS_QCOUNT]	= "queue",
1774 };
1775 
1776 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1777 
1778 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1779 {
1780 	struct gfs2_glock_iter *gi = seq->private;
1781 	struct gfs2_sbd *sdp = gi->sdp;
1782 	unsigned index = gi->hash >> 3;
1783 	unsigned subindex = gi->hash & 0x07;
1784 	s64 value;
1785 	int i;
1786 
1787 	if (index == 0 && subindex != 0)
1788 		return 0;
1789 
1790 	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1791 		   (index == 0) ? "cpu": gfs2_stype[subindex]);
1792 
1793 	for_each_possible_cpu(i) {
1794 		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1795 		if (index == 0) {
1796 			value = i;
1797 		} else {
1798 			value = lkstats->lkstats[index - 1].stats[subindex];
1799 		}
1800 		seq_printf(seq, " %15lld", (long long)value);
1801 	}
1802 	seq_putc(seq, '\n');
1803 	return 0;
1804 }
1805 
1806 int __init gfs2_glock_init(void)
1807 {
1808 	unsigned i;
1809 	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1810 		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
1811 	}
1812 
1813 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1814 					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1815 	if (!glock_workqueue)
1816 		return -ENOMEM;
1817 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1818 						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1819 						0);
1820 	if (!gfs2_delete_workqueue) {
1821 		destroy_workqueue(glock_workqueue);
1822 		return -ENOMEM;
1823 	}
1824 
1825 	register_shrinker(&glock_shrinker);
1826 
1827 	return 0;
1828 }
1829 
1830 void gfs2_glock_exit(void)
1831 {
1832 	unregister_shrinker(&glock_shrinker);
1833 	destroy_workqueue(glock_workqueue);
1834 	destroy_workqueue(gfs2_delete_workqueue);
1835 }
1836 
1837 static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1838 {
1839 	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1840 			      struct gfs2_glock, gl_list);
1841 }
1842 
1843 static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1844 {
1845 	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1846 			      struct gfs2_glock, gl_list);
1847 }
1848 
1849 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1850 {
1851 	struct gfs2_glock *gl;
1852 
1853 	do {
1854 		gl = gi->gl;
1855 		if (gl) {
1856 			gi->gl = glock_hash_next(gl);
1857 			gi->nhash++;
1858 		} else {
1859 			if (gi->hash >= GFS2_GL_HASH_SIZE) {
1860 				rcu_read_unlock();
1861 				return 1;
1862 			}
1863 			gi->gl = glock_hash_chain(gi->hash);
1864 			gi->nhash = 0;
1865 		}
1866 		while (gi->gl == NULL) {
1867 			gi->hash++;
1868 			if (gi->hash >= GFS2_GL_HASH_SIZE) {
1869 				rcu_read_unlock();
1870 				return 1;
1871 			}
1872 			gi->gl = glock_hash_chain(gi->hash);
1873 			gi->nhash = 0;
1874 		}
1875 	/* Skip entries for other sb and dead entries */
1876 	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
1877 
1878 	return 0;
1879 }
1880 
1881 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1882 {
1883 	struct gfs2_glock_iter *gi = seq->private;
1884 	loff_t n = *pos;
1885 
1886 	if (gi->last_pos <= *pos)
1887 		n = gi->nhash + (*pos - gi->last_pos);
1888 	else
1889 		gi->hash = 0;
1890 
1891 	gi->nhash = 0;
1892 	rcu_read_lock();
1893 
1894 	do {
1895 		if (gfs2_glock_iter_next(gi))
1896 			return NULL;
1897 	} while (n--);
1898 
1899 	gi->last_pos = *pos;
1900 	return gi->gl;
1901 }
1902 
1903 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1904 				 loff_t *pos)
1905 {
1906 	struct gfs2_glock_iter *gi = seq->private;
1907 
1908 	(*pos)++;
1909 	gi->last_pos = *pos;
1910 	if (gfs2_glock_iter_next(gi))
1911 		return NULL;
1912 
1913 	return gi->gl;
1914 }
1915 
1916 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1917 {
1918 	struct gfs2_glock_iter *gi = seq->private;
1919 
1920 	if (gi->gl)
1921 		rcu_read_unlock();
1922 	gi->gl = NULL;
1923 }
1924 
1925 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1926 {
1927 	return dump_glock(seq, iter_ptr);
1928 }
1929 
1930 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
1931 {
1932 	struct gfs2_glock_iter *gi = seq->private;
1933 
1934 	gi->hash = *pos;
1935 	if (*pos >= GFS2_NR_SBSTATS)
1936 		return NULL;
1937 	preempt_disable();
1938 	return SEQ_START_TOKEN;
1939 }
1940 
1941 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
1942 				   loff_t *pos)
1943 {
1944 	struct gfs2_glock_iter *gi = seq->private;
1945 	(*pos)++;
1946 	gi->hash++;
1947 	if (gi->hash >= GFS2_NR_SBSTATS) {
1948 		preempt_enable();
1949 		return NULL;
1950 	}
1951 	return SEQ_START_TOKEN;
1952 }
1953 
1954 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
1955 {
1956 	preempt_enable();
1957 }
1958 
1959 static const struct seq_operations gfs2_glock_seq_ops = {
1960 	.start = gfs2_glock_seq_start,
1961 	.next  = gfs2_glock_seq_next,
1962 	.stop  = gfs2_glock_seq_stop,
1963 	.show  = gfs2_glock_seq_show,
1964 };
1965 
1966 static const struct seq_operations gfs2_glstats_seq_ops = {
1967 	.start = gfs2_glock_seq_start,
1968 	.next  = gfs2_glock_seq_next,
1969 	.stop  = gfs2_glock_seq_stop,
1970 	.show  = gfs2_glstats_seq_show,
1971 };
1972 
1973 static const struct seq_operations gfs2_sbstats_seq_ops = {
1974 	.start = gfs2_sbstats_seq_start,
1975 	.next  = gfs2_sbstats_seq_next,
1976 	.stop  = gfs2_sbstats_seq_stop,
1977 	.show  = gfs2_sbstats_seq_show,
1978 };
1979 
1980 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
1981 
1982 static int gfs2_glocks_open(struct inode *inode, struct file *file)
1983 {
1984 	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1985 				   sizeof(struct gfs2_glock_iter));
1986 	if (ret == 0) {
1987 		struct seq_file *seq = file->private_data;
1988 		struct gfs2_glock_iter *gi = seq->private;
1989 		gi->sdp = inode->i_private;
1990 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1991 		if (seq->buf)
1992 			seq->size = GFS2_SEQ_GOODSIZE;
1993 	}
1994 	return ret;
1995 }
1996 
1997 static int gfs2_glstats_open(struct inode *inode, struct file *file)
1998 {
1999 	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
2000 				   sizeof(struct gfs2_glock_iter));
2001 	if (ret == 0) {
2002 		struct seq_file *seq = file->private_data;
2003 		struct gfs2_glock_iter *gi = seq->private;
2004 		gi->sdp = inode->i_private;
2005 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2006 		if (seq->buf)
2007 			seq->size = GFS2_SEQ_GOODSIZE;
2008 	}
2009 	return ret;
2010 }
2011 
2012 static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2013 {
2014 	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
2015 				   sizeof(struct gfs2_glock_iter));
2016 	if (ret == 0) {
2017 		struct seq_file *seq = file->private_data;
2018 		struct gfs2_glock_iter *gi = seq->private;
2019 		gi->sdp = inode->i_private;
2020 	}
2021 	return ret;
2022 }
2023 
2024 static const struct file_operations gfs2_glocks_fops = {
2025 	.owner   = THIS_MODULE,
2026 	.open    = gfs2_glocks_open,
2027 	.read    = seq_read,
2028 	.llseek  = seq_lseek,
2029 	.release = seq_release_private,
2030 };
2031 
2032 static const struct file_operations gfs2_glstats_fops = {
2033 	.owner   = THIS_MODULE,
2034 	.open    = gfs2_glstats_open,
2035 	.read    = seq_read,
2036 	.llseek  = seq_lseek,
2037 	.release = seq_release_private,
2038 };
2039 
2040 static const struct file_operations gfs2_sbstats_fops = {
2041 	.owner   = THIS_MODULE,
2042 	.open	 = gfs2_sbstats_open,
2043 	.read    = seq_read,
2044 	.llseek  = seq_lseek,
2045 	.release = seq_release_private,
2046 };
2047 
2048 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2049 {
2050 	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2051 	if (!sdp->debugfs_dir)
2052 		return -ENOMEM;
2053 	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2054 							 S_IFREG | S_IRUGO,
2055 							 sdp->debugfs_dir, sdp,
2056 							 &gfs2_glocks_fops);
2057 	if (!sdp->debugfs_dentry_glocks)
2058 		goto fail;
2059 
2060 	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
2061 							S_IFREG | S_IRUGO,
2062 							sdp->debugfs_dir, sdp,
2063 							&gfs2_glstats_fops);
2064 	if (!sdp->debugfs_dentry_glstats)
2065 		goto fail;
2066 
2067 	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
2068 							S_IFREG | S_IRUGO,
2069 							sdp->debugfs_dir, sdp,
2070 							&gfs2_sbstats_fops);
2071 	if (!sdp->debugfs_dentry_sbstats)
2072 		goto fail;
2073 
2074 	return 0;
2075 fail:
2076 	gfs2_delete_debugfs_file(sdp);
2077 	return -ENOMEM;
2078 }
2079 
2080 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2081 {
2082 	if (sdp->debugfs_dir) {
2083 		if (sdp->debugfs_dentry_glocks) {
2084 			debugfs_remove(sdp->debugfs_dentry_glocks);
2085 			sdp->debugfs_dentry_glocks = NULL;
2086 		}
2087 		if (sdp->debugfs_dentry_glstats) {
2088 			debugfs_remove(sdp->debugfs_dentry_glstats);
2089 			sdp->debugfs_dentry_glstats = NULL;
2090 		}
2091 		if (sdp->debugfs_dentry_sbstats) {
2092 			debugfs_remove(sdp->debugfs_dentry_sbstats);
2093 			sdp->debugfs_dentry_sbstats = NULL;
2094 		}
2095 		debugfs_remove(sdp->debugfs_dir);
2096 		sdp->debugfs_dir = NULL;
2097 	}
2098 }
2099 
2100 int gfs2_register_debugfs(void)
2101 {
2102 	gfs2_root = debugfs_create_dir("gfs2", NULL);
2103 	return gfs2_root ? 0 : -ENOMEM;
2104 }
2105 
2106 void gfs2_unregister_debugfs(void)
2107 {
2108 	debugfs_remove(gfs2_root);
2109 	gfs2_root = NULL;
2110 }
2111