// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmglue.c
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/quotaops.h>
#include <linux/sched/signal.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_lockingver.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "quota.h"
#include "refcounttree.h"
#include "acl.h"

#include "buffer_head_io.h"

struct ocfs2_mask_waiter {
	struct list_head	mw_item;
	int			mw_status;
	struct completion	mw_complete;
	unsigned long		mw_mask;
	unsigned long		mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
	ktime_t			mw_lock_start;
#endif
};

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);

/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock()
 *
 */
enum ocfs2_unblock_action {
	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
				      * ->post_unlock callback */
	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
				      * ->post_unlock() callback. */
};

struct ocfs2_unblock_ctl {
	int requeue;
	enum ocfs2_unblock_action unblock_action;
};

/* Lockdep class keys */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
#endif

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
					    int new_level);
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
					 int blocking);

#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	mlog(level, "LVB information for %s (called from %s:%u):\n",
	     lockres->l_name, function, line);
	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
	     be32_to_cpu(lvb->lvb_igeneration));
	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
	     be16_to_cpu(lvb->lvb_imode));
	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
	     be32_to_cpu(lvb->lvb_iattr));
}


/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
	/*
	 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
	 * this callback if ->l_priv is not an ocfs2_super pointer
	 */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/*
	 * Optionally called in the downconvert thread after a
	 * successful downconvert. The lockres will not be referenced
	 * after this callback is called, so it is safe to free
	 * memory, etc.
	 *
	 * The exact semantics of when this is called are controlled
	 * by ->downconvert_worker()
	 */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/*
	 * Allow a lock type to add checks to determine whether it is
	 * safe to downconvert a lock. Return 0 to re-queue the
	 * downconvert at a later time, nonzero to continue.
	 *
	 * For most locks, the default checks that there are no
	 * incompatible holders are sufficient.
	 *
	 * Called with the lockres spinlock held.
	 */
	int (*check_downconvert)(struct ocfs2_lock_res *, int);

	/*
	 * Allows a lock type to populate the lock value block. This
	 * is called on downconvert, and when we drop a lock.
	 *
	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
	 * in the flags field.
	 *
	 * Called with the lockres spinlock held.
	 */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/*
	 * Called from the downconvert thread when it is determined
	 * that a lock will be downconverted. This is called without
	 * any locks held so the function can do work that might
	 * schedule (syncing out data, etc).
	 *
	 * This should return any one of the ocfs2_unblock_action
	 * values, depending on what it wants the thread to do.
	 */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int);

	/*
	 * LOCK_TYPE_* flags which describe the specific requirements
	 * of a lock type. Descriptions of each individual flag follow.
	 */
	int flags;
};

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB		0x2

static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.check_downconvert = ocfs2_check_meta_downconvert,
	.set_lvb	= ocfs2_set_meta_lvb,
	.downconvert_worker = ocfs2_data_convert_worker,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
	.get_osb	= ocfs2_get_dentry_osb,
	.post_unlock	= ocfs2_dentry_post_unlock,
	.downconvert_worker = ocfs2_dentry_convert_worker,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
	.get_osb	= ocfs2_get_file_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
	.set_lvb	= ocfs2_set_qinfo_lvb,
	.get_osb	= ocfs2_get_qinfo_osb,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
	.check_downconvert = ocfs2_check_refcount_downconvert,
	.downconvert_worker = ocfs2_refcount_convert_worker,
	.flags		= 0,
};

static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
	return container_of(lksb, struct ocfs2_lock_res, l_lksb);
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!ocfs2_is_inode_lock(lockres));

	return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

	return (struct ocfs2_dentry_lock *)lockres->l_priv;
}

static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);

	return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}

static inline struct ocfs2_refcount_tree *
ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
{
	return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
	if (lockres->l_ops->get_osb)
		return lockres->l_ops->get_osb(lockres);

	return (struct ocfs2_super *)lockres->l_priv;
}

static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int level, unsigned long caller_ip);
static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres,
					int level)
{
	__ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
}

static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres) do {					\
	if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY)				\
		mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n",	\
		     _err, _func, _lockres->l_name);					\
	else										\
		mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n",	\
		     _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name,	\
		     (unsigned int)ocfs2_get_dentry_lock_ino(_lockres));		\
} while (0)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
				  struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
				        struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres);


static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int len;

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
		       (long long)blkno, generation);

	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);
}
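
/*
 * For illustration: assuming OCFS2_LOCK_ID_PAD is a run of six '0'
 * characters, OCFS2_LOCK_ID_MAX_LEN is 32, and the meta lock type
 * character is 'M', a meta lock on inode block 5 with generation 0x2
 * would get the name
 *
 *	M000000 0000000000000005 00000002
 *	|  |            |            |
 *	|  pad          blkno        generation
 *	type char
 *
 * (spaces added here for readability; the real name is one contiguous
 * 31-character string, matching the BUG_ON above).
 */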

static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_dlm_tracking_lock);
	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_dlm_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
	res->l_lock_refresh = 0;
	memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
	memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}

static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
				    struct ocfs2_mask_waiter *mw, int ret)
{
	u32 usec;
	ktime_t kt;
	struct ocfs2_lock_stats *stats;

	if (level == LKM_PRMODE)
		stats = &res->l_lock_prmode;
	else if (level == LKM_EXMODE)
		stats = &res->l_lock_exmode;
	else
		return;

	kt = ktime_sub(ktime_get(), mw->mw_lock_start);
	usec = ktime_to_us(kt);

	stats->ls_gets++;
	stats->ls_total += ktime_to_ns(kt);
	/* overflow */
	if (unlikely(stats->ls_gets == 0)) {
		stats->ls_gets++;
		stats->ls_total = ktime_to_ns(kt);
	}

	if (stats->ls_max < usec)
		stats->ls_max = usec;

	if (ret)
		stats->ls_fail++;
}

static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
	lockres->l_lock_refresh++;
}

static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
	mw->mw_lock_start = ktime_get();
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
			   int level, struct ocfs2_mask_waiter *mw, int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif

static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       struct ocfs2_lock_res_ops *ops,
				       void *priv)
{
	res->l_type          = type;
	res->l_ops           = ops;
	res->l_priv          = priv;

	res->l_level         = DLM_LOCK_IV;
	res->l_requested     = DLM_LOCK_IV;
	res->l_blocking      = DLM_LOCK_IV;
	res->l_action        = OCFS2_AST_INVALID;
	res->l_unlock_action = OCFS2_UNLOCK_INVALID;

	res->l_flags         = OCFS2_LOCK_INITIALIZED;

	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);

	ocfs2_init_lock_stats(res);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (type != OCFS2_LOCK_TYPE_OPEN)
		lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
				 &lockdep_keys[type], 0);
	else
		res->l_lockdep_map.key = NULL;
#endif
}

void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	/* This also clears out the lock status block */
	memset(res, 0, sizeof(struct ocfs2_lock_res));
	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
	INIT_LIST_HEAD(&res->l_holders);
}

void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;

	switch(type) {
		case OCFS2_LOCK_TYPE_RW:
			ops = &ocfs2_inode_rw_lops;
			break;
		case OCFS2_LOCK_TYPE_META:
			ops = &ocfs2_inode_inode_lops;
			break;
		case OCFS2_LOCK_TYPE_OPEN:
			ops = &ocfs2_inode_open_lops;
			break;
		default:
			mlog_bug_on_msg(1, "type: %d\n", type);
			ops = NULL; /* thanks, gcc */
			break;
	}

	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
			      generation, res->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}

static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}

static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mem_dqinfo *info = lockres->l_priv;

	return OCFS2_SB(info->dqi_gi.dqi_sb);
}

static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_file_private *fp = lockres->l_priv;

	return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}

static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 inode_blkno_be;

	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(__be64));

	return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}

void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	int len;
	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * Unfortunately, the standard lock naming scheme won't work
	 * here because we have two 16 byte values to use. Instead,
	 * we'll stuff the inode number as a binary value. We still
	 * want error prints to show something without garbling the
	 * display, so drop a null byte in there before the inode
	 * number. A future version of OCFS2 will likely use all
	 * binary lock names. The stringified names have been a
	 * tremendous aid in debugging, but now that the debugfs
	 * interface exists, we can mangle things there if need be.
	 *
	 * NOTE: We also drop the standard "pad" value (the total lock
	 * name size stays the same though - the last part is all
	 * zeros due to the memset in ocfs2_lock_res_init_once()
	 */
	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
		       "%c%016llx",
		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
		       (long long)parent);

	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));

	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
	       sizeof(__be64));

	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
				   dl);
}
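
/*
 * Sketch of the resulting dentry lock name layout (derivable from the
 * snprintf format and the BUG_ON above, which together force
 * OCFS2_DENTRY_LOCK_INO_START == 18):
 *
 *	byte  0     : OCFS2_LOCK_TYPE_DENTRY type character
 *	bytes 1-16  : parent dir block number, zero-padded hex
 *	byte  17    : '\0' written by snprintf, keeps prints readable
 *	bytes 18-25 : inode block number, raw big-endian binary
 *	bytes 26+   : zero, from the memset in ocfs2_lock_res_init_once()
 *
 * ocfs2_get_dentry_lock_ino() above simply reverses the final memcpy.
 */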

static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			      0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
				   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
				   &ocfs2_rename_lops, osb);
}

static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
					 struct ocfs2_super *osb)
{
	/* nfs_sync lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
				   &ocfs2_nfs_sync_lops, osb);
}

void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;

	/* Only one trimfs thread is allowed to work at a time. */
	mutex_lock(&osb->obs_trim_fs_mutex);

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
	ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
				   &ocfs2_trim_fs_lops, osb);
}

void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;

	ocfs2_simple_drop_lockres(osb, lockres);
	ocfs2_lock_res_free(lockres);

	mutex_unlock(&osb->obs_trim_fs_mutex);
}

static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
					    struct ocfs2_super *osb)
{
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
				   &ocfs2_orphan_scan_lops, osb);
}

void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
			      struct ocfs2_file_private *fp)
{
	struct inode *inode = fp->fp_file->f_mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
			      inode->i_generation, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
				   fp);
	lockres->l_flags |= OCFS2_LOCK_NOCACHE;
}

void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
			       struct ocfs2_mem_dqinfo *info)
{
	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
			      0, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
				   OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
				   info);
}

void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
				  struct ocfs2_super *osb, u64 ref_blkno,
				  unsigned int generation)
{
	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
			      generation, lockres->l_name);
	ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
				   &ocfs2_refcount_block_lops, osb);
}

void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	ocfs2_remove_lockres_tracking(res);

	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
			"Lockres %s is on the blocked list\n",
			res->l_name);
	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
			"Lockres %s has mask waiters pending\n",
			res->l_name);
	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
			"Lockres %s is locked\n",
			res->l_name);
	mlog_bug_on_msg(res->l_ro_holders,
			"Lockres %s has %u ro holders\n",
			res->l_name, res->l_ro_holders);
	mlog_bug_on_msg(res->l_ex_holders,
			"Lockres %s has %u ex holders\n",
			res->l_name, res->l_ex_holders);

	/* Need to clear out the lock status block for the dlm */
	memset(&res->l_lksb, 0, sizeof(res->l_lksb));

	res->l_flags = 0UL;
}

/*
 * Keep a list of processes who have interest in a lockres.
 * Note: this is now only used for checking recursive cluster locking.
 */
static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
				   struct ocfs2_lock_holder *oh)
{
	INIT_LIST_HEAD(&oh->oh_list);
	oh->oh_owner_pid = get_pid(task_pid(current));

	spin_lock(&lockres->l_lock);
	list_add_tail(&oh->oh_list, &lockres->l_holders);
	spin_unlock(&lockres->l_lock);
}

static struct ocfs2_lock_holder *
ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
		struct pid *pid)
{
	struct ocfs2_lock_holder *oh;

	spin_lock(&lockres->l_lock);
	list_for_each_entry(oh, &lockres->l_holders, oh_list) {
		if (oh->oh_owner_pid == pid) {
			spin_unlock(&lockres->l_lock);
			return oh;
		}
	}
	spin_unlock(&lockres->l_lock);
	return NULL;
}

static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
				       struct ocfs2_lock_holder *oh)
{
	spin_lock(&lockres->l_lock);
	list_del(&oh->oh_list);
	spin_unlock(&lockres->l_lock);

	put_pid(oh->oh_owner_pid);
}
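
/*
 * Typical usage, as a sketch (the inode lock tracker helpers elsewhere
 * in this file follow this shape): register a holder once the cluster
 * lock is taken, and check for our own pid first so a re-entrant path
 * doesn't deadlock against itself:
 *
 *	struct ocfs2_lock_holder oh;
 *
 *	if (!ocfs2_pid_holder(lockres, task_pid(current))) {
 *		// first acquisition in this process
 *		ocfs2_cluster_lock(osb, lockres, level, 0, 0);
 *		ocfs2_add_holder(lockres, &oh);
 *	}
 *	// ... do work ...
 *	ocfs2_remove_holder(lockres, &oh);
 */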


static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		lockres->l_ex_holders++;
		break;
	case DLM_LOCK_PR:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}
}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case DLM_LOCK_PR:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
}

/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
	int new_level = DLM_LOCK_EX;

	if (level == DLM_LOCK_EX)
		new_level = DLM_LOCK_NL;
	else if (level == DLM_LOCK_PR)
		new_level = DLM_LOCK_PR;
	return new_level;
}
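
/*
 * In table form, the mapping implemented above -- the highest level
 * another node may hold while we hold `level` locally:
 *
 *	local level	highest compatible level elsewhere
 *	DLM_LOCK_EX	DLM_LOCK_NL
 *	DLM_LOCK_PR	DLM_LOCK_PR
 *	DLM_LOCK_NL	DLM_LOCK_EX
 */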

static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct ocfs2_mask_waiter *mw, *tmp;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			continue;

		list_del_init(&mw->mw_item);
		mw->mw_status = 0;
		complete(&mw->mw_complete);
	}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}

static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	lockres->l_level = lockres->l_requested;
	if (lockres->l_level <=
	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
		lockres->l_blocking = DLM_LOCK_NL;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
	}
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}

static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date. Convert from NL to
	 * *anything*, however, should mark ourselves as needing an
	 * update */
	if (lockres->l_level == DLM_LOCK_NL &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;

	/*
	 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
	 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
	 * downconverting the lock before the upconvert has fully completed.
	 * Do not prevent the dc thread from downconverting if NONBLOCK lock
	 * had already returned.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
		lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
	else
		lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);

	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}

static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);

	if (lockres->l_requested > DLM_LOCK_NL &&
	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}

static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
				     int level)
{
	int needs_downconvert = 0;

	assert_spin_locked(&lockres->l_lock);

	if (level > lockres->l_blocking) {
		/* Only schedule a downconvert if we haven't already scheduled
		 * one that goes low enough to satisfy the level we're
		 * blocking. This also catches the case where we get
		 * duplicate BASTs */
		if (ocfs2_highest_compat_lock_level(level) <
		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
			needs_downconvert = 1;

		lockres->l_blocking = level;
	}

	mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
	     lockres->l_name, level, lockres->l_level, lockres->l_blocking,
	     needs_downconvert);

	if (needs_downconvert)
		lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
	mlog(0, "needs_downconvert = %d\n", needs_downconvert);
	return needs_downconvert;
}

/*
 * OCFS2_LOCK_PENDING and l_pending_gen.
 *
 * Why does OCFS2_LOCK_PENDING exist?  To close a race between setting
 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock().  See ocfs2_unblock_lock()
 * for more details on the race.
 *
 * OCFS2_LOCK_PENDING closes the race quite nicely.  However, it introduces
 * a race on itself.  In o2dlm, we can get the ast before ocfs2_dlm_lock()
 * returns.  The ast clears OCFS2_LOCK_BUSY, and must therefore clear
 * OCFS2_LOCK_PENDING at the same time.  When ocfs2_dlm_lock() returns,
 * the caller is going to try to clear PENDING again.  If nothing else is
 * happening, __lockres_clear_pending() sees PENDING is unset and does
 * nothing.
 *
 * But what if another path (eg downconvert thread) has just started a
 * new locking action?  The other path has re-set PENDING.  Our path
 * cannot clear PENDING, because that will re-open the original race
 * window.
 *
 * [Example]
 *
 * ocfs2_meta_lock()
 *  ocfs2_cluster_lock()
 *   set BUSY
 *   set PENDING
 *   drop l_lock
 *   ocfs2_dlm_lock()
 *    ocfs2_locking_ast()		ocfs2_downconvert_thread()
 *     clear PENDING			 ocfs2_unblock_lock()
 *					  take_l_lock
 *					  !BUSY
 *					  ocfs2_prepare_downconvert()
 *					   set BUSY
 *					   set PENDING
 *					  drop l_lock
 *   take l_lock
 *   clear PENDING
 *   drop l_lock
 *			<window>
 *					  ocfs2_dlm_lock()
 *
 * So as you can see, we now have a window where l_lock is not held,
 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
 *
 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
 * set by ocfs2_prepare_downconvert().  That wasn't nice.
 *
 * To solve this we introduce l_pending_gen.  A call to
 * lockres_clear_pending() will only do so when it is passed a generation
 * number that matches the lockres.  lockres_set_pending() will return the
 * current generation number.  When ocfs2_cluster_lock() goes to clear
 * PENDING, it passes the generation it got from set_pending().  In our
 * example above, the generation numbers will *not* match.  Thus,
 * ocfs2_cluster_lock() will not clear the PENDING set by
 * ocfs2_prepare_downconvert().
 */
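
/*
 * Putting the two halves together, the calling pattern used by
 * ocfs2_lock_create() and __ocfs2_cluster_lock() below looks like:
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(...);
 *	lockres_clear_pending(lockres, gen, osb);
 *
 * If the ast ran before ocfs2_dlm_lock() returned, it already cleared
 * PENDING and bumped l_pending_gen, so the clear_pending() here sees a
 * stale generation and does nothing rather than clobbering a newer
 * locking action.
 */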

/* Unlocked version for ocfs2_locking_ast() */
static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
				    unsigned int generation,
				    struct ocfs2_super *osb)
{
	assert_spin_locked(&lockres->l_lock);

	/*
	 * The ast and locking functions can race us here.  The winner
	 * will clear pending, the loser will not.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
	    (lockres->l_pending_gen != generation))
		return;

	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
	lockres->l_pending_gen++;

	/*
	 * The downconvert thread may have skipped us because we
	 * were PENDING.  Wake it up.
	 */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
		ocfs2_wake_downconvert_thread(osb);
}

/* Locked version for callers of ocfs2_dlm_lock() */
static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
				  unsigned int generation,
				  struct ocfs2_super *osb)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	__lockres_clear_pending(lockres, generation, osb);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));

	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);

	return lockres->l_pending_gen;
}

static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	int needs_downconvert;
	unsigned long flags;

	BUG_ON(level <= DLM_LOCK_NL);

	mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
	     "type %s\n", lockres->l_name, level, lockres->l_level,
	     ocfs2_lock_type_string(lockres->l_type));

	/*
	 * We can skip the bast for locks which don't enable caching -
	 * they'll be dropped at the earliest possible time anyway.
	 */
	if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
		return;

	spin_lock_irqsave(&lockres->l_lock, flags);
	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
	if (needs_downconvert)
		ocfs2_schedule_blocked_lock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);

	ocfs2_wake_downconvert_thread(osb);
}

static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{
	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	int status;

	spin_lock_irqsave(&lockres->l_lock, flags);

	status = ocfs2_dlm_lock_status(&lockres->l_lksb);

	if (status == -EAGAIN) {
		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
		goto out;
	}

	if (status) {
		mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
		     lockres->l_name, status);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
	     "level %d => %d\n", lockres->l_name, lockres->l_action,
	     lockres->l_unlock_action, lockres->l_level, lockres->l_requested);

	switch(lockres->l_action) {
	case OCFS2_AST_ATTACH:
		ocfs2_generic_handle_attach_action(lockres);
		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
		break;
	case OCFS2_AST_CONVERT:
		ocfs2_generic_handle_convert_action(lockres);
		break;
	case OCFS2_AST_DOWNCONVERT:
		ocfs2_generic_handle_downconvert_action(lockres);
		break;
	default:
		mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
		     "flags 0x%lx, unlock: %u\n",
		     lockres->l_name, lockres->l_action, lockres->l_flags,
		     lockres->l_unlock_action);
		BUG();
	}
out:
	/* set it to something invalid so if we get called again we
	 * can catch it. */
	lockres->l_action = OCFS2_AST_INVALID;

	/* Did we try to cancel this lock?  Clear that state */
	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;

	/*
	 * We may have beaten the locking functions here.  We certainly
	 * know that dlm_lock() has been called :-)
	 * Because we can't have two lock calls in flight at once, we
	 * can use lockres->l_pending_gen.
	 */
	__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);

	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
{
	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
	unsigned long flags;

	mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
	     lockres->l_name, lockres->l_unlock_action);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (error) {
		mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
		     "unlock_action %d\n", error, lockres->l_name,
		     lockres->l_unlock_action);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	switch(lockres->l_unlock_action) {
	case OCFS2_UNLOCK_CANCEL_CONVERT:
		mlog(0, "Cancel convert success for %s\n", lockres->l_name);
		lockres->l_action = OCFS2_AST_INVALID;
		/* Downconvert thread may have requeued this lock, we
		 * need to wake it. */
		if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
			ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
		break;
	case OCFS2_UNLOCK_DROP_LOCK:
		lockres->l_level = DLM_LOCK_IV;
		break;
	default:
		BUG();
	}

	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

/*
 * This is the filesystem locking protocol.  It provides the lock handling
 * hooks for the underlying DLM.  It has a maximum version number.
 * The version number allows interoperability with systems running at
 * the same major number and an equal or smaller minor number.
 *
 * Whenever the filesystem does new things with locks (adds or removes a
 * lock, orders them differently, does different things underneath a lock),
 * the version must be changed.  The protocol is negotiated when joining
 * the dlm domain.  A node may join the domain if its major version is
 * identical to all other nodes and its minor version is greater than
 * or equal to all other nodes.  When its minor version is greater than
 * the other nodes, it will run at the minor version specified by the
 * other nodes.
 *
 * If a locking change is made that will not be compatible with older
 * versions, the major number must be increased and the minor version set
 * to zero.  If a change merely adds a behavior that can be disabled when
 * speaking to older versions, the minor version must be increased.  If a
 * change adds a fully backwards compatible change (eg, LVB changes that
 * are just ignored by older versions), the version does not need to be
 * updated.
 */
static struct ocfs2_locking_protocol lproto = {
	.lp_max_version = {
		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
	},
	.lp_lock_ast		= ocfs2_locking_ast,
	.lp_blocking_ast	= ocfs2_blocking_ast,
	.lp_unlock_ast		= ocfs2_unlock_ast,
};
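
/*
 * A concrete example of the negotiation described above: a node whose
 * maximum protocol version is 1.2 may join a domain whose other nodes
 * run 1.0 (same major, smaller minor), but it must then speak 1.0 and
 * keep any 1.1/1.2-only behaviors disabled. A node whose maximum is
 * 2.0 cannot join that domain at all.
 */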

void ocfs2_set_locking_protocol(void)
{
	ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
}

static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
	if (convert)
		lockres->l_action = OCFS2_AST_INVALID;
	else
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}

/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags)
{
	int ret = 0;
	unsigned long flags;
	unsigned int gen;

	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
	     dlm_flags);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	lockres->l_action = OCFS2_AST_ATTACH;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn,
			     level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1);
	lockres_clear_pending(lockres, gen, osb);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);
	}

	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);

bail:
	return ret;
}

static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = lockres->l_flags & flag;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}

/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}
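
/*
 * For example: if another node has asked for EX, l_blocking is
 * DLM_LOCK_EX and the highest compatible level is DLM_LOCK_NL, so
 * neither a PR nor an EX request may continue here. If the other node
 * only wants PR, a local PR request can still proceed while a local EX
 * request must wait for the downconvert.
 */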

static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
	ocfs2_init_start_time(mw);
}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	reinit_completion(&mw->mw_complete);
	return mw->mw_status;
}

static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
	mw->mw_mask = mask;
	mw->mw_goal = goal;
}

/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	int ret = 0;

	assert_spin_locked(&lockres->l_lock);
	if (!list_empty(&mw->mw_item)) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			ret = -EBUSY;

		list_del_init(&mw->mw_item);
		init_completion(&mw->mw_complete);
	}

	return ret;
}

static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = __lockres_remove_mask_waiter(lockres, mw);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
					     struct ocfs2_lock_res *lockres)
{
	int ret;

	ret = wait_for_completion_interruptible(&mw->mw_complete);
	if (ret)
		lockres_remove_mask_waiter(lockres, mw);
	else
		ret = mw->mw_status;
	/* Re-arm the completion in case we want to wait on it again */
	reinit_completion(&mw->mw_complete);
	return ret;
}

static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres,
				int level,
				u32 lkm_flags,
				int arg_flags,
				int l_subclass,
				unsigned long caller_ip)
{
	struct ocfs2_mask_waiter mw;
	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
	unsigned long flags;
	unsigned int gen;
	int noqueue_attempted = 0;
	int dlm_locked = 0;
	int kick_dc = 0;

	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
		mlog_errno(-EINVAL);
		return -EINVAL;
	}

	ocfs2_init_mask_waiter(&mw);

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

again:
	wait = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto unlock;
	}

	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
			"Cluster lock called on freeing lockres %s! flags "
			"0x%lx\n", lockres->l_name, lockres->l_flags);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
	    level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
		/*
		 * We've upconverted. If the lock now has a level we can
		 * work with, we take it. If, however, the lock is not at the
		 * required level, we go thru the full cycle. One way this could
		 * happen is if a process requesting an upconvert to PR is
		 * closely followed by another requesting upconvert to an EX.
		 * If the process requesting EX lands here, we want it to
		 * continue attempting to upconvert and let the process
		 * requesting PR take the lock.
		 * If multiple processes request upconvert to PR, the first one
		 * here will take the lock. The others will have to go thru the
		 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
		 * downconvert request.
		 */
		if (level <= lockres->l_level)
			goto update_holders;
	}

	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* the lock is currently blocked on behalf of
		 * another node */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
		wait = 1;
		goto unlock;
	}

	if (level > lockres->l_level) {
		if (noqueue_attempted > 0) {
			ret = -EAGAIN;
			goto unlock;
		}
		if (lkm_flags & DLM_LKF_NOQUEUE)
			noqueue_attempted = 1;

		if (lockres->l_action != OCFS2_AST_INVALID)
			mlog(ML_ERROR, "lockres %s has action %u pending\n",
			     lockres->l_name, lockres->l_action);

		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
			lockres->l_action = OCFS2_AST_ATTACH;
			lkm_flags &= ~DLM_LKF_CONVERT;
		} else {
			lockres->l_action = OCFS2_AST_CONVERT;
			lkm_flags |= DLM_LKF_CONVERT;
		}

		lockres->l_requested = level;
		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		gen = lockres_set_pending(lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		BUG_ON(level == DLM_LOCK_IV);
		BUG_ON(level == DLM_LOCK_NL);

		mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		ret = ocfs2_dlm_lock(osb->cconn,
				     level,
				     &lockres->l_lksb,
				     lkm_flags,
				     lockres->l_name,
				     OCFS2_LOCK_ID_MAX_LEN - 1);
		lockres_clear_pending(lockres, gen, osb);
		if (ret) {
			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
			    (ret != -EAGAIN)) {
				ocfs2_log_dlm_error("ocfs2_dlm_lock",
						    ret, lockres);
			}
			ocfs2_recover_from_dlm_error(lockres, 1);
			goto out;
		}
		dlm_locked = 1;

		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
		     lockres->l_name);

		/* At this point we've gone inside the dlm and need to
		 * complete our work regardless. */
		catch_signals = 0;

		/* wait for busy to clear and carry on */
		goto again;
	}

update_holders:
	/* Ok, if we get here then we're good to go. */
	ocfs2_inc_holders(lockres, level);

	ret = 0;
unlock:
	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);

	/* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
1590 	kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
1591 
1592 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1593 	if (kick_dc)
1594 		ocfs2_wake_downconvert_thread(osb);
1595 out:
1596 	/*
1597 	 * This is helping work around a lock inversion between the page lock
1598 	 * and dlm locks.  One path holds the page lock while calling aops
1599 	 * which block acquiring dlm locks.  The voting thread holds dlm
1600 	 * locks while acquiring page locks while down converting data locks.
1601 	 * This block is helping an aop path notice the inversion and back
1602 	 * off to unlock its page lock before trying the dlm lock again.
1603 	 */
1604 	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1605 	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1606 		wait = 0;
1607 		spin_lock_irqsave(&lockres->l_lock, flags);
1608 		if (__lockres_remove_mask_waiter(lockres, &mw)) {
1609 			if (dlm_locked)
1610 				lockres_or_flags(lockres,
1611 					OCFS2_LOCK_NONBLOCK_FINISHED);
1612 			spin_unlock_irqrestore(&lockres->l_lock, flags);
1613 			ret = -EAGAIN;
1614 		} else {
1615 			spin_unlock_irqrestore(&lockres->l_lock, flags);
1616 			goto again;
1617 		}
1618 	}
1619 	if (wait) {
1620 		ret = ocfs2_wait_for_mask(&mw);
1621 		if (ret == 0)
1622 			goto again;
1623 		mlog_errno(ret);
1624 	}
1625 	ocfs2_update_lock_stats(lockres, level, &mw, ret);
1626 
1627 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1628 	if (!ret && lockres->l_lockdep_map.key != NULL) {
1629 		if (level == DLM_LOCK_PR)
1630 			rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1631 				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1632 				caller_ip);
1633 		else
1634 			rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1635 				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1636 				caller_ip);
1637 	}
1638 #endif
1639 	return ret;
1640 }
1641 
1642 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1643 				     struct ocfs2_lock_res *lockres,
1644 				     int level,
1645 				     u32 lkm_flags,
1646 				     int arg_flags)
1647 {
1648 	return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1649 				    0, _RET_IP_);
1650 }
1651 
1652 
1653 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1654 				   struct ocfs2_lock_res *lockres,
1655 				   int level,
1656 				   unsigned long caller_ip)
1657 {
1658 	unsigned long flags;
1659 
1660 	spin_lock_irqsave(&lockres->l_lock, flags);
1661 	ocfs2_dec_holders(lockres, level);
1662 	ocfs2_downconvert_on_unlock(osb, lockres);
1663 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1664 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1665 	if (lockres->l_lockdep_map.key != NULL)
1666 		rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1667 #endif
1668 }
1669 
1670 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1671 				 struct ocfs2_lock_res *lockres,
1672 				 int ex,
1673 				 int local)
1674 {
1675 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1676 	unsigned long flags;
1677 	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1678 
1679 	spin_lock_irqsave(&lockres->l_lock, flags);
1680 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1681 	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1682 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1683 
1684 	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1685 }
1686 
1687 /* Grants us an EX lock on the data and metadata resources, skipping
1688  * the normal cluster directory lookup. Use this ONLY on newly created
1689  * inodes which other nodes can't possibly see, and which haven't been
1690  * hashed in the inode hash yet. This can give us a good performance
1691  * increase as it'll skip the network broadcast normally associated
1692  * with creating a new lock resource. */
1693 int ocfs2_create_new_inode_locks(struct inode *inode)
1694 {
1695 	int ret;
1696 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1697 
1698 	BUG_ON(!ocfs2_inode_is_new(inode));
1699 
1700 	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1701 
1702 	/* NOTE: We don't increment any of the holder counts, nor
1703 	 * do we add anything to a journal handle. Since this is
1704 	 * supposed to be a new inode which the cluster doesn't know
1705 	 * about yet, there is no need to.  As far as the LVB handling
1706 	 * is concerned, this is basically like acquiring an EX lock
1707 	 * on a resource which has an invalid one -- we'll set it
1708 	 * valid when we release the EX. */
1709 
1710 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1711 	if (ret) {
1712 		mlog_errno(ret);
1713 		goto bail;
1714 	}
1715 
1716 	/*
1717 	 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1718 	 * don't use a generation in their lock names.
1719 	 */
1720 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1721 	if (ret) {
1722 		mlog_errno(ret);
1723 		goto bail;
1724 	}
1725 
1726 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1727 	if (ret)
1728 		mlog_errno(ret);
1729 
1730 bail:
1731 	return ret;
1732 }
1733 
1734 int ocfs2_rw_lock(struct inode *inode, int write)
1735 {
1736 	int status, level;
1737 	struct ocfs2_lock_res *lockres;
1738 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1739 
1740 	mlog(0, "inode %llu take %s RW lock\n",
1741 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1742 	     write ? "EXMODE" : "PRMODE");
1743 
1744 	if (ocfs2_mount_local(osb))
1745 		return 0;
1746 
1747 	lockres = &OCFS2_I(inode)->ip_rw_lockres;
1748 
1749 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1750 
1751 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1752 	if (status < 0)
1753 		mlog_errno(status);
1754 
1755 	return status;
1756 }
1757 
1758 int ocfs2_try_rw_lock(struct inode *inode, int write)
1759 {
1760 	int status, level;
1761 	struct ocfs2_lock_res *lockres;
1762 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1763 
1764 	mlog(0, "inode %llu try to take %s RW lock\n",
1765 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1766 	     write ? "EXMODE" : "PRMODE");
1767 
1768 	if (ocfs2_mount_local(osb))
1769 		return 0;
1770 
1771 	lockres = &OCFS2_I(inode)->ip_rw_lockres;
1772 
1773 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1774 
1775 	status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
1776 	return status;
1777 }
1778 
1779 void ocfs2_rw_unlock(struct inode *inode, int write)
1780 {
1781 	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1782 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1783 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1784 
1785 	mlog(0, "inode %llu drop %s RW lock\n",
1786 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1787 	     write ? "EXMODE" : "PRMODE");
1788 
1789 	if (!ocfs2_mount_local(osb))
1790 		ocfs2_cluster_unlock(osb, lockres, level);
1791 }
1792 
1793 /*
1794  * ocfs2_open_lock always takes a PR mode lock.
1795  */
1796 int ocfs2_open_lock(struct inode *inode)
1797 {
1798 	int status = 0;
1799 	struct ocfs2_lock_res *lockres;
1800 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1801 
1802 	mlog(0, "inode %llu take PRMODE open lock\n",
1803 	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1804 
1805 	if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1806 		goto out;
1807 
1808 	lockres = &OCFS2_I(inode)->ip_open_lockres;
1809 
1810 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0, 0);
1811 	if (status < 0)
1812 		mlog_errno(status);
1813 
1814 out:
1815 	return status;
1816 }
1817 
1818 int ocfs2_try_open_lock(struct inode *inode, int write)
1819 {
1820 	int status = 0, level;
1821 	struct ocfs2_lock_res *lockres;
1822 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1823 
1824 	mlog(0, "inode %llu try to take %s open lock\n",
1825 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
1826 	     write ? "EXMODE" : "PRMODE");
1827 
1828 	if (ocfs2_is_hard_readonly(osb)) {
1829 		if (write)
1830 			status = -EROFS;
1831 		goto out;
1832 	}
1833 
1834 	if (ocfs2_mount_local(osb))
1835 		goto out;
1836 
1837 	lockres = &OCFS2_I(inode)->ip_open_lockres;
1838 
1839 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1840 
1841 	/*
1842 	 * The file system may already be holding a PRMODE/EXMODE open lock.
1843 	 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1844 	 * other nodes and the -EAGAIN will indicate to the caller that
1845 	 * this inode is still in use.
1846 	 */
1847 	status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
1848 
1849 out:
1850 	return status;
1851 }
1852 
1853 /*
1854  * ocfs2_open_unlock unlocks PR and EX mode open locks.
1855  */
1856 void ocfs2_open_unlock(struct inode *inode)
1857 {
1858 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1859 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1860 
1861 	mlog(0, "inode %llu drop open lock\n",
1862 	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
1863 
1864 	if (ocfs2_mount_local(osb))
1865 		goto out;
1866 
1867 	if (lockres->l_ro_holders)
1868 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_PR);
1869 	if (lockres->l_ex_holders)
1870 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
1871 
1872 out:
1873 	return;
1874 }
1875 
1876 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1877 				     int level)
1878 {
1879 	int ret;
1880 	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1881 	unsigned long flags;
1882 	struct ocfs2_mask_waiter mw;
1883 
1884 	ocfs2_init_mask_waiter(&mw);
1885 
1886 retry_cancel:
1887 	spin_lock_irqsave(&lockres->l_lock, flags);
1888 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1889 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
1890 		if (ret) {
1891 			spin_unlock_irqrestore(&lockres->l_lock, flags);
1892 			ret = ocfs2_cancel_convert(osb, lockres);
1893 			if (ret < 0) {
1894 				mlog_errno(ret);
1895 				goto out;
1896 			}
1897 			goto retry_cancel;
1898 		}
1899 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1900 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1901 
1902 		ocfs2_wait_for_mask(&mw);
1903 		goto retry_cancel;
1904 	}
1905 
1906 	ret = -ERESTARTSYS;
1907 	/*
1908 	 * We may still have gotten the lock, in which case there's no
1909 	 * point to restarting the syscall.
1910 	 */
1911 	if (lockres->l_level == level)
1912 		ret = 0;
1913 
1914 	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1915 	     lockres->l_flags, lockres->l_level, lockres->l_action);
1916 
1917 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1918 
1919 out:
1920 	return ret;
1921 }
1922 
1923 /*
1924  * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1925  * flock() calls. The locking approach this requires is sufficiently
1926  * different from all other cluster lock types that we implement a
1927  * separate path to the "low-level" dlm calls. In particular:
1928  *
1929  * - No optimization of lock levels is done - we take exactly
1930  *   what's been requested.
1931  *
1932  * - No lock caching is employed. We immediately downconvert to
1933  *   no-lock at unlock time. This also means flock locks never go on
1934  *   the blocking list.
1935  *
1936  * - Since userspace can trivially deadlock itself with flock, we make
1937  *   sure to allow cancellation of a misbehaving application's flock()
1938  *   request.
1939  *
1940  * - Access to any flock lockres doesn't require concurrency, so we
1941  *   can simplify the code by requiring the caller to guarantee
1942  *   serialization of dlmglue flock calls.
1943  */
1944 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1945 {
1946 	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1947 	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1948 	unsigned long flags;
1949 	struct ocfs2_file_private *fp = file->private_data;
1950 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
1951 	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1952 	struct ocfs2_mask_waiter mw;
1953 
1954 	ocfs2_init_mask_waiter(&mw);
1955 
1956 	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1957 	    (lockres->l_level > DLM_LOCK_NL)) {
1958 		mlog(ML_ERROR,
1959 		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1960 		     "level: %u\n", lockres->l_name, lockres->l_flags,
1961 		     lockres->l_level);
1962 		return -EINVAL;
1963 	}
1964 
1965 	spin_lock_irqsave(&lockres->l_lock, flags);
1966 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1967 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1968 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1969 
1970 		/*
1971 		 * Get the lock at NLMODE to start - that way we
1972 		 * can cancel the upconvert request if need be.
1973 		 */
1974 		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1975 		if (ret < 0) {
1976 			mlog_errno(ret);
1977 			goto out;
1978 		}
1979 
1980 		ret = ocfs2_wait_for_mask(&mw);
1981 		if (ret) {
1982 			mlog_errno(ret);
1983 			goto out;
1984 		}
1985 		spin_lock_irqsave(&lockres->l_lock, flags);
1986 	}
1987 
1988 	lockres->l_action = OCFS2_AST_CONVERT;
1989 	lkm_flags |= DLM_LKF_CONVERT;
1990 	lockres->l_requested = level;
1991 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1992 
1993 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1994 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1995 
1996 	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1997 			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
1998 	if (ret) {
1999 		if (!trylock || (ret != -EAGAIN)) {
2000 			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
2001 			ret = -EINVAL;
2002 		}
2003 
2004 		ocfs2_recover_from_dlm_error(lockres, 1);
2005 		lockres_remove_mask_waiter(lockres, &mw);
2006 		goto out;
2007 	}
2008 
2009 	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
2010 	if (ret == -ERESTARTSYS) {
2011 		/*
2012 		 * Userspace can deadlock itself with flock().
2013 		 * Current local behavior is to allow the deadlock,
2014 		 * but abort the system call if a signal is
2015 		 * received. We follow this example, otherwise a
2016 		 * poorly written program could sit in the kernel
2017 		 * until reboot.
2018 		 *
2019 		 * Handling this is a bit more complicated for Ocfs2
2020 		 * though. We can't exit this function with an
2021 		 * outstanding lock request, so a cancel convert is
2022 		 * required. We intentionally overwrite 'ret' - if the
2023 		 * cancel fails and the lock was granted, it's easier
2024 		 * to just bubble success back up to the user.
2025 		 */
2026 		ret = ocfs2_flock_handle_signal(lockres, level);
2027 	} else if (!ret && (level > lockres->l_level)) {
2028 		/* Trylock failed asynchronously */
2029 		BUG_ON(!trylock);
2030 		ret = -EAGAIN;
2031 	}
2032 
2033 out:
2034 
2035 	mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
2036 	     lockres->l_name, ex, trylock, ret);
2037 	return ret;
2038 }
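
/*
 * Illustrative caller sketch (example_flock is a hypothetical helper;
 * the real flock entry point lives outside this file). It assumes the
 * caller already serializes dlmglue flock calls per struct file, as
 * required above, and maps a non-blocking flock request onto trylock:
 *
 *	static int example_flock(struct file *file, int ex, int nonblock)
 *	{
 *		return ocfs2_file_lock(file, ex, nonblock);
 *	}
 */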
2039 
2040 void ocfs2_file_unlock(struct file *file)
2041 {
2042 	int ret;
2043 	unsigned int gen;
2044 	unsigned long flags;
2045 	struct ocfs2_file_private *fp = file->private_data;
2046 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
2047 	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
2048 	struct ocfs2_mask_waiter mw;
2049 
2050 	ocfs2_init_mask_waiter(&mw);
2051 
2052 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
2053 		return;
2054 
2055 	if (lockres->l_level == DLM_LOCK_NL)
2056 		return;
2057 
2058 	mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
2059 	     lockres->l_name, lockres->l_flags, lockres->l_level,
2060 	     lockres->l_action);
2061 
2062 	spin_lock_irqsave(&lockres->l_lock, flags);
2063 	/*
2064 	 * Fake a blocking ast for the downconvert code.
2065 	 */
2066 	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
2067 	lockres->l_blocking = DLM_LOCK_EX;
2068 
2069 	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
2070 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
2071 	spin_unlock_irqrestore(&lockres->l_lock, flags);
2072 
2073 	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
2074 	if (ret) {
2075 		mlog_errno(ret);
2076 		return;
2077 	}
2078 
2079 	ret = ocfs2_wait_for_mask(&mw);
2080 	if (ret)
2081 		mlog_errno(ret);
2082 }
2083 
2084 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2085 					struct ocfs2_lock_res *lockres)
2086 {
2087 	int kick = 0;
2088 
2089 	/* If we know that another node is waiting on our lock, kick
2090 	 * the downconvert thread pre-emptively when we reach a release
2091 	 * condition. */
2092 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
2093 		switch (lockres->l_blocking) {
2094 		case DLM_LOCK_EX:
2095 			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
2096 				kick = 1;
2097 			break;
2098 		case DLM_LOCK_PR:
2099 			if (!lockres->l_ex_holders)
2100 				kick = 1;
2101 			break;
2102 		default:
2103 			BUG();
2104 		}
2105 	}
2106 
2107 	if (kick)
2108 		ocfs2_wake_downconvert_thread(osb);
2109 }
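
/*
 * The compatibility matrix behind the switch above: a blocked EX
 * request conflicts with any holder, so we only kick once both holder
 * counts have drained, while a blocked PR request conflicts only with
 * EX holders, so PR holders need not drain first.
 */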
2110 
2111 #define OCFS2_SEC_BITS   34
2112 #define OCFS2_SEC_SHIFT  (64 - 34)
2113 #define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)
2114 
2115 /* LVB only has room for 64 bits of time here so we pack it for
2116  * now. */
2117 static u64 ocfs2_pack_timespec(struct timespec64 *spec)
2118 {
2119 	u64 res;
2120 	u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull);
2121 	u32 nsec = spec->tv_nsec;
2122 
2123 	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
2124 
2125 	return res;
2126 }
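
/*
 * Layout sketch of the packed value (OCFS2_SEC_SHIFT == 64 - 34 == 30):
 *
 *	63                           30 29                          0
 *	+------------------------------+----------------------------+
 *	|      seconds (34 bits)       |   nanoseconds (30 bits)    |
 *	+------------------------------+----------------------------+
 *
 * e.g. tv_sec == 1, tv_nsec == 5 packs to (1ULL << 30) | 5.
 * ocfs2_unpack_timespec() below reverses this exactly, modulo the
 * clamping of out-of-range seconds and the masking of nanoseconds.
 */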
2127 
2128 /* Call this with the lockres locked. I am reasonably sure we don't
2129  * need ip_lock in this function as anyone who would be changing those
2130  * values is supposed to be blocked in ocfs2_inode_lock right now. */
2131 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2132 {
2133 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2134 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2135 	struct ocfs2_meta_lvb *lvb;
2136 
2137 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2138 
2139 	/*
2140 	 * Invalidate the LVB of a deleted inode - this way other
2141 	 * nodes are forced to go to disk and discover the new inode
2142 	 * status.
2143 	 */
2144 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
2145 		lvb->lvb_version = 0;
2146 		goto out;
2147 	}
2148 
2149 	lvb->lvb_version   = OCFS2_LVB_VERSION;
2150 	lvb->lvb_isize	   = cpu_to_be64(i_size_read(inode));
2151 	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
2152 	lvb->lvb_iuid      = cpu_to_be32(i_uid_read(inode));
2153 	lvb->lvb_igid      = cpu_to_be32(i_gid_read(inode));
2154 	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
2155 	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
2156 	lvb->lvb_iatime_packed  =
2157 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
2158 	lvb->lvb_ictime_packed =
2159 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
2160 	lvb->lvb_imtime_packed =
2161 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2162 	lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
2163 	lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2164 	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2165 
2166 out:
2167 	mlog_meta_lvb(0, lockres);
2168 }
2169 
2170 static void ocfs2_unpack_timespec(struct timespec64 *spec,
2171 				  u64 packed_time)
2172 {
2173 	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2174 	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
2175 }
2176 
2177 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2178 {
2179 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2180 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2181 	struct ocfs2_meta_lvb *lvb;
2182 
2183 	mlog_meta_lvb(0, lockres);
2184 
2185 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2186 
2187 	/* We're safe here without the lockres lock... */
2188 	spin_lock(&oi->ip_lock);
2189 	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2190 	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2191 
2192 	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2193 	oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2194 	ocfs2_set_inode_flags(inode);
2195 
2196 	/* fast-symlinks are a special case */
2197 	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2198 		inode->i_blocks = 0;
2199 	else
2200 		inode->i_blocks = ocfs2_inode_sector_count(inode);
2201 
2202 	i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
2203 	i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
2204 	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
2205 	set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
2206 	ocfs2_unpack_timespec(&inode->i_atime,
2207 			      be64_to_cpu(lvb->lvb_iatime_packed));
2208 	ocfs2_unpack_timespec(&inode->i_mtime,
2209 			      be64_to_cpu(lvb->lvb_imtime_packed));
2210 	ocfs2_unpack_timespec(&inode->i_ctime,
2211 			      be64_to_cpu(lvb->lvb_ictime_packed));
2212 	spin_unlock(&oi->ip_lock);
2213 }
2214 
2215 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2216 					      struct ocfs2_lock_res *lockres)
2217 {
2218 	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2219 
2220 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2221 	    && lvb->lvb_version == OCFS2_LVB_VERSION
2222 	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2223 		return 1;
2224 	return 0;
2225 }
2226 
2227 /* Determine whether a lock resource needs to be refreshed, and
2228  * arbitrate who gets to refresh it.
2229  *
2230  *   0 means no refresh needed.
2231  *
2232  *   > 0 means you need to refresh this and you MUST call
2233  *   ocfs2_complete_lock_res_refresh afterwards. */
2234 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2235 {
2236 	unsigned long flags;
2237 	int status = 0;
2238 
2239 refresh_check:
2240 	spin_lock_irqsave(&lockres->l_lock, flags);
2241 	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2242 		spin_unlock_irqrestore(&lockres->l_lock, flags);
2243 		goto bail;
2244 	}
2245 
2246 	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2247 		spin_unlock_irqrestore(&lockres->l_lock, flags);
2248 
2249 		ocfs2_wait_on_refreshing_lock(lockres);
2250 		goto refresh_check;
2251 	}
2252 
2253 	/* Ok, I'll be the one to refresh this lock. */
2254 	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2255 	spin_unlock_irqrestore(&lockres->l_lock, flags);
2256 
2257 	status = 1;
2258 bail:
2259 	mlog(0, "status %d\n", status);
2260 	return status;
2261 }
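
/*
 * Sketch of the intended caller pattern (it mirrors ocfs2_super_lock()
 * below; refresh_covered_resources is a hypothetical stand-in):
 *
 *	status = ocfs2_should_refresh_lock_res(lockres);
 *	if (status) {
 *		status = refresh_covered_resources(...);
 *		ocfs2_complete_lock_res_refresh(lockres, status);
 *	}
 */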
2262 
2263 /* If status is non-zero, I'll mark it as not being in refresh
2264  * anymore, but I won't clear the needs refresh flag. */
2265 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2266 						   int status)
2267 {
2268 	unsigned long flags;
2269 
2270 	spin_lock_irqsave(&lockres->l_lock, flags);
2271 	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2272 	if (!status)
2273 		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2274 	spin_unlock_irqrestore(&lockres->l_lock, flags);
2275 
2276 	wake_up(&lockres->l_event);
2277 }
2278 
2279 /* Returns a bh in *bh only if the refresh had to go to disk. */
2280 static int ocfs2_inode_lock_update(struct inode *inode,
2281 				  struct buffer_head **bh)
2282 {
2283 	int status = 0;
2284 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2285 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2286 	struct ocfs2_dinode *fe;
2287 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2288 
2289 	if (ocfs2_mount_local(osb))
2290 		goto bail;
2291 
2292 	spin_lock(&oi->ip_lock);
2293 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
2294 		mlog(0, "Orphaned inode %llu was deleted while we "
2295 		     "were waiting on a lock. ip_flags = 0x%x\n",
2296 		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
2297 		spin_unlock(&oi->ip_lock);
2298 		status = -ENOENT;
2299 		goto bail;
2300 	}
2301 	spin_unlock(&oi->ip_lock);
2302 
2303 	if (!ocfs2_should_refresh_lock_res(lockres))
2304 		goto bail;
2305 
2306 	/* This will discard any caching information we might have had
2307 	 * for the inode metadata. */
2308 	ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2309 
2310 	ocfs2_extent_map_trunc(inode, 0);
2311 
2312 	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2313 		mlog(0, "Trusting LVB on inode %llu\n",
2314 		     (unsigned long long)oi->ip_blkno);
2315 		ocfs2_refresh_inode_from_lvb(inode);
2316 	} else {
2317 		/* Boo, we have to go to disk. */
2318 		/* read bh, cast, ocfs2_refresh_inode */
2319 		status = ocfs2_read_inode_block(inode, bh);
2320 		if (status < 0) {
2321 			mlog_errno(status);
2322 			goto bail_refresh;
2323 		}
2324 		fe = (struct ocfs2_dinode *) (*bh)->b_data;
2325 
2326 		/* This is a good chance to make sure we're not
2327 		 * locking an invalid object.  ocfs2_read_inode_block()
2328 		 * already checked that the inode block is sane.
2329 		 *
2330 		 * We bug on a stale inode here because we checked
2331 		 * above whether it was wiped from disk. The wiping
2332 		 * node provides a guarantee that we receive that
2333 		 * message and can mark the inode before dropping any
2334 		 * locks associated with it. */
2335 		mlog_bug_on_msg(inode->i_generation !=
2336 				le32_to_cpu(fe->i_generation),
2337 				"Invalid dinode %llu disk generation: %u "
2338 				"inode->i_generation: %u\n",
2339 				(unsigned long long)oi->ip_blkno,
2340 				le32_to_cpu(fe->i_generation),
2341 				inode->i_generation);
2342 		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2343 				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2344 				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
2345 				(unsigned long long)oi->ip_blkno,
2346 				(unsigned long long)le64_to_cpu(fe->i_dtime),
2347 				le32_to_cpu(fe->i_flags));
2348 
2349 		ocfs2_refresh_inode(inode, fe);
2350 		ocfs2_track_lock_refresh(lockres);
2351 	}
2352 
2353 	status = 0;
2354 bail_refresh:
2355 	ocfs2_complete_lock_res_refresh(lockres, status);
2356 bail:
2357 	return status;
2358 }
2359 
2360 static int ocfs2_assign_bh(struct inode *inode,
2361 			   struct buffer_head **ret_bh,
2362 			   struct buffer_head *passed_bh)
2363 {
2364 	int status;
2365 
2366 	if (passed_bh) {
2367 		/* Ok, the update went to disk for us, use the
2368 		 * returned bh. */
2369 		*ret_bh = passed_bh;
2370 		get_bh(*ret_bh);
2371 
2372 		return 0;
2373 	}
2374 
2375 	status = ocfs2_read_inode_block(inode, ret_bh);
2376 	if (status < 0)
2377 		mlog_errno(status);
2378 
2379 	return status;
2380 }
2381 
2382 /*
2383  * Returns an error (< 0) if the callback will never be called; otherwise
2384  * the result of the lock will be communicated via the callback.
2385  */
2386 int ocfs2_inode_lock_full_nested(struct inode *inode,
2387 				 struct buffer_head **ret_bh,
2388 				 int ex,
2389 				 int arg_flags,
2390 				 int subclass)
2391 {
2392 	int status, level, acquired;
2393 	u32 dlm_flags;
2394 	struct ocfs2_lock_res *lockres = NULL;
2395 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2396 	struct buffer_head *local_bh = NULL;
2397 
2398 	mlog(0, "inode %llu, take %s META lock\n",
2399 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
2400 	     ex ? "EXMODE" : "PRMODE");
2401 
2402 	status = 0;
2403 	acquired = 0;
2404 	/* We'll allow faking a readonly metadata lock for
2405 	 * rodevices. */
2406 	if (ocfs2_is_hard_readonly(osb)) {
2407 		if (ex)
2408 			status = -EROFS;
2409 		goto getbh;
2410 	}
2411 
2412 	if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
2413 	    ocfs2_mount_local(osb))
2414 		goto update;
2415 
2416 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2417 		ocfs2_wait_for_recovery(osb);
2418 
2419 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
2420 	level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2421 	dlm_flags = 0;
2422 	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2423 		dlm_flags |= DLM_LKF_NOQUEUE;
2424 
2425 	status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2426 				      arg_flags, subclass, _RET_IP_);
2427 	if (status < 0) {
2428 		if (status != -EAGAIN)
2429 			mlog_errno(status);
2430 		goto bail;
2431 	}
2432 
2433 	/* Notify the error cleanup path to drop the cluster lock. */
2434 	acquired = 1;
2435 
2436 	/* We wait twice because a node may have died while we were in
2437 	 * the lower dlm layers. The second time though, we've
2438 	 * committed to owning this lock so we don't allow signals to
2439 	 * abort the operation. */
2440 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2441 		ocfs2_wait_for_recovery(osb);
2442 
2443 update:
2444 	/*
2445 	 * We only see this flag if we're being called from
2446 	 * ocfs2_read_locked_inode(). It means we're locking an inode
2447 	 * which hasn't been populated yet, so clear the refresh flag
2448 	 * and let the caller handle it.
2449 	 */
2450 	if (inode->i_state & I_NEW) {
2451 		status = 0;
2452 		if (lockres)
2453 			ocfs2_complete_lock_res_refresh(lockres, 0);
2454 		goto bail;
2455 	}
2456 
2457 	/* This is fun. The caller may want a bh back, or it may
2458 	 * not. ocfs2_inode_lock_update definitely wants one in, but
2459 	 * may or may not read one, depending on what's in the
2460 	 * LVB. The result of all of this is that we've *only* gone to
2461 	 * disk if we have to, so the complexity is worthwhile. */
2462 	status = ocfs2_inode_lock_update(inode, &local_bh);
2463 	if (status < 0) {
2464 		if (status != -ENOENT)
2465 			mlog_errno(status);
2466 		goto bail;
2467 	}
2468 getbh:
2469 	if (ret_bh) {
2470 		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2471 		if (status < 0) {
2472 			mlog_errno(status);
2473 			goto bail;
2474 		}
2475 	}
2476 
2477 bail:
2478 	if (status < 0) {
2479 		if (ret_bh && (*ret_bh)) {
2480 			brelse(*ret_bh);
2481 			*ret_bh = NULL;
2482 		}
2483 		if (acquired)
2484 			ocfs2_inode_unlock(inode, ex);
2485 	}
2486 
2487 	if (local_bh)
2488 		brelse(local_bh);
2489 
2490 	return status;
2491 }
2492 
2493 /*
2494  * This is working around a lock inversion between tasks acquiring DLM
2495  * locks while holding a page lock and the downconvert thread which
2496  * blocks dlm lock acquiry while acquiring page locks.
2497  *
2498  * ** These _with_page variants are only intended to be called from aop
2499  * methods that hold page locks and return a very specific *positive* error
2500  * code that aop methods pass up to the VFS -- test for errors with != 0. **
2501  *
2502  * The DLM is called such that it returns -EAGAIN if it would have
2503  * blocked waiting for the downconvert thread.  In that case we unlock
2504  * our page so the downconvert thread can make progress.  Once we've
2505  * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2506  * that called us can bubble that back up into the VFS who will then
2507  * immediately retry the aop call.
2508  */
2509 int ocfs2_inode_lock_with_page(struct inode *inode,
2510 			      struct buffer_head **ret_bh,
2511 			      int ex,
2512 			      struct page *page)
2513 {
2514 	int ret;
2515 
2516 	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2517 	if (ret == -EAGAIN) {
2518 		unlock_page(page);
2519 		/*
2520 		 * If we can't get the inode lock immediately, we should not
2521 		 * return directly here, since that would lead to a softlockup.
2522 		 * Instead we take a blocking lock and immediately unlock it
2523 		 * before returning; this avoids wasting CPU on lots of
2524 		 * retries and improves fairness in acquiring the lock.
2525 		 */
2526 		if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2527 			ocfs2_inode_unlock(inode, ex);
2528 		ret = AOP_TRUNCATED_PAGE;
2529 	}
2530 
2531 	return ret;
2532 }
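
/*
 * Illustrative aop-side sketch (hypothetical method body). Note the
 * != 0 test: AOP_TRUNCATED_PAGE is a positive value which must be
 * bubbled straight back up so the VFS retries the aop call:
 *
 *	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *	if (ret != 0)
 *		return ret;
 *	... do work with the page and inode locked ...
 *	ocfs2_inode_unlock(inode, 0);
 */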
2533 
2534 int ocfs2_inode_lock_atime(struct inode *inode,
2535 			  struct vfsmount *vfsmnt,
2536 			  int *level, int wait)
2537 {
2538 	int ret;
2539 
2540 	if (wait)
2541 		ret = ocfs2_inode_lock(inode, NULL, 0);
2542 	else
2543 		ret = ocfs2_try_inode_lock(inode, NULL, 0);
2544 
2545 	if (ret < 0) {
2546 		if (ret != -EAGAIN)
2547 			mlog_errno(ret);
2548 		return ret;
2549 	}
2550 
2551 	/*
2552 	 * If we should update atime, we will get an EX lock,
2553 	 * otherwise we just get a PR lock.
2554 	 */
2555 	if (ocfs2_should_update_atime(inode, vfsmnt)) {
2556 		struct buffer_head *bh = NULL;
2557 
2558 		ocfs2_inode_unlock(inode, 0);
2559 		if (wait)
2560 			ret = ocfs2_inode_lock(inode, &bh, 1);
2561 		else
2562 			ret = ocfs2_try_inode_lock(inode, &bh, 1);
2563 
2564 		if (ret < 0) {
2565 			if (ret != -EAGAIN)
2566 				mlog_errno(ret);
2567 			return ret;
2568 		}
2569 		*level = 1;
2570 		if (ocfs2_should_update_atime(inode, vfsmnt))
2571 			ocfs2_update_inode_atime(inode, bh);
2572 		if (bh)
2573 			brelse(bh);
2574 	} else
2575 		*level = 0;
2576 
2577 	return ret;
2578 }
2579 
2580 void ocfs2_inode_unlock(struct inode *inode,
2581 		       int ex)
2582 {
2583 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2584 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2585 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2586 
2587 	mlog(0, "inode %llu drop %s META lock\n",
2588 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
2589 	     ex ? "EXMODE" : "PRMODE");
2590 
2591 	if (!ocfs2_is_hard_readonly(osb) &&
2592 	    !ocfs2_mount_local(osb))
2593 		ocfs2_cluster_unlock(osb, lockres, level);
2594 }
2595 
2596 /*
2597  * These _tracker variants are introduced to deal with the recursive cluster
2598  * locking issue. The idea is to keep track of a lock holder on the stack of
2599  * the current process. If there's a lock holder on the stack, we know the
2600  * task context is already protected by cluster locking. Currently, they're
2601  * used in some VFS entry routines.
2602  *
2603  * Return < 0 on error, == 0 if there was no lock holder on the stack
2604  * before this call, and == 1 if this call is a recursive lock.
2605  * The forbidden-upgrade case returns -EINVAL (see case 2.2 below).
2606  *
2607  * When taking lock levels into account, we face several different situations.
2608  *
2609  * 1. no lock is held
2610  *    In this case, just lock the inode as requested and return 0
2611  *
2612  * 2. We are holding a lock
2613  *    For this situation, things diverge into several cases:
2614  *
2615  *    wanted     holding	     what to do
2616  *    ex		ex	    see 2.1 below
2617  *    ex		pr	    see 2.2 below
2618  *    pr		ex	    see 2.1 below
2619  *    pr		pr	    see 2.1 below
2620  *
2621  *    2.1 The lock level being held is compatible
2622  *    with the wanted level, so no lock action will be taken.
2623  *
2624  *    2.2 Otherwise, an upgrade is needed, but it is forbidden.
2625  *
2626  * The reason an upgrade within a process is forbidden is that a
2627  * lock upgrade may cause deadlock. The following illustrates
2628  * how that happens.
2629  *
2630  *         thread on node1                             thread on node2
2631  * ocfs2_inode_lock_tracker(ex=0)
2632  *
2633  *                                <======   ocfs2_inode_lock_tracker(ex=1)
2634  *
2635  * ocfs2_inode_lock_tracker(ex=1)
2636  */
2637 int ocfs2_inode_lock_tracker(struct inode *inode,
2638 			     struct buffer_head **ret_bh,
2639 			     int ex,
2640 			     struct ocfs2_lock_holder *oh)
2641 {
2642 	int status = 0;
2643 	struct ocfs2_lock_res *lockres;
2644 	struct ocfs2_lock_holder *tmp_oh;
2645 	struct pid *pid = task_pid(current);
2646 
2647 
2648 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
2649 	tmp_oh = ocfs2_pid_holder(lockres, pid);
2650 
2651 	if (!tmp_oh) {
2652 		/*
2653 		 * This corresponds to case 1.
2654 		 * We don't hold any lock yet.
2655 		 */
2656 		status = ocfs2_inode_lock_full(inode, ret_bh, ex, 0);
2657 		if (status < 0) {
2658 			if (status != -ENOENT)
2659 				mlog_errno(status);
2660 			return status;
2661 		}
2662 
2663 		oh->oh_ex = ex;
2664 		ocfs2_add_holder(lockres, oh);
2665 		return 0;
2666 	}
2667 
2668 	if (unlikely(ex && !tmp_oh->oh_ex)) {
2669 		/*
2670 		 * case 2.2: an upgrade may cause deadlock, so forbid it.
2671 		 */
2672 		mlog(ML_ERROR, "Recursive locking is not permitted to "
2673 		     "upgrade to EX level from PR level.\n");
2674 		dump_stack();
2675 		return -EINVAL;
2676 	}
2677 
2678 	/*
2679 	 *  case 2.1: the OCFS2_META_LOCK_GETBH flag makes ocfs2_inode_lock_full
2680 	 *  ignore the lock level and just update the buffer head.
2681 	 */
2682 	if (ret_bh) {
2683 		status = ocfs2_inode_lock_full(inode, ret_bh, ex,
2684 					       OCFS2_META_LOCK_GETBH);
2685 		if (status < 0) {
2686 			if (status != -ENOENT)
2687 				mlog_errno(status);
2688 			return status;
2689 		}
2690 	}
2691 	return tmp_oh ? 1 : 0;
2692 }
2693 
2694 void ocfs2_inode_unlock_tracker(struct inode *inode,
2695 				int ex,
2696 				struct ocfs2_lock_holder *oh,
2697 				int had_lock)
2698 {
2699 	struct ocfs2_lock_res *lockres;
2700 
2701 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
2702 	/* had_lock means that the current process already took the cluster
2703 	 * lock before this call.
2704 	 * If had_lock is 1, we have nothing to do here.
2705 	 * If had_lock is 0, we will release the lock.
2706 	 */
2707 	if (!had_lock) {
2708 		ocfs2_inode_unlock(inode, oh->oh_ex);
2709 		ocfs2_remove_holder(lockres, oh);
2710 	}
2711 }
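
/*
 * Typical tracker usage sketch ('oh' must live on the caller's stack
 * for the duration of the critical section):
 *
 *	struct ocfs2_lock_holder oh;
 *	int had_lock;
 *
 *	had_lock = ocfs2_inode_lock_tracker(inode, NULL, ex, &oh);
 *	if (had_lock < 0)
 *		return had_lock;
 *	... protected work ...
 *	ocfs2_inode_unlock_tracker(inode, ex, &oh, had_lock);
 */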
2712 
2713 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2714 {
2715 	struct ocfs2_lock_res *lockres;
2716 	struct ocfs2_orphan_scan_lvb *lvb;
2717 	int status = 0;
2718 
2719 	if (ocfs2_is_hard_readonly(osb))
2720 		return -EROFS;
2721 
2722 	if (ocfs2_mount_local(osb))
2723 		return 0;
2724 
2725 	lockres = &osb->osb_orphan_scan.os_lockres;
2726 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2727 	if (status < 0)
2728 		return status;
2729 
2730 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2731 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2732 	    lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2733 		*seqno = be32_to_cpu(lvb->lvb_os_seqno);
2734 	else
2735 		*seqno = osb->osb_orphan_scan.os_seqno + 1;
2736 
2737 	return status;
2738 }
2739 
2740 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2741 {
2742 	struct ocfs2_lock_res *lockres;
2743 	struct ocfs2_orphan_scan_lvb *lvb;
2744 
2745 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2746 		lockres = &osb->osb_orphan_scan.os_lockres;
2747 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2748 		lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2749 		lvb->lvb_os_seqno = cpu_to_be32(seqno);
2750 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2751 	}
2752 }
2753 
2754 int ocfs2_super_lock(struct ocfs2_super *osb,
2755 		     int ex)
2756 {
2757 	int status = 0;
2758 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2759 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2760 
2761 	if (ocfs2_is_hard_readonly(osb))
2762 		return -EROFS;
2763 
2764 	if (ocfs2_mount_local(osb))
2765 		goto bail;
2766 
2767 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2768 	if (status < 0) {
2769 		mlog_errno(status);
2770 		goto bail;
2771 	}
2772 
2773 	/* The super block lock path is really in the best position to
2774 	 * know when resources covered by the lock need to be
2775 	 * refreshed, so we do it here. Of course, making sense of
2776 	 * everything is up to the caller :) */
2777 	status = ocfs2_should_refresh_lock_res(lockres);
2778 	if (status) {
2779 		status = ocfs2_refresh_slot_info(osb);
2780 
2781 		ocfs2_complete_lock_res_refresh(lockres, status);
2782 
2783 		if (status < 0) {
2784 			ocfs2_cluster_unlock(osb, lockres, level);
2785 			mlog_errno(status);
2786 		}
2787 		ocfs2_track_lock_refresh(lockres);
2788 	}
2789 bail:
2790 	return status;
2791 }
2792 
2793 void ocfs2_super_unlock(struct ocfs2_super *osb,
2794 			int ex)
2795 {
2796 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2797 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2798 
2799 	if (!ocfs2_mount_local(osb))
2800 		ocfs2_cluster_unlock(osb, lockres, level);
2801 }
2802 
2803 int ocfs2_rename_lock(struct ocfs2_super *osb)
2804 {
2805 	int status;
2806 	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2807 
2808 	if (ocfs2_is_hard_readonly(osb))
2809 		return -EROFS;
2810 
2811 	if (ocfs2_mount_local(osb))
2812 		return 0;
2813 
2814 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2815 	if (status < 0)
2816 		mlog_errno(status);
2817 
2818 	return status;
2819 }
2820 
2821 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2822 {
2823 	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2824 
2825 	if (!ocfs2_mount_local(osb))
2826 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2827 }
2828 
2829 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2830 {
2831 	int status;
2832 	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2833 
2834 	if (ocfs2_is_hard_readonly(osb))
2835 		return -EROFS;
2836 
2837 	if (ocfs2_mount_local(osb))
2838 		return 0;
2839 
2840 	status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2841 				    0, 0);
2842 	if (status < 0)
2843 		mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2844 
2845 	return status;
2846 }
2847 
2848 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2849 {
2850 	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2851 
2852 	if (!ocfs2_mount_local(osb))
2853 		ocfs2_cluster_unlock(osb, lockres,
2854 				     ex ? LKM_EXMODE : LKM_PRMODE);
2855 }
2856 
2857 int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
2858 		       struct ocfs2_trim_fs_info *info, int trylock)
2859 {
2860 	int status;
2861 	struct ocfs2_trim_fs_lvb *lvb;
2862 	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
2863 
2864 	if (info)
2865 		info->tf_valid = 0;
2866 
2867 	if (ocfs2_is_hard_readonly(osb))
2868 		return -EROFS;
2869 
2870 	if (ocfs2_mount_local(osb))
2871 		return 0;
2872 
2873 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX,
2874 				    trylock ? DLM_LKF_NOQUEUE : 0, 0);
2875 	if (status < 0) {
2876 		if (status != -EAGAIN)
2877 			mlog_errno(status);
2878 		return status;
2879 	}
2880 
2881 	if (info) {
2882 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2883 		if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2884 		    lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION) {
2885 			info->tf_valid = 1;
2886 			info->tf_success = lvb->lvb_success;
2887 			info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
2888 			info->tf_start = be64_to_cpu(lvb->lvb_start);
2889 			info->tf_len = be64_to_cpu(lvb->lvb_len);
2890 			info->tf_minlen = be64_to_cpu(lvb->lvb_minlen);
2891 			info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
2892 		}
2893 	}
2894 
2895 	return status;
2896 }
2897 
2898 void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
2899 			  struct ocfs2_trim_fs_info *info)
2900 {
2901 	struct ocfs2_trim_fs_lvb *lvb;
2902 	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
2903 
2904 	if (ocfs2_mount_local(osb))
2905 		return;
2906 
2907 	if (info) {
2908 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2909 		lvb->lvb_version = OCFS2_TRIMFS_LVB_VERSION;
2910 		lvb->lvb_success = info->tf_success;
2911 		lvb->lvb_nodenum = cpu_to_be32(info->tf_nodenum);
2912 		lvb->lvb_start = cpu_to_be64(info->tf_start);
2913 		lvb->lvb_len = cpu_to_be64(info->tf_len);
2914 		lvb->lvb_minlen = cpu_to_be64(info->tf_minlen);
2915 		lvb->lvb_trimlen = cpu_to_be64(info->tf_trimlen);
2916 	}
2917 
2918 	ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2919 }
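
/*
 * Round-trip sketch (how an fstrim path would be expected to use this
 * pair): take the lock, consult any previous result another node left
 * in the LVB, then publish our own result at unlock time:
 *
 *	struct ocfs2_trim_fs_info info;
 *
 *	ret = ocfs2_trim_fs_lock(osb, &info, 1);
 *	if (ret == 0) {
 *		if (!(info.tf_valid && info.tf_success))
 *			... do the trim and fill in info ...
 *		ocfs2_trim_fs_unlock(osb, &info);
 *	}
 */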
2920 
2921 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2922 {
2923 	int ret;
2924 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2925 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2926 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2927 
2928 	BUG_ON(!dl);
2929 
2930 	if (ocfs2_is_hard_readonly(osb)) {
2931 		if (ex)
2932 			return -EROFS;
2933 		return 0;
2934 	}
2935 
2936 	if (ocfs2_mount_local(osb))
2937 		return 0;
2938 
2939 	ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2940 	if (ret < 0)
2941 		mlog_errno(ret);
2942 
2943 	return ret;
2944 }
2945 
2946 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2947 {
2948 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2949 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2950 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2951 
2952 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2953 		ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2954 }
2955 
2956 /* Reference counting of the dlm debug structure. We want this because
2957  * open references on the debug inodes can live on after a mount, so
2958  * we can't rely on the ocfs2_super to always exist. */
2959 static void ocfs2_dlm_debug_free(struct kref *kref)
2960 {
2961 	struct ocfs2_dlm_debug *dlm_debug;
2962 
2963 	dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2964 
2965 	kfree(dlm_debug);
2966 }
2967 
2968 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2969 {
2970 	if (dlm_debug)
2971 		kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2972 }
2973 
2974 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2975 {
2976 	kref_get(&debug->d_refcnt);
2977 }
2978 
2979 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2980 {
2981 	struct ocfs2_dlm_debug *dlm_debug;
2982 
2983 	dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2984 	if (!dlm_debug) {
2985 		mlog_errno(-ENOMEM);
2986 		goto out;
2987 	}
2988 
2989 	kref_init(&dlm_debug->d_refcnt);
2990 	INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2991 	dlm_debug->d_locking_state = NULL;
2992 out:
2993 	return dlm_debug;
2994 }
2995 
2996 /* Access to this is arbitrated for us via seq_file->sem. */
2997 struct ocfs2_dlm_seq_priv {
2998 	struct ocfs2_dlm_debug *p_dlm_debug;
2999 	struct ocfs2_lock_res p_iter_res;
3000 	struct ocfs2_lock_res p_tmp_res;
3001 };
3002 
3003 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
3004 						 struct ocfs2_dlm_seq_priv *priv)
3005 {
3006 	struct ocfs2_lock_res *iter, *ret = NULL;
3007 	struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
3008 
3009 	assert_spin_locked(&ocfs2_dlm_tracking_lock);
3010 
3011 	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
3012 		/* discover the head of the list */
3013 		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
3014 			mlog(0, "End of list found, %p\n", ret);
3015 			break;
3016 		}
3017 
3018 		/* We track our "dummy" iteration lockres' by a NULL
3019 		 * l_ops field. */
3020 		if (iter->l_ops != NULL) {
3021 			ret = iter;
3022 			break;
3023 		}
3024 	}
3025 
3026 	return ret;
3027 }
3028 
3029 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
3030 {
3031 	struct ocfs2_dlm_seq_priv *priv = m->private;
3032 	struct ocfs2_lock_res *iter;
3033 
3034 	spin_lock(&ocfs2_dlm_tracking_lock);
3035 	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
3036 	if (iter) {
3037 		/* Since lockres' have the lifetime of their container
3038 		 * (which can be inodes, ocfs2_supers, etc) we want to
3039 		 * copy this out to a temporary lockres while still
3040 		 * under the spinlock. Obviously after this we can't
3041 		 * trust any pointers on the copy returned, but that's
3042 		 * ok as the information we want isn't typically held
3043 		 * in them. */
3044 		priv->p_tmp_res = *iter;
3045 		iter = &priv->p_tmp_res;
3046 	}
3047 	spin_unlock(&ocfs2_dlm_tracking_lock);
3048 
3049 	return iter;
3050 }
3051 
3052 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
3053 {
3054 }
3055 
3056 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
3057 {
3058 	struct ocfs2_dlm_seq_priv *priv = m->private;
3059 	struct ocfs2_lock_res *iter = v;
3060 	struct ocfs2_lock_res *dummy = &priv->p_iter_res;
3061 
3062 	spin_lock(&ocfs2_dlm_tracking_lock);
3063 	iter = ocfs2_dlm_next_res(iter, priv);
3064 	list_del_init(&dummy->l_debug_list);
3065 	if (iter) {
3066 		list_add(&dummy->l_debug_list, &iter->l_debug_list);
3067 		priv->p_tmp_res = *iter;
3068 		iter = &priv->p_tmp_res;
3069 	}
3070 	spin_unlock(&ocfs2_dlm_tracking_lock);
3071 
3072 	return iter;
3073 }
3074 
3075 /*
3076  * Version is used by debugfs.ocfs2 to determine the format being used
3077  *
3078  * New in version 2
3079  *	- Lock stats printed
3080  * New in version 3
3081  *	- Max time in lock stats is in usecs (instead of nsecs)
3082  */
3083 #define OCFS2_DLM_DEBUG_STR_VERSION 3
3084 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
3085 {
3086 	int i;
3087 	char *lvb;
3088 	struct ocfs2_lock_res *lockres = v;
3089 
3090 	if (!lockres)
3091 		return -EINVAL;
3092 
3093 	seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
3094 
3095 	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
3096 		seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
3097 			   lockres->l_name,
3098 			   (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
3099 	else
3100 		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
3101 
3102 	seq_printf(m, "%d\t"
3103 		   "0x%lx\t"
3104 		   "0x%x\t"
3105 		   "0x%x\t"
3106 		   "%u\t"
3107 		   "%u\t"
3108 		   "%d\t"
3109 		   "%d\t",
3110 		   lockres->l_level,
3111 		   lockres->l_flags,
3112 		   lockres->l_action,
3113 		   lockres->l_unlock_action,
3114 		   lockres->l_ro_holders,
3115 		   lockres->l_ex_holders,
3116 		   lockres->l_requested,
3117 		   lockres->l_blocking);
3118 
3119 	/* Dump the raw LVB */
3120 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3121 	for (i = 0; i < DLM_LVB_LEN; i++)
3122 		seq_printf(m, "0x%x\t", lvb[i]);
3123 
3124 #ifdef CONFIG_OCFS2_FS_STATS
3125 # define lock_num_prmode(_l)		((_l)->l_lock_prmode.ls_gets)
3126 # define lock_num_exmode(_l)		((_l)->l_lock_exmode.ls_gets)
3127 # define lock_num_prmode_failed(_l)	((_l)->l_lock_prmode.ls_fail)
3128 # define lock_num_exmode_failed(_l)	((_l)->l_lock_exmode.ls_fail)
3129 # define lock_total_prmode(_l)		((_l)->l_lock_prmode.ls_total)
3130 # define lock_total_exmode(_l)		((_l)->l_lock_exmode.ls_total)
3131 # define lock_max_prmode(_l)		((_l)->l_lock_prmode.ls_max)
3132 # define lock_max_exmode(_l)		((_l)->l_lock_exmode.ls_max)
3133 # define lock_refresh(_l)		((_l)->l_lock_refresh)
3134 #else
3135 # define lock_num_prmode(_l)		(0)
3136 # define lock_num_exmode(_l)		(0)
3137 # define lock_num_prmode_failed(_l)	(0)
3138 # define lock_num_exmode_failed(_l)	(0)
3139 # define lock_total_prmode(_l)		(0ULL)
3140 # define lock_total_exmode(_l)		(0ULL)
3141 # define lock_max_prmode(_l)		(0)
3142 # define lock_max_exmode(_l)		(0)
3143 # define lock_refresh(_l)		(0)
3144 #endif
3145 	/* The following seq_print was added in version 2 of this output */
3146 	seq_printf(m, "%u\t"
3147 		   "%u\t"
3148 		   "%u\t"
3149 		   "%u\t"
3150 		   "%llu\t"
3151 		   "%llu\t"
3152 		   "%u\t"
3153 		   "%u\t"
3154 		   "%u\t",
3155 		   lock_num_prmode(lockres),
3156 		   lock_num_exmode(lockres),
3157 		   lock_num_prmode_failed(lockres),
3158 		   lock_num_exmode_failed(lockres),
3159 		   lock_total_prmode(lockres),
3160 		   lock_total_exmode(lockres),
3161 		   lock_max_prmode(lockres),
3162 		   lock_max_exmode(lockres),
3163 		   lock_refresh(lockres));
3164 
3165 	/* End the line */
3166 	seq_printf(m, "\n");
3167 	return 0;
3168 }
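
/*
 * Net effect: each tracked lockres becomes a single tab-separated
 * line -- version, name, level, flags, action, unlock action, RO/EX
 * holder counts, requested and blocking levels, the raw LVB bytes,
 * and (from version 2 on) the lock statistics printed above.
 */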
3169 
3170 static const struct seq_operations ocfs2_dlm_seq_ops = {
3171 	.start =	ocfs2_dlm_seq_start,
3172 	.stop =		ocfs2_dlm_seq_stop,
3173 	.next =		ocfs2_dlm_seq_next,
3174 	.show =		ocfs2_dlm_seq_show,
3175 };
3176 
3177 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
3178 {
3179 	struct seq_file *seq = file->private_data;
3180 	struct ocfs2_dlm_seq_priv *priv = seq->private;
3181 	struct ocfs2_lock_res *res = &priv->p_iter_res;
3182 
3183 	ocfs2_remove_lockres_tracking(res);
3184 	ocfs2_put_dlm_debug(priv->p_dlm_debug);
3185 	return seq_release_private(inode, file);
3186 }
3187 
3188 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
3189 {
3190 	struct ocfs2_dlm_seq_priv *priv;
3191 	struct ocfs2_super *osb;
3192 
3193 	priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
3194 	if (!priv) {
3195 		mlog_errno(-ENOMEM);
3196 		return -ENOMEM;
3197 	}
3198 
3199 	osb = inode->i_private;
3200 	ocfs2_get_dlm_debug(osb->osb_dlm_debug);
3201 	priv->p_dlm_debug = osb->osb_dlm_debug;
3202 	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
3203 
3204 	ocfs2_add_lockres_tracking(&priv->p_iter_res,
3205 				   priv->p_dlm_debug);
3206 
3207 	return 0;
3208 }
3209 
3210 static const struct file_operations ocfs2_dlm_debug_fops = {
3211 	.open =		ocfs2_dlm_debug_open,
3212 	.release =	ocfs2_dlm_debug_release,
3213 	.read =		seq_read,
3214 	.llseek =	seq_lseek,
3215 };
3216 
3217 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
3218 {
3219 	int ret = 0;
3220 	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3221 
3222 	dlm_debug->d_locking_state = debugfs_create_file("locking_state",
3223 							 S_IFREG|S_IRUSR,
3224 							 osb->osb_debug_root,
3225 							 osb,
3226 							 &ocfs2_dlm_debug_fops);
3227 	if (!dlm_debug->d_locking_state) {
3228 		ret = -EINVAL;
3229 		mlog(ML_ERROR,
3230 		     "Unable to create locking state debugfs file.\n");
3231 		goto out;
3232 	}
3233 
3234 	ocfs2_get_dlm_debug(dlm_debug);
3235 out:
3236 	return ret;
3237 }
3238 
3239 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
3240 {
3241 	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3242 
3243 	if (dlm_debug) {
3244 		debugfs_remove(dlm_debug->d_locking_state);
3245 		ocfs2_put_dlm_debug(dlm_debug);
3246 	}
3247 }
3248 
3249 int ocfs2_dlm_init(struct ocfs2_super *osb)
3250 {
3251 	int status = 0;
3252 	struct ocfs2_cluster_connection *conn = NULL;
3253 
3254 	if (ocfs2_mount_local(osb)) {
3255 		osb->node_num = 0;
3256 		goto local;
3257 	}
3258 
3259 	status = ocfs2_dlm_init_debug(osb);
3260 	if (status < 0) {
3261 		mlog_errno(status);
3262 		goto bail;
3263 	}
3264 
3265 	/* launch downconvert thread */
3266 	osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
3267 			osb->uuid_str);
3268 	if (IS_ERR(osb->dc_task)) {
3269 		status = PTR_ERR(osb->dc_task);
3270 		osb->dc_task = NULL;
3271 		mlog_errno(status);
3272 		goto bail;
3273 	}
3274 
3275 	/* for now, uuid == domain */
3276 	status = ocfs2_cluster_connect(osb->osb_cluster_stack,
3277 				       osb->osb_cluster_name,
3278 				       strlen(osb->osb_cluster_name),
3279 				       osb->uuid_str,
3280 				       strlen(osb->uuid_str),
3281 				       &lproto, ocfs2_do_node_down, osb,
3282 				       &conn);
3283 	if (status) {
3284 		mlog_errno(status);
3285 		goto bail;
3286 	}
3287 
3288 	status = ocfs2_cluster_this_node(conn, &osb->node_num);
3289 	if (status < 0) {
3290 		mlog_errno(status);
3291 		mlog(ML_ERROR,
3292 		     "could not find this host's node number\n");
3293 		ocfs2_cluster_disconnect(conn, 0);
3294 		goto bail;
3295 	}
3296 
3297 local:
3298 	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3299 	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3300 	ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3301 	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3302 
3303 	osb->cconn = conn;
3304 bail:
3305 	if (status < 0) {
3306 		ocfs2_dlm_shutdown_debug(osb);
3307 		if (osb->dc_task)
3308 			kthread_stop(osb->dc_task);
3309 	}
3310 
3311 	return status;
3312 }
3313 
3314 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3315 			int hangup_pending)
3316 {
3317 	ocfs2_drop_osb_locks(osb);
3318 
3319 	/*
3320 	 * Now that we have dropped all locks and ocfs2_dismount_volume()
3321 	 * has disabled recovery, the DLM won't be talking to us.  It's
3322 	 * safe to tear things down before disconnecting the cluster.
3323 	 */
3324 
3325 	if (osb->dc_task) {
3326 		kthread_stop(osb->dc_task);
3327 		osb->dc_task = NULL;
3328 	}
3329 
3330 	ocfs2_lock_res_free(&osb->osb_super_lockres);
3331 	ocfs2_lock_res_free(&osb->osb_rename_lockres);
3332 	ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3333 	ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3334 
3335 	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3336 	osb->cconn = NULL;
3337 
3338 	ocfs2_dlm_shutdown_debug(osb);
3339 }
3340 
3341 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3342 			   struct ocfs2_lock_res *lockres)
3343 {
3344 	int ret;
3345 	unsigned long flags;
3346 	u32 lkm_flags = 0;
3347 
3348 	/* We didn't get anywhere near actually using this lockres. */
3349 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3350 		goto out;
3351 
3352 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3353 		lkm_flags |= DLM_LKF_VALBLK;
3354 
3355 	spin_lock_irqsave(&lockres->l_lock, flags);
3356 
3357 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3358 			"lockres %s, flags 0x%lx\n",
3359 			lockres->l_name, lockres->l_flags);
3360 
3361 	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3362 		mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3363 		     "%u, unlock_action = %u\n",
3364 		     lockres->l_name, lockres->l_flags, lockres->l_action,
3365 		     lockres->l_unlock_action);
3366 
3367 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3368 
3369 		/* XXX: Today we just wait on any busy
3370 		 * locks... Perhaps we need to cancel converts in the
3371 		 * future? */
3372 		ocfs2_wait_on_busy_lock(lockres);
3373 
3374 		spin_lock_irqsave(&lockres->l_lock, flags);
3375 	}
3376 
3377 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3378 		if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3379 		    lockres->l_level == DLM_LOCK_EX &&
3380 		    !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3381 			lockres->l_ops->set_lvb(lockres);
3382 	}
3383 
3384 	if (lockres->l_flags & OCFS2_LOCK_BUSY)
3385 		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3386 		     lockres->l_name);
3387 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3388 		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3389 
3390 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3391 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3392 		goto out;
3393 	}
3394 
3395 	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3396 
3397 	/* make sure we never get here while waiting for an ast to
3398 	 * fire. */
3399 	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3400 
3401 	/* is this necessary? */
3402 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3403 	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3404 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3405 
3406 	mlog(0, "lock %s\n", lockres->l_name);
3407 
3408 	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3409 	if (ret) {
3410 		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3411 		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3412 		ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3413 		BUG();
3414 	}
3415 	mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3416 	     lockres->l_name);
3417 
3418 	ocfs2_wait_on_busy_lock(lockres);
3419 out:
3420 	return 0;
3421 }
3422 
3423 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3424 				       struct ocfs2_lock_res *lockres);
3425 
3426 /* Mark the lockres as being dropped. It will no longer be
3427  * queued if blocking, but we still may have to wait on it
3428  * being dequeued from the downconvert thread before we can consider
3429  * it safe to drop.
3430  *
3431  * You can *not* attempt to call cluster_lock on this lockres anymore. */
3432 void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
3433 				struct ocfs2_lock_res *lockres)
3434 {
3435 	int status;
3436 	struct ocfs2_mask_waiter mw;
3437 	unsigned long flags, flags2;
3438 
3439 	ocfs2_init_mask_waiter(&mw);
3440 
3441 	spin_lock_irqsave(&lockres->l_lock, flags);
3442 	lockres->l_flags |= OCFS2_LOCK_FREEING;
3443 	if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
3444 		/*
3445 		 * We know the downconvert is queued but not in progress
3446 		 * because we are the downconvert thread and are processing
3447 		 * a different lock. So we can just remove the lock from the
3448 		 * queue. This is not only an optimization but also a way
3449 		 * to avoid the following deadlock:
3450 		 *   ocfs2_dentry_post_unlock()
3451 		 *     ocfs2_dentry_lock_put()
3452 		 *       ocfs2_drop_dentry_lock()
3453 		 *         iput()
3454 		 *           ocfs2_evict_inode()
3455 		 *             ocfs2_clear_inode()
3456 		 *               ocfs2_mark_lockres_freeing()
3457 		 *                 ... blocks waiting for OCFS2_LOCK_QUEUED
3458 		 *                 since we are the downconvert thread which
3459 		 *                 should clear the flag.
3460 		 */
3461 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3462 		spin_lock_irqsave(&osb->dc_task_lock, flags2);
3463 		list_del_init(&lockres->l_blocked_list);
3464 		osb->blocked_lock_count--;
3465 		spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
3466 		/*
3467 		 * Warn if we recurse into another post_unlock call.  Strictly
3468 		 * speaking it isn't a problem but we need to be careful if
3469 		 * that happens (stack overflow, deadlocks, ...) so warn if
3470 		 * ocfs2 grows a path for which this can happen.
3471 		 */
3472 		WARN_ON_ONCE(lockres->l_ops->post_unlock);
3473 		/* Since the lock is freeing we don't do much in the fn below */
3474 		ocfs2_process_blocked_lock(osb, lockres);
3475 		return;
3476 	}
3477 	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3478 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3479 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3480 
3481 		mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3482 
3483 		status = ocfs2_wait_for_mask(&mw);
3484 		if (status)
3485 			mlog_errno(status);
3486 
3487 		spin_lock_irqsave(&lockres->l_lock, flags);
3488 	}
3489 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3490 }
3491 
3492 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3493 			       struct ocfs2_lock_res *lockres)
3494 {
3495 	int ret;
3496 
3497 	ocfs2_mark_lockres_freeing(osb, lockres);
3498 	ret = ocfs2_drop_lock(osb, lockres);
3499 	if (ret)
3500 		mlog_errno(ret);
3501 }
3502 
3503 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3504 {
3505 	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3506 	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3507 	ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3508 	ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3509 }
3510 
3511 int ocfs2_drop_inode_locks(struct inode *inode)
3512 {
3513 	int status, err;
3514 
3515 	/* No need to call ocfs2_mark_lockres_freeing here -
3516 	 * ocfs2_clear_inode has done it for us. */
3517 
3518 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3519 			      &OCFS2_I(inode)->ip_open_lockres);
3520 	if (err < 0)
3521 		mlog_errno(err);
3522 
3523 	status = err;
3524 
3525 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3526 			      &OCFS2_I(inode)->ip_inode_lockres);
3527 	if (err < 0)
3528 		mlog_errno(err);
3529 	if (err < 0 && !status)
3530 		status = err;
3531 
3532 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3533 			      &OCFS2_I(inode)->ip_rw_lockres);
3534 	if (err < 0)
3535 		mlog_errno(err);
3536 	if (err < 0 && !status)
3537 		status = err;
3538 
3539 	return status;
3540 }
3541 
3542 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3543 					      int new_level)
3544 {
3545 	assert_spin_locked(&lockres->l_lock);
3546 
3547 	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3548 
3549 	if (lockres->l_level <= new_level) {
3550 		mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3551 		     "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3552 		     "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3553 		     new_level, list_empty(&lockres->l_blocked_list),
3554 		     list_empty(&lockres->l_mask_waiters), lockres->l_type,
3555 		     lockres->l_flags, lockres->l_ro_holders,
3556 		     lockres->l_ex_holders, lockres->l_action,
3557 		     lockres->l_unlock_action, lockres->l_requested,
3558 		     lockres->l_blocking, lockres->l_pending_gen);
3559 		BUG();
3560 	}
3561 
3562 	mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3563 	     lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3564 
3565 	lockres->l_action = OCFS2_AST_DOWNCONVERT;
3566 	lockres->l_requested = new_level;
3567 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3568 	return lockres_set_pending(lockres);
3569 }
3570 
3571 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3572 				  struct ocfs2_lock_res *lockres,
3573 				  int new_level,
3574 				  int lvb,
3575 				  unsigned int generation)
3576 {
3577 	int ret;
3578 	u32 dlm_flags = DLM_LKF_CONVERT;
3579 
3580 	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3581 	     lockres->l_level, new_level);
3582 
3583 	/*
3584 	 * With respect to DLM_LKF_VALBLK, fsdlm behaves differently from o2cb:
3585 	 * it always expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so
3586 	 * that we can recover correctly from node failure. Otherwise, we may
3587 	 * get an invalid LVB in the LKB, but without DLM_SBF_VALNOTVALID set.
3588 	 */
3589 	if (ocfs2_userspace_stack(osb) &&
3590 	    lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3591 		lvb = 1;
3592 
3593 	if (lvb)
3594 		dlm_flags |= DLM_LKF_VALBLK;
3595 
3596 	ret = ocfs2_dlm_lock(osb->cconn,
3597 			     new_level,
3598 			     &lockres->l_lksb,
3599 			     dlm_flags,
3600 			     lockres->l_name,
3601 			     OCFS2_LOCK_ID_MAX_LEN - 1);
3602 	lockres_clear_pending(lockres, generation, osb);
3603 	if (ret) {
3604 		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3605 		ocfs2_recover_from_dlm_error(lockres, 1);
3606 		goto bail;
3607 	}
3608 
3609 	ret = 0;
3610 bail:
3611 	return ret;
3612 }
3613 
3614 /* returns 1 when the caller should drop the spinlock and cancel the convert */
3615 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3616 				        struct ocfs2_lock_res *lockres)
3617 {
3618 	assert_spin_locked(&lockres->l_lock);
3619 
3620 	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3621 		/* If we're already trying to cancel a lock conversion
3622 		 * then just drop the spinlock and allow the caller to
3623 		 * requeue this lock. */
3624 		mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3625 		return 0;
3626 	}
3627 
3628 	/* were we in a convert when the bast fired? */
3629 	BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3630 	       lockres->l_action != OCFS2_AST_DOWNCONVERT);
3631 	/* set things up so the unlock ast knows to just
3632 	 * clear out the ast_action and unset busy, etc. */
3633 	lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3634 
3635 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3636 			"lock %s, invalid flags: 0x%lx\n",
3637 			lockres->l_name, lockres->l_flags);
3638 
3639 	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3640 
3641 	return 1;
3642 }
3643 
3644 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3645 				struct ocfs2_lock_res *lockres)
3646 {
3647 	int ret;
3648 
3649 	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3650 			       DLM_LKF_CANCEL);
3651 	if (ret) {
3652 		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3653 		ocfs2_recover_from_dlm_error(lockres, 0);
3654 	}
3655 
3656 	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3657 
3658 	return ret;
3659 }
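
/*
 * Editor's illustration, not upstream code: a minimal sketch of how the
 * prepare/cancel pair above is meant to be used.  The real caller is
 * ocfs2_unblock_lock() below; the function name here is hypothetical and
 * the error handling is condensed.
 */
#if 0	/* illustrative only */
static int example_cancel_pending_convert(struct ocfs2_super *osb,
					  struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = ocfs2_prepare_cancel_convert(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	if (ret) {
		/* A convert is in flight: ask the DLM to cancel it.  The
		 * completion arrives via the unlock ast, keyed off
		 * OCFS2_UNLOCK_CANCEL_CONVERT set by the prepare step. */
		ret = ocfs2_cancel_convert(osb, lockres);
		if (ret < 0)
			mlog_errno(ret);
	}
	return ret;
}
#endif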
3660 
3661 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3662 			      struct ocfs2_lock_res *lockres,
3663 			      struct ocfs2_unblock_ctl *ctl)
3664 {
3665 	unsigned long flags;
3666 	int blocking;
3667 	int new_level;
3668 	int level;
3669 	int ret = 0;
3670 	int set_lvb = 0;
3671 	unsigned int gen;
3672 
3673 	spin_lock_irqsave(&lockres->l_lock, flags);
3674 
3675 recheck:
3676 	/*
3677 	 * Is it still blocking? If not, we have no more work to do.
3678 	 */
3679 	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3680 		BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3681 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3682 		ret = 0;
3683 		goto leave;
3684 	}
3685 
3686 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3687 		/* XXX
3688 		 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
3689 		 * exists entirely for one reason - another thread has set
3690 		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3691 		 *
3692 		 * If we do ocfs2_cancel_convert() before the other thread
3693 		 * calls dlm_lock(), our cancel will do nothing.  We will
3694 		 * get no ast, and we will have no way of knowing the
3695 		 * cancel failed.  Meanwhile, the other thread will call
3696 		 * into dlm_lock() and wait...forever.
3697 		 *
3698 		 * Why forever?  Because another node has asked for the
3699 		 * lock first; that's why we're here in unblock_lock().
3700 		 *
3701 		 * The solution is OCFS2_LOCK_PENDING.  When PENDING is
3702 		 * set, we just requeue the unblock.  Only when the other
3703 		 * thread has called dlm_lock() and cleared PENDING will
3704 		 * we then cancel their request.
3705 		 *
3706 		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING at
3707 		 * the same time they set OCFS2_LOCK_BUSY, and must clear
3708 		 * OCFS2_LOCK_PENDING after dlm_lock() returns.  A caller-side
3709 		 * sketch follows this function. */
3710 		if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3711 			mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3712 			     lockres->l_name);
3713 			goto leave_requeue;
3714 		}
3715 
3716 		ctl->requeue = 1;
3717 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
3718 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3719 		if (ret) {
3720 			ret = ocfs2_cancel_convert(osb, lockres);
3721 			if (ret < 0)
3722 				mlog_errno(ret);
3723 		}
3724 		goto leave;
3725 	}
3726 
3727 	/*
3728 	 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3729 	 * set when the ast is received for an upconvert just before the
3730 	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3731 	 * on the heels of the ast, we want to delay the downconvert just
3732 	 * enough to allow the upconvert requestor to do its task. Because this
3733 	 * lock is in the blocked queue, the lock will be downconverted
3734 	 * as soon as the requestor is done with the lock.
3735 	 */
3736 	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3737 		goto leave_requeue;
3738 
3739 	/*
3740 	 * How can we block and yet be at NL?  We were trying to upconvert
3741 	 * from NL and got canceled.  The code comes back here, and now
3742 	 * we notice and clear BLOCKING.
3743 	 */
3744 	if (lockres->l_level == DLM_LOCK_NL) {
3745 		BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3746 		mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3747 		lockres->l_blocking = DLM_LOCK_NL;
3748 		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3749 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3750 		goto leave;
3751 	}
3752 
3753 	/* if we're blocking an exclusive and we have *any* holders,
3754 	 * then requeue. */
3755 	if ((lockres->l_blocking == DLM_LOCK_EX)
3756 	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3757 		mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3758 		     lockres->l_name, lockres->l_ex_holders,
3759 		     lockres->l_ro_holders);
3760 		goto leave_requeue;
3761 	}
3762 
3763 	/* If it's a PR we're blocking, then only
3764 	 * requeue if we've got any EX holders */
3765 	if (lockres->l_blocking == DLM_LOCK_PR &&
3766 	    lockres->l_ex_holders) {
3767 		mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3768 		     lockres->l_name, lockres->l_ex_holders);
3769 		goto leave_requeue;
3770 	}
3771 
3772 	/*
3773 	 * Can we get a lock in this state if the holder counts are
3774 	 * zero? The metadata unblock code used to check this.
3775 	 */
3776 	if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3777 	    && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3778 		mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3779 		     lockres->l_name);
3780 		goto leave_requeue;
3781 	}
3782 
3783 	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3784 
3785 	if (lockres->l_ops->check_downconvert
3786 	    && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3787 		mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3788 		     lockres->l_name);
3789 		goto leave_requeue;
3790 	}
3791 
3792 	/* If we get here, then we know that there are no more
3793 	 * incompatible holders (and anyone asking for an incompatible
3794 	 * lock is blocked). We can now downconvert the lock. */
3795 	if (!lockres->l_ops->downconvert_worker)
3796 		goto downconvert;
3797 
3798 	/* Some lockres types want to do a bit of work before
3799 	 * downconverting a lock. Allow that here. The worker function
3800 	 * may sleep, so we save off a copy of what we're blocking as
3801 	 * it may change while we're not holding the spin lock. */
3802 	blocking = lockres->l_blocking;
3803 	level = lockres->l_level;
3804 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3805 
3806 	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3807 
3808 	if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3809 		mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3810 		     lockres->l_name);
3811 		goto leave;
3812 	}
3813 
3814 	spin_lock_irqsave(&lockres->l_lock, flags);
3815 	if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3816 		/* If this changed underneath us, then we can't drop
3817 		 * it just yet. */
3818 		mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3819 		     "Recheck\n", lockres->l_name, blocking,
3820 		     lockres->l_blocking, level, lockres->l_level);
3821 		goto recheck;
3822 	}
3823 
3824 downconvert:
3825 	ctl->requeue = 0;
3826 
3827 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3828 		if (lockres->l_level == DLM_LOCK_EX)
3829 			set_lvb = 1;
3830 
3831 		/*
3832 		 * We only set the lvb if the lock has been fully
3833 		 * refreshed - otherwise we risk writing stale
3834 		 * data. If it hasn't been refreshed, there's no need
3835 		 * to clear out the lvb here as its value is still valid.
3836 		 */
3837 		if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3838 			lockres->l_ops->set_lvb(lockres);
3839 	}
3840 
3841 	gen = ocfs2_prepare_downconvert(lockres, new_level);
3842 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3843 	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3844 				     gen);
3845 
3846 leave:
3847 	if (ret)
3848 		mlog_errno(ret);
3849 	return ret;
3850 
3851 leave_requeue:
3852 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3853 	ctl->requeue = 1;
3854 
3855 	return 0;
3856 }
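
/*
 * Editor's sketch, not upstream code: the caller-side half of the
 * OCFS2_LOCK_PENDING protocol described in ocfs2_unblock_lock() above.
 * It condenses what ocfs2_prepare_downconvert() and
 * ocfs2_downconvert_lock() actually do; the function name is hypothetical
 * and the l_action/l_requested bookkeeping is elided.
 */
#if 0	/* illustrative only */
static int example_pending_convert(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int new_level)
{
	unsigned long flags;
	unsigned int gen;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	/* BUSY and PENDING are set together, under l_lock, *before*
	 * dlm_lock() is called... */
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn, new_level, &lockres->l_lksb,
			     DLM_LKF_CONVERT, lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1);

	/* ...and PENDING is cleared only after dlm_lock() returns, so
	 * ocfs2_unblock_lock() never tries to cancel a convert the DLM
	 * has not seen yet.  The generation keeps a later convert from
	 * clearing someone else's PENDING. */
	lockres_clear_pending(lockres, gen, osb);
	return ret;
}
#endif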
3857 
3858 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3859 				     int blocking)
3860 {
3861 	struct inode *inode;
3862 	struct address_space *mapping;
3863 	struct ocfs2_inode_info *oi;
3864 
3865 	inode = ocfs2_lock_res_inode(lockres);
3866 	mapping = inode->i_mapping;
3867 
3868 	if (S_ISDIR(inode->i_mode)) {
3869 		oi = OCFS2_I(inode);
3870 		oi->ip_dir_lock_gen++;
3871 		mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
3872 		goto out;
3873 	}
3874 
3875 	if (!S_ISREG(inode->i_mode))
3876 		goto out;
3877 
3878 	/*
3879 	 * We need this before the filemap_fdatawrite() so that it can
3880 	 * transfer the dirty bit from the PTE to the
3881 	 * page. Unfortunately this means that even for EX->PR
3882 	 * downconverts, we'll lose our mappings and have to build
3883 	 * them up again.
3884 	 */
3885 	unmap_mapping_range(mapping, 0, 0, 0);
3886 
3887 	if (filemap_fdatawrite(mapping)) {
3888 		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!\n",
3889 		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
3890 	}
3891 	sync_mapping_buffers(mapping);
3892 	if (blocking == DLM_LOCK_EX) {
3893 		truncate_inode_pages(mapping, 0);
3894 	} else {
3895 		/* We only need to wait on the I/O if we're not also
3896 		 * truncating pages because truncate_inode_pages waits
3897 		 * for us above. We don't truncate pages if we're
3898 		 * blocking anything < EXMODE because we want to keep
3899 		 * them around in that case. */
3900 		filemap_fdatawait(mapping);
3901 	}
3902 
3903 	forget_all_cached_acls(inode);
3904 
3905 out:
3906 	return UNBLOCK_CONTINUE;
3907 }
3908 
3909 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3910 				 struct ocfs2_lock_res *lockres,
3911 				 int new_level)
3912 {
3913 	int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3914 
3915 	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3916 	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3917 
3918 	if (checkpointed)
3919 		return 1;
3920 
3921 	ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3922 	return 0;
3923 }
3924 
3925 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3926 					int new_level)
3927 {
3928 	struct inode *inode = ocfs2_lock_res_inode(lockres);
3929 
3930 	return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3931 }
3932 
3933 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3934 {
3935 	struct inode *inode = ocfs2_lock_res_inode(lockres);
3936 
3937 	__ocfs2_stuff_meta_lvb(inode);
3938 }
3939 
3940 /*
3941  * Does the final reference drop on our dentry lock. Right now this
3942  * happens in the downconvert thread, but we could choose to simplify the
3943  * dlmglue API and push these off to the ocfs2_wq in the future.
3944  */
3945 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3946 				     struct ocfs2_lock_res *lockres)
3947 {
3948 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3949 	ocfs2_dentry_lock_put(osb, dl);
3950 }
3951 
3952 /*
3953  * d_delete() matching dentries before the lock downconvert.
3954  *
3955  * At this point, any process waiting to destroy the
3956  * dentry_lock due to last ref count is stopped by the
3957  * OCFS2_LOCK_QUEUED flag.
3958  *
3959  * We have two potential problems
3960  *
3961  * 1) If we do the last reference drop on our dentry_lock (via dput)
3962  *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
3963  *    the downconvert to finish. Instead we take an elevated
3964  *    reference and push the drop until after we've completed our
3965  *    unblock processing.
3966  *
3967  * 2) There might be another process with a final reference,
3968  *    waiting on us to finish processing. If this is the case, we
3969  *    detect it and exit out - there are no more dentries anyway.
3970  */
3971 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3972 				       int blocking)
3973 {
3974 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3975 	struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3976 	struct dentry *dentry;
3977 	unsigned long flags;
3978 	int extra_ref = 0;
3979 
3980 	/*
3981 	 * This node is blocking another node from getting a read
3982 	 * lock. This happens when we've renamed within a
3983 	 * directory. We've forced the other nodes to d_delete(), but
3984 	 * we never actually dropped our lock because it's still
3985 	 * valid. The downconvert code will retain a PR for this node,
3986 	 * so there's no further work to do.
3987 	 */
3988 	if (blocking == DLM_LOCK_PR)
3989 		return UNBLOCK_CONTINUE;
3990 
3991 	/*
3992 	 * Mark this inode as potentially orphaned. The code in
3993 	 * ocfs2_delete_inode() will figure out whether it actually
3994 	 * needs to be freed or not.
3995 	 */
3996 	spin_lock(&oi->ip_lock);
3997 	oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3998 	spin_unlock(&oi->ip_lock);
3999 
4000 	/*
4001 	 * Yuck. We do, however, need to make sure that the check of
4002 	 * OCFS2_LOCK_FREEING and the extra reference are atomic with
4003 	 * respect to a reference decrement or the setting of that
4004 	 * flag.
4005 	 */
4006 	spin_lock_irqsave(&lockres->l_lock, flags);
4007 	spin_lock(&dentry_attach_lock);
4008 	if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
4009 	    && dl->dl_count) {
4010 		dl->dl_count++;
4011 		extra_ref = 1;
4012 	}
4013 	spin_unlock(&dentry_attach_lock);
4014 	spin_unlock_irqrestore(&lockres->l_lock, flags);
4015 
4016 	mlog(0, "extra_ref = %d\n", extra_ref);
4017 
4018 	/*
4019 	 * We have a process waiting on us in ocfs2_dentry_iput(),
4020 	 * which means we can't have any more outstanding
4021 	 * aliases. There's no need to do any more work.
4022 	 */
4023 	if (!extra_ref)
4024 		return UNBLOCK_CONTINUE;
4025 
4026 	spin_lock(&dentry_attach_lock);
4027 	while (1) {
4028 		dentry = ocfs2_find_local_alias(dl->dl_inode,
4029 						dl->dl_parent_blkno, 1);
4030 		if (!dentry)
4031 			break;
4032 		spin_unlock(&dentry_attach_lock);
4033 
4034 		if (S_ISDIR(dl->dl_inode->i_mode))
4035 			shrink_dcache_parent(dentry);
4036 
4037 		mlog(0, "d_delete(%pd);\n", dentry);
4038 
4039 		/*
4040 		 * The following dcache calls may do an
4041 		 * iput(). Normally we don't want that from the
4042 		 * downconverting thread, but in this case it's ok
4043 		 * because the requesting node already has an
4044 		 * exclusive lock on the inode, so it can't be queued
4045 		 * for a downconvert.
4046 		 */
4047 		d_delete(dentry);
4048 		dput(dentry);
4049 
4050 		spin_lock(&dentry_attach_lock);
4051 	}
4052 	spin_unlock(&dentry_attach_lock);
4053 
4054 	/*
4055 	 * If we are the last holder of this dentry lock, there is no
4056 	 * reason to downconvert so skip straight to the unlock.
4057 	 */
4058 	if (dl->dl_count == 1)
4059 		return UNBLOCK_STOP_POST;
4060 
4061 	return UNBLOCK_CONTINUE_POST;
4062 }
4063 
4064 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
4065 					    int new_level)
4066 {
4067 	struct ocfs2_refcount_tree *tree =
4068 				ocfs2_lock_res_refcount_tree(lockres);
4069 
4070 	return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
4071 }
4072 
4073 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
4074 					 int blocking)
4075 {
4076 	struct ocfs2_refcount_tree *tree =
4077 				ocfs2_lock_res_refcount_tree(lockres);
4078 
4079 	ocfs2_metadata_cache_purge(&tree->rf_ci);
4080 
4081 	return UNBLOCK_CONTINUE;
4082 }
4083 
4084 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
4085 {
4086 	struct ocfs2_qinfo_lvb *lvb;
4087 	struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
4088 	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
4089 					    oinfo->dqi_gi.dqi_type);
4090 
4091 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
4092 	lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
4093 	lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
4094 	lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
4095 	lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
4096 	lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
4097 	lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
4098 	lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
4099 }
4100 
4101 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
4102 {
4103 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
4104 	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
4105 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4106 
4107 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
4108 		ocfs2_cluster_unlock(osb, lockres, level);
4109 }
4110 
4111 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
4112 {
4113 	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
4114 					    oinfo->dqi_gi.dqi_type);
4115 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
4116 	struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
4117 	struct buffer_head *bh = NULL;
4118 	struct ocfs2_global_disk_dqinfo *gdinfo;
4119 	int status = 0;
4120 
4121 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
4122 	    lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
4123 		info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
4124 		info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
4125 		oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
4126 		oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
4127 		oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
4128 		oinfo->dqi_gi.dqi_free_entry =
4129 					be32_to_cpu(lvb->lvb_free_entry);
4130 	} else {
4131 		status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
4132 						     oinfo->dqi_giblk, &bh);
4133 		if (status) {
4134 			mlog_errno(status);
4135 			goto bail;
4136 		}
4137 		gdinfo = (struct ocfs2_global_disk_dqinfo *)
4138 					(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
4139 		info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
4140 		info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
4141 		oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
4142 		oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
4143 		oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
4144 		oinfo->dqi_gi.dqi_free_entry =
4145 					le32_to_cpu(gdinfo->dqi_free_entry);
4146 		brelse(bh);
4147 		ocfs2_track_lock_refresh(lockres);
4148 	}
4149 
4150 bail:
4151 	return status;
4152 }
4153 
4154 /* Lock quota info; this function expects at least a shared lock on the
4155  * quota file so that we can safely refresh quota info from disk. */
4156 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
4157 {
4158 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
4159 	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
4160 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4161 	int status = 0;
4162 
4163 	/* On RO devices, locking really isn't needed... */
4164 	if (ocfs2_is_hard_readonly(osb)) {
4165 		if (ex)
4166 			status = -EROFS;
4167 		goto bail;
4168 	}
4169 	if (ocfs2_mount_local(osb))
4170 		goto bail;
4171 
4172 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
4173 	if (status < 0) {
4174 		mlog_errno(status);
4175 		goto bail;
4176 	}
4177 	if (!ocfs2_should_refresh_lock_res(lockres))
4178 		goto bail;
4179 	/* OK, we have the lock but we need to refresh the quota info */
4180 	status = ocfs2_refresh_qinfo(oinfo);
4181 	if (status)
4182 		ocfs2_qinfo_unlock(oinfo, ex);
4183 	ocfs2_complete_lock_res_refresh(lockres, status);
4184 bail:
4185 	return status;
4186 }
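
/*
 * Editor's illustration, not upstream code: the expected pairing of
 * ocfs2_qinfo_lock()/ocfs2_qinfo_unlock().  The function name is
 * hypothetical; both calls must pass the same 'ex' level.
 */
#if 0	/* illustrative only */
static int example_read_quota_info(struct ocfs2_mem_dqinfo *oinfo)
{
	int status;

	status = ocfs2_qinfo_lock(oinfo, 0);	/* PR is enough to read */
	if (status < 0)
		return status;
	/* ... read the now-current in-memory quota info ... */
	ocfs2_qinfo_unlock(oinfo, 0);
	return 0;
}
#endif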
4187 
4188 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
4189 {
4190 	int status;
4191 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4192 	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
4193 	struct ocfs2_super *osb = lockres->l_priv;
4194 
4196 	if (ocfs2_is_hard_readonly(osb))
4197 		return -EROFS;
4198 
4199 	if (ocfs2_mount_local(osb))
4200 		return 0;
4201 
4202 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
4203 	if (status < 0)
4204 		mlog_errno(status);
4205 
4206 	return status;
4207 }
4208 
4209 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
4210 {
4211 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4212 	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
4213 	struct ocfs2_super *osb = lockres->l_priv;
4214 
4215 	if (!ocfs2_mount_local(osb))
4216 		ocfs2_cluster_unlock(osb, lockres, level);
4217 }
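
/*
 * Editor's illustration, not upstream code: the usual take/use/release
 * pattern for the refcount tree lock.  The function name is hypothetical
 * and 'tree' is an ocfs2_refcount_tree the caller already references.
 */
#if 0	/* illustrative only */
static int example_modify_refcount_tree(struct ocfs2_refcount_tree *tree)
{
	int ret;

	ret = ocfs2_refcount_lock(tree, 1);	/* 1 => DLM_LOCK_EX */
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}
	/* ... modify the refcount tree under the EX cluster lock ... */
	ocfs2_refcount_unlock(tree, 1);
	return ret;
}
#endif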
4218 
4219 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
4220 				       struct ocfs2_lock_res *lockres)
4221 {
4222 	int status;
4223 	struct ocfs2_unblock_ctl ctl = {0, 0,};
4224 	unsigned long flags;
4225 
4226 	/* Our reference to the lockres in this function can be
4227 	 * considered valid until we remove the OCFS2_LOCK_QUEUED
4228 	 * flag. */
4229 
4230 	BUG_ON(!lockres);
4231 	BUG_ON(!lockres->l_ops);
4232 
4233 	mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
4234 
4235 	/* Detect whether a lock has been marked as going away while
4236 	 * the downconvert thread was processing other things. A lock can
4237 	 * still be marked with OCFS2_LOCK_FREEING after this check,
4238 	 * but short-circuiting here still saves us some
4239 	 * work. */
4240 	spin_lock_irqsave(&lockres->l_lock, flags);
4241 	if (lockres->l_flags & OCFS2_LOCK_FREEING)
4242 		goto unqueue;
4243 	spin_unlock_irqrestore(&lockres->l_lock, flags);
4244 
4245 	status = ocfs2_unblock_lock(osb, lockres, &ctl);
4246 	if (status < 0)
4247 		mlog_errno(status);
4248 
4249 	spin_lock_irqsave(&lockres->l_lock, flags);
4250 unqueue:
4251 	if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue)
4252 		lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
4253 	else
4254 		ocfs2_schedule_blocked_lock(osb, lockres);
4255 
4256 	mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
4257 	     ctl.requeue ? "yes" : "no");
4258 	spin_unlock_irqrestore(&lockres->l_lock, flags);
4259 
4260 	if (ctl.unblock_action != UNBLOCK_CONTINUE
4261 	    && lockres->l_ops->post_unlock)
4262 		lockres->l_ops->post_unlock(osb, lockres);
4263 }
4264 
4265 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
4266 					struct ocfs2_lock_res *lockres)
4267 {
4268 	unsigned long flags;
4269 
4270 	assert_spin_locked(&lockres->l_lock);
4271 
4272 	if (lockres->l_flags & OCFS2_LOCK_FREEING) {
4273 		/* Do not schedule a lock for downconvert when it's on
4274 		 * the way to destruction - any nodes wanting access
4275 		 * to the resource will get it soon. */
4276 		mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
4277 		     lockres->l_name, lockres->l_flags);
4278 		return;
4279 	}
4280 
4281 	lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
4282 
4283 	spin_lock_irqsave(&osb->dc_task_lock, flags);
4284 	if (list_empty(&lockres->l_blocked_list)) {
4285 		list_add_tail(&lockres->l_blocked_list,
4286 			      &osb->blocked_lock_list);
4287 		osb->blocked_lock_count++;
4288 	}
4289 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4290 }
4291 
4292 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4293 {
4294 	unsigned long processed;
4295 	unsigned long flags;
4296 	struct ocfs2_lock_res *lockres;
4297 
4298 	spin_lock_irqsave(&osb->dc_task_lock, flags);
4299 	/* grab this early so we know to try again if a state change and
4300 	 * wakeup happen part-way through our work */
4301 	osb->dc_work_sequence = osb->dc_wake_sequence;
4302 
4303 	processed = osb->blocked_lock_count;
4304 	/*
4305 	 * blocked lock processing in this loop might call iput which can
4306 	 * remove items off osb->blocked_lock_list. Downconvert up to
4307 	 * 'processed' number of locks, but stop short if we had some
4308 	 * removed in ocfs2_mark_lockres_freeing when downconverting.
4309 	 */
4310 	while (processed && !list_empty(&osb->blocked_lock_list)) {
4311 		lockres = list_entry(osb->blocked_lock_list.next,
4312 				     struct ocfs2_lock_res, l_blocked_list);
4313 		list_del_init(&lockres->l_blocked_list);
4314 		osb->blocked_lock_count--;
4315 		spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4316 
4317 		BUG_ON(!processed);
4318 		processed--;
4319 
4320 		ocfs2_process_blocked_lock(osb, lockres);
4321 
4322 		spin_lock_irqsave(&osb->dc_task_lock, flags);
4323 	}
4324 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4325 }
4326 
4327 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
4328 {
4329 	int empty = 0;
4330 	unsigned long flags;
4331 
4332 	spin_lock_irqsave(&osb->dc_task_lock, flags);
4333 	if (list_empty(&osb->blocked_lock_list))
4334 		empty = 1;
4335 
4336 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4337 	return empty;
4338 }
4339 
4340 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4341 {
4342 	int should_wake = 0;
4343 	unsigned long flags;
4344 
4345 	spin_lock_irqsave(&osb->dc_task_lock, flags);
4346 	if (osb->dc_work_sequence != osb->dc_wake_sequence)
4347 		should_wake = 1;
4348 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4349 
4350 	return should_wake;
4351 }
4352 
4353 static int ocfs2_downconvert_thread(void *arg)
4354 {
4355 	int status = 0;
4356 	struct ocfs2_super *osb = arg;
4357 
4358 	/* only quit once we've been asked to stop and there is no more
4359 	 * work available */
4360 	while (!(kthread_should_stop() &&
4361 		ocfs2_downconvert_thread_lists_empty(osb))) {
4362 
4363 		wait_event_interruptible(osb->dc_event,
4364 					 ocfs2_downconvert_thread_should_wake(osb) ||
4365 					 kthread_should_stop());
4366 
4367 		mlog(0, "downconvert_thread: awoken\n");
4368 
4369 		ocfs2_downconvert_thread_do_work(osb);
4370 	}
4371 
4372 	osb->dc_task = NULL;
4373 	return status;
4374 }
4375 
4376 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4377 {
4378 	unsigned long flags;
4379 
4380 	spin_lock_irqsave(&osb->dc_task_lock, flags);
4381 	/* make sure the downconvert thread gets a swipe at whatever changes
4382 	 * the caller may have made to the lock state */
4383 	osb->dc_wake_sequence++;
4384 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4385 	wake_up(&osb->dc_event);
4386 }
4387