/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

       dlm_lock          = request_lock
       dlm_lock+CONVERT  = convert_lock
       dlm_unlock        = unlock_lock
       dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
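
/* For example (an illustrative trace of the stages above, not a separate
   code path): a convert request for a remotely mastered rsb flows as

   L: dlm_lock(CONVERT) -> convert_lock() -> _convert_lock() -> send_convert()
   R: receive_convert() -> do_convert() -> send_convert_reply()
   L: receive_convert_reply()

   while the same convert on a locally mastered rsb short-circuits at
   stage 3: _convert_lock() calls do_convert() directly. */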
#include <linux/types.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
void dlm_timeout_warn(struct dlm_lkb *lkb);

/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
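
/* A quick worked reading of the matrix (row = granted mode, column =
   requested mode, both offset by one): a granted PR lock is compatible
   with another PR request (__dlm_compat_matrix[DLM_LOCK_PR+1][DLM_LOCK_PR+1]
   == 1) but not with an EX request
   (__dlm_compat_matrix[DLM_LOCK_PR+1][DLM_LOCK_EX+1] == 0).  See
   dlm_modes_compat() below for the accessor. */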

/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD*/
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
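
/* Reading the table above: converting up from PR to EX
   (dlm_lvb_operations[DLM_LOCK_PR+1][DLM_LOCK_EX+1] == 1) hands the
   resource's LVB back to the caller, while converting down from PW to NL
   (dlm_lvb_operations[DLM_LOCK_PW+1][DLM_LOCK_NL+1] == 0) writes the
   caller's LVB into the resource; set_lvb_lock() below acts on these
   values. */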

#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}

/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
	       "     status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
}

void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
	       r->res_nodeid, r->res_flags, r->res_first_lkid,
	       r->res_recover_locks_count, r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	if (lkb->lkb_flags & DLM_IFL_MSTCPY)
		DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}

static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	/* if the operation was a cancel, then return -DLM_ECANCEL; if a
	   timeout caused the cancel, then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	lkb->lkb_lksb->sb_status = rv;
	lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;

	dlm_add_ast(lkb, AST_COMP);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb))
		send_bast(r, lkb, rqmode);
	else {
		lkb->lkb_bastmode = rqmode;
		dlm_add_ast(lkb, AST_BAST);
	}
}

/*
 * Basic operations on rsb's and lkb's
 */

static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r;

	r = allocate_rsb(ls, len);
	if (!r)
		return NULL;

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	return r;
}

static int search_rsb_list(struct list_head *head, char *name, int len,
			   unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error = 0;

	list_for_each_entry(r, head, res_hashchain) {
		if (len == r->res_length && !memcmp(name, r->res_name, len))
			goto found;
	}
	return -EBADR;

 found:
	if (r->res_nodeid && (flags & R_MASTER))
		error = -ENOTBLK;
	*r_ret = r;
	return error;
}

static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
		       unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error;

	error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
	if (!error) {
		kref_get(&r->res_ref);
		goto out;
	}
	error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
	if (error)
		goto out;

	list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);

	if (dlm_no_directory(ls))
		goto out;

	if (r->res_nodeid == -1) {
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else if (r->res_nodeid > 0) {
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else {
		DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
		DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
	}
 out:
	*r_ret = r;
	return error;
}

static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
		      unsigned int flags, struct dlm_rsb **r_ret)
{
	int error;
	write_lock(&ls->ls_rsbtbl[b].lock);
	error = _search_rsb(ls, name, len, b, flags, r_ret);
	write_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}

/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 */
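
/* An illustrative summary of the search results (derived from the helpers
   above, not a separate code path): -EBADR means no rsb with that name was
   found on either list, -ENOTBLK means an rsb was found but this node is
   not its master while R_MASTER was required, and 0 means the rsb was
   found (or revived from the toss list) with a reference taken. */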

static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r, *tmp;
	uint32_t hash, bucket;
	int error = 0;

	if (dlm_no_directory(ls))
		flags |= R_CREATE;

	hash = jhash(name, namelen, 0);
	bucket = hash & (ls->ls_rsbtbl_size - 1);

	error = search_rsb(ls, name, namelen, bucket, flags, &r);
	if (!error)
		goto out;

	if (error == -EBADR && !(flags & R_CREATE))
		goto out;

	/* the rsb was found but wasn't a master copy */
	if (error == -ENOTBLK)
		goto out;

	error = -ENOMEM;
	r = create_rsb(ls, name, namelen);
	if (!r)
		goto out;

	r->res_hash = hash;
	r->res_bucket = bucket;
	r->res_nodeid = -1;
	kref_init(&r->res_ref);

	/* With no directory, the master can be set immediately */
	if (dlm_no_directory(ls)) {
		int nodeid = dlm_dir_nodeid(r);
		if (nodeid == dlm_our_nodeid())
			nodeid = 0;
		r->res_nodeid = nodeid;
	}

	write_lock(&ls->ls_rsbtbl[bucket].lock);
	error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
	if (!error) {
		write_unlock(&ls->ls_rsbtbl[bucket].lock);
		free_rsb(r);
		r = tmp;
		goto out;
	}
	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
	write_unlock(&ls->ls_rsbtbl[bucket].lock);
	error = 0;
 out:
	*r_ret = r;
	return error;
}

int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
		 unsigned int flags, struct dlm_rsb **r_ret)
{
	return find_rsb(ls, name, namelen, flags, r_ret);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	if (r->res_lvbptr) {
		free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	write_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	write_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb, *tmp;
	uint32_t lkid = 0;
	uint16_t bucket;

	lkb = allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
	INIT_LIST_HEAD(&lkb->lkb_time_list);

	get_random_bytes(&bucket, sizeof(bucket));
	bucket &= (ls->ls_lkbtbl_size - 1);

	write_lock(&ls->ls_lkbtbl[bucket].lock);

	/* counter can roll over so we must verify lkid is not in use */

	while (lkid == 0) {
		lkid = (bucket << 16) | ls->ls_lkbtbl[bucket].counter++;

		list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
				    lkb_idtbl_list) {
			if (tmp->lkb_id != lkid)
				continue;
			lkid = 0;
			break;
		}
	}

	lkb->lkb_id = lkid;
	list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
	write_unlock(&ls->ls_lkbtbl[bucket].lock);

	*lkb_ret = lkb;
	return 0;
}
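
/* A worked example of the lkid encoding used above (values are
   illustrative only): with bucket 0x0005 and counter value 0x0001,
   lkid = (0x0005 << 16) | 0x0001 = 0x00050001.  find_lkb() and
   __put_lkb() recover the bucket with (lkid >> 16), which is why the
   low 16 bits are reserved for the per-bucket counter. */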

static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	uint16_t bucket = (lkid >> 16);

	list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
		if (lkb->lkb_id == lkid)
			return lkb;
	}
	return NULL;
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	uint16_t bucket = (lkid >> 16);

	if (bucket >= ls->ls_lkbtbl_size)
		return -EBADSLT;

	read_lock(&ls->ls_lkbtbl[bucket].lock);
	lkb = __find_lkb(ls, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	read_unlock(&ls->ls_lkbtbl[bucket].lock);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint16_t bucket = (lkb->lkb_id >> 16);

	write_lock(&ls->ls_lkbtbl[bucket].lock);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		list_del(&lkb->lkb_idtbl_list);
		write_unlock(&ls->ls_lkbtbl[bucket].lock);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			free_lvb(lkb->lkb_lvbptr);
		free_lkb(lkb);
		return 1;
	} else {
		write_unlock(&ls->ls_lkbtbl[bucket].lock);
		return 0;
	}
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	if (!lkb)
		list_add_tail(new, head);
	else
		__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
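
/* Mechanics of lkb_add_ordered(), illustrated with hypothetical modes:
   the new entry is linked in front of the first existing entry whose
   compared mode is numerically lower (modes order as NL < CR < CW < PR <
   PW < EX), so inserting mode PW into a queue whose compared modes are
   EX, PR, NL would give EX, PW, PR, NL, i.e. a descending-mode list. */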

/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "add overlap %x cur %d new %d count %d flags %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d",
		  lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't received a
	   reply to the op that was in progress prior to the unlock/cancel;
	   we give up on any reply to the earlier op.  FIXME: not sure
	   when/how this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remove_from_waiters %x reply %d give up on %d",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}

static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply,
   in which case we can't take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms != &ls->ls_stub_ms)
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, ms->m_type);
	if (ms != &ls->ls_stub_ms)
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

static void dir_remove(struct dlm_rsb *r)
{
	int to_nodeid;

	if (dlm_no_directory(r->res_ls))
		return;

	to_nodeid = dlm_dir_nodeid(r);
	if (to_nodeid != dlm_our_nodeid())
		send_remove(r);
	else
		dlm_dir_remove_entry(r->res_ls, to_nodeid,
				     r->res_name, r->res_length);
}

/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
   found since they are in order of newest to oldest? */

static int shrink_bucket(struct dlm_ls *ls, int b)
{
	struct dlm_rsb *r;
	int count = 0, found;

	for (;;) {
		found = 0;
		write_lock(&ls->ls_rsbtbl[b].lock);
		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
					    res_hashchain) {
			if (!time_after_eq(jiffies, r->res_toss_time +
					   dlm_config.ci_toss_secs * HZ))
				continue;
			found = 1;
			break;
		}

		if (!found) {
			write_unlock(&ls->ls_rsbtbl[b].lock);
			break;
		}

		if (kref_put(&r->res_ref, kill_rsb)) {
			list_del(&r->res_hashchain);
			write_unlock(&ls->ls_rsbtbl[b].lock);

			if (is_master(r))
				dir_remove(r);
			free_rsb(r);
			count++;
		} else {
			write_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "tossed rsb in use %s", r->res_name);
		}
	}

	return count;
}

void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			break;
		cond_resched();
	}
}

static void add_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	if (is_master_copy(lkb)) {
		lkb->lkb_timestamp = jiffies;
		return;
	}

	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
		goto add_it;
	}
	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
		goto add_it;
	return;

 add_it:
	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
	mutex_lock(&ls->ls_timeout_mutex);
	hold_lkb(lkb);
	lkb->lkb_timestamp = jiffies;
	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
	mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_timeout_mutex);
	if (!list_empty(&lkb->lkb_time_list)) {
		list_del_init(&lkb->lkb_time_list);
		unhold_lkb(lkb);
	}
	mutex_unlock(&ls->ls_timeout_mutex);
}

/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int do_cancel, do_warn;

	for (;;) {
		if (dlm_locking_stopped(ls))
			break;

		do_cancel = 0;
		do_warn = 0;
		mutex_lock(&ls->ls_timeout_mutex);
		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {

			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
			    time_after_eq(jiffies, lkb->lkb_timestamp +
					  lkb->lkb_timeout_cs * HZ/100))
				do_cancel = 1;

			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
			    time_after_eq(jiffies, lkb->lkb_timestamp +
					  dlm_config.ci_timewarn_cs * HZ/100))
				do_warn = 1;

			if (!do_cancel && !do_warn)
				continue;
			hold_lkb(lkb);
			break;
		}
		mutex_unlock(&ls->ls_timeout_mutex);

		if (!do_cancel && !do_warn)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		if (do_warn) {
			/* clear flag so we only warn once */
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
				del_timeout(lkb);
			dlm_timeout_warn(lkb);
		}

		if (do_cancel) {
			log_debug(ls, "timeout cancel %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
			del_timeout(lkb);
			_cancel_lock(r, lkb);
		}

		unlock_rsb(r);
		unhold_rsb(r);
		dlm_put_lkb(lkb);
	}
}

/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	long adj = jiffies - ls->ls_recover_begin;

	ls->ls_recover_begin = 0;
	mutex_lock(&ls->ls_timeout_mutex);
	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
		lkb->lkb_timestamp += adj;
	mutex_unlock(&ls->ls_timeout_mutex);
}

/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}

static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}

/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}

/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */

static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}

/* returns: 0 did nothing
	    1 moved lock to granted
	   -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int rv = 0;

	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		rv = 1;
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		rv = -1;
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
	return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return revert_lock(r, lkb);
}

static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
	lkb->lkb_highbast = 0;
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}

/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}

/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
   change the granted/requested modes.  We're munging things accordingly in
   the process copy.
   CONVDEADLK: our grmode may have been forced down to NL to resolve a
   conversion deadlock
   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
   compatible with other granted locks */

static void munge_demoted(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_type != DLM_MSG_CONVERT_REPLY) {
		log_print("munge_demoted %x invalid reply type %d",
			  lkb->lkb_id, ms->m_type);
		return;
	}

	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
		log_print("munge_demoted %x invalid modes gr %d rq %d",
			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
		return;
	}

	lkb->lkb_grmode = DLM_LOCK_NL;
}

static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
	    ms->m_type != DLM_MSG_GRANT) {
		log_print("munge_altmode %x invalid reply type %d",
			  lkb->lkb_id, ms->m_type);
		return;
	}

	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
		lkb->lkb_rqmode = DLM_LOCK_PR;
	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
		lkb->lkb_rqmode = DLM_LOCK_CW;
	else {
		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
		dlm_print_lkb(lkb);
	}
}

static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
					   lkb_statequeue);
	if (lkb->lkb_id == first->lkb_id)
		return 1;

	return 0;
}

/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}

/*
 * "A conversion deadlock arises with a pair of lock requests in the converting
 * queue for one resource.  The granted mode of each lock blocks the requested
 * mode of the other lock."
 *
 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
 * convert queue from being granted, then deadlk/demote lkb.
 *
 * Example:
 * Granted Queue: empty
 * Convert Queue: NL->EX (first lock)
 *                PR->EX (second lock)
 *
 * The first lock can't be granted because of the granted mode of the second
 * lock and the second lock can't be granted because it's not first in the
 * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
 * flag set and return DEMOTED in the lksb flags.
 *
 * Originally, this function detected conv-deadlk in a more limited scope:
 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
 * - if lkb1 was the first entry in the queue (not just earlier), and was
 *   blocked by the granted mode of lkb2, and there was nothing on the
 *   granted queue preventing lkb1 from being granted immediately, i.e.
 *   lkb2 was the only thing preventing lkb1 from being granted.
 *
 * That second condition meant we'd only say there was conv-deadlk if
 * resolving it (by demotion) would lead to the first lock on the convert
 * queue being granted right away.  It allowed conversion deadlocks to exist
 * between locks on the convert queue while they couldn't be granted anyway.
 *
 * Now, we detect and take action on conversion deadlocks immediately when
 * they're created, even if they may not be immediately consequential.  If
 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
 * mode that would prevent lkb1's conversion from being granted, we do a
 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
 * I think this means that the lkb_is_ahead condition below should always
 * be zero, i.e. there will never be conv-deadlk between two locks that are
 * both already on the convert queue.
 */

static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
{
	struct dlm_lkb *lkb1;
	int lkb_is_ahead = 0;

	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
		if (lkb1 == lkb2) {
			lkb_is_ahead = 1;
			continue;
		}

		if (!lkb_is_ahead) {
			if (!modes_compat(lkb2, lkb1))
				return 1;
		} else {
			if (!modes_compat(lkb2, lkb1) &&
			    !modes_compat(lkb1, lkb2))
				return 1;
		}
	}
	return 0;
}

/*
 * Return 1 if the lock can be granted, 0 otherwise.
 * Also detect and resolve conversion deadlocks.
 *
 * lkb is the lock to be granted
 *
 * now is 1 if the function is being called in the context of the
 * immediate request, it is 0 if called later, after the lock has been
 * queued.
 *
 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
 */

static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
{
	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);

	/*
	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
	 * a new request for a NL mode lock being blocked.
	 *
	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
	 * request, then it would be granted.  In essence, the use of this flag
	 * tells the Lock Manager to expedite this request by not considering
	 * what may be in the CONVERTING or WAITING queues...  As of this
	 * writing, the EXPEDITE flag can be used only with new requests for NL
	 * mode locks.  This flag is not valid for conversion requests.
	 *
	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
	 * conversion or used with a non-NL requested mode.  We also know an
	 * EXPEDITE request is always granted immediately, so now must always
	 * be 1.  The full condition to grant an expedite request: (now &&
	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
	 * therefore be shortened to just checking the flag.
	 */

	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
		return 1;

	/*
	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
	 * added to the remaining conditions.
	 */

	if (queue_conflict(&r->res_grantqueue, lkb))
		goto out;

	/*
	 * 6-3: By default, a conversion request is immediately granted if the
	 * requested mode is compatible with the modes of all other granted
	 * locks
	 */

	if (queue_conflict(&r->res_convertqueue, lkb))
		goto out;

	/*
	 * 6-5: But the default algorithm for deciding whether to grant or
	 * queue conversion requests does not by itself guarantee that such
	 * requests are serviced on a "first come first serve" basis.  This, in
	 * turn, can lead to a phenomenon known as "indefinite postponement".
	 *
	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
	 * the system service employed to request a lock conversion.  This flag
	 * forces certain conversion requests to be queued, even if they are
	 * compatible with the granted modes of other locks on the same
	 * resource.  Thus, the use of this flag results in conversion requests
	 * being ordered on a "first come first serve" basis.
	 *
	 * DCT: This condition is all about new conversions being able to occur
	 * "in place" while the lock remains on the granted queue (assuming
	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
	 * doesn't _have_ to go onto the convert queue where it's processed in
	 * order.  The "now" variable is necessary to distinguish converts
	 * being received and processed for the first time now, because once a
	 * convert is moved to the conversion queue the condition below applies
	 * requiring fifo granting.
	 */

	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
		return 1;

	/*
	 * The NOORDER flag is set to avoid the standard vms rules on grant
	 * order.
	 */

	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
		return 1;

	/*
	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
	 * granted until all other conversion requests ahead of it are granted
	 * and/or canceled.
	 */

	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
		return 1;

	/*
	 * 6-4: By default, a new request is immediately granted only if all
	 * three of the following conditions are satisfied when the request is
	 * issued:
	 * - The queue of ungranted conversion requests for the resource is
	 *   empty.
	 * - The queue of ungranted new requests for the resource is empty.
	 * - The mode of the new request is compatible with the most
	 *   restrictive mode of all granted locks on the resource.
	 */

	if (now && !conv && list_empty(&r->res_convertqueue) &&
	    list_empty(&r->res_waitqueue))
		return 1;

	/*
	 * 6-4: Once a lock request is in the queue of ungranted new requests,
	 * it cannot be granted until the queue of ungranted conversion
	 * requests is empty, all ungranted new requests ahead of it are
	 * granted and/or canceled, and it is compatible with the granted mode
	 * of the most restrictive lock granted on the resource.
	 */

	if (!now && !conv && list_empty(&r->res_convertqueue) &&
	    first_in_list(lkb, &r->res_waitqueue))
		return 1;
 out:
	return 0;
}
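
/* A worked example of the rules above (illustrative, not exhaustive): a
   new EX request ("now" == 1) against an rsb with one PR lock granted
   fails the grantqueue queue_conflict() check and falls through to out;
   a new CR request against the same rsb, with empty convert and wait
   queues, satisfies the 6-4 conditions and is granted immediately. */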

static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
			  int *err)
{
	int rv;
	int8_t alt = 0, rqmode = lkb->lkb_rqmode;
	int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);

	if (err)
		*err = 0;

	rv = _can_be_granted(r, lkb, now);
	if (rv)
		goto out;

	/*
	 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
	 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
	 * cancels one of the locks.
	 */

	if (is_convert && can_be_queued(lkb) &&
	    conversion_deadlock_detect(r, lkb)) {
		if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
			lkb->lkb_grmode = DLM_LOCK_NL;
			lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
		} else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
			if (err)
				*err = -EDEADLK;
			else {
				log_print("can_be_granted deadlock %x now %d",
					  lkb->lkb_id, now);
				dlm_dump_rsb(r);
			}
		}
		goto out;
	}

	/*
	 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
	 * to grant a request in a mode other than the normal rqmode.  It's a
	 * simple way to provide a big optimization to applications that can
	 * use them.
	 */

	if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
		alt = DLM_LOCK_PR;
	else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
		alt = DLM_LOCK_CW;

	if (alt) {
		lkb->lkb_rqmode = alt;
		rv = _can_be_granted(r, lkb, now);
		if (rv)
			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
		else
			lkb->lkb_rqmode = rqmode;
	}
 out:
	return rv;
}

/* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
   for locks pending on the convert list.  Once verified (watch for these
   log_prints), we should be able to just call _can_be_granted() and not
   bother with the demote/deadlk cases here (and there's no easy way to deal
   with a deadlk here, we'd have to generate something like grant_lock with
   the deadlk error.) */

/* Returns the highest requested mode of all blocked conversions; sets
   cw if there's a blocked conversion to DLM_LOCK_CW. */

static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw)
{
	struct dlm_lkb *lkb, *s;
	int hi, demoted, quit, grant_restart, demote_restart;
	int deadlk;

	quit = 0;
 restart:
	grant_restart = 0;
	demote_restart = 0;
	hi = DLM_LOCK_IV;

	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
		demoted = is_demoted(lkb);
		deadlk = 0;

		if (can_be_granted(r, lkb, 0, &deadlk)) {
			grant_lock_pending(r, lkb);
			grant_restart = 1;
			continue;
		}

		if (!demoted && is_demoted(lkb)) {
			log_print("WARN: pending demoted %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			demote_restart = 1;
			continue;
		}

		if (deadlk) {
			log_print("WARN: pending deadlock %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			dlm_dump_rsb(r);
			continue;
		}

		hi = max_t(int, lkb->lkb_rqmode, hi);

		if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
			*cw = 1;
	}

	if (grant_restart)
		goto restart;
	if (demote_restart && !quit) {
		quit = 1;
		goto restart;
	}

	return max_t(int, high, hi);
}

static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw)
{
	struct dlm_lkb *lkb, *s;

	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
		if (can_be_granted(r, lkb, 0, NULL))
			grant_lock_pending(r, lkb);
		else {
			high = max_t(int, lkb->lkb_rqmode, high);
			if (lkb->lkb_rqmode == DLM_LOCK_CW)
				*cw = 1;
		}
	}

	return high;
}

/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
   on either the convert or waiting queue.
   high is the largest rqmode of all locks blocked on the convert or
   waiting queue. */

static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
{
	if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
		if (gr->lkb_highbast < DLM_LOCK_EX)
			return 1;
		return 0;
	}

	if (gr->lkb_highbast < high &&
	    !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
		return 1;
	return 0;
}

static void grant_pending_locks(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *s;
	int high = DLM_LOCK_IV;
	int cw = 0;

	DLM_ASSERT(is_master(r), dlm_dump_rsb(r););

	high = grant_pending_convert(r, high, &cw);
	high = grant_pending_wait(r, high, &cw);

	if (high == DLM_LOCK_IV)
		return;

	/*
	 * If there are locks left on the wait/convert queue then send blocking
	 * ASTs to granted locks based on the largest requested mode (high)
	 * found above.
	 */

	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) {
			if (cw && high == DLM_LOCK_PR)
				queue_bast(r, lkb, DLM_LOCK_CW);
			else
				queue_bast(r, lkb, high);
			lkb->lkb_highbast = high;
		}
	}
}

static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
{
	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
		if (gr->lkb_highbast < DLM_LOCK_EX)
			return 1;
		return 0;
	}

	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
		return 1;
	return 0;
}

static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
			    struct dlm_lkb *lkb)
{
	struct dlm_lkb *gr;

	list_for_each_entry(gr, head, lkb_statequeue) {
		if (gr->lkb_bastaddr && modes_require_bast(gr, lkb)) {
			queue_bast(r, gr, lkb->lkb_rqmode);
			gr->lkb_highbast = lkb->lkb_rqmode;
		}
	}
}

static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	send_bast_queue(r, &r->res_grantqueue, lkb);
}

static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	send_bast_queue(r, &r->res_grantqueue, lkb);
	send_bast_queue(r, &r->res_convertqueue, lkb);
}
1832 
1833 /* set_master(r, lkb) -- set the master nodeid of a resource
1834 
1835    The purpose of this function is to set the nodeid field in the given
1836    lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
1837    known, it can just be copied to the lkb and the function will return
1838    0.  If the rsb's nodeid is _not_ known, it needs to be looked up
1839    before it can be copied to the lkb.
1840 
1841    When the rsb nodeid is being looked up remotely, the initial lkb
1842    causing the lookup is kept on the ls_waiters list waiting for the
1843    lookup reply.  Other lkb's waiting for the same rsb lookup are kept
1844    on the rsb's res_lookup list until the master is verified.
1845 
1846    Return values:
1847    0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1848    1: the rsb master is not available and the lkb has been placed on
1849       a wait queue
1850 */
1851 
1852 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
1853 {
1854 	struct dlm_ls *ls = r->res_ls;
1855 	int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
1856 
1857 	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
1858 		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
1859 		r->res_first_lkid = lkb->lkb_id;
1860 		lkb->lkb_nodeid = r->res_nodeid;
1861 		return 0;
1862 	}
1863 
1864 	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
1865 		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
1866 		return 1;
1867 	}
1868 
1869 	if (r->res_nodeid == 0) {
1870 		lkb->lkb_nodeid = 0;
1871 		return 0;
1872 	}
1873 
1874 	if (r->res_nodeid > 0) {
1875 		lkb->lkb_nodeid = r->res_nodeid;
1876 		return 0;
1877 	}
1878 
1879 	DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
1880 
1881 	dir_nodeid = dlm_dir_nodeid(r);
1882 
1883 	if (dir_nodeid != our_nodeid) {
1884 		r->res_first_lkid = lkb->lkb_id;
1885 		send_lookup(r, lkb);
1886 		return 1;
1887 	}
1888 
1889 	for (;;) {
1890 		/* It's possible for dlm_scand to remove an old rsb for
1891 		   this same resource from the toss list, for us to create
1892 		   a new one and look up the master locally, and to find
1893 		   the old entry still there just before dlm_scand does
1894 		   the dir_remove() on the previous rsb. */
1895 
1896 		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
1897 				       r->res_length, &ret_nodeid);
1898 		if (!error)
1899 			break;
1900 		log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
1901 		schedule();
1902 	}
1903 
1904 	if (ret_nodeid == our_nodeid) {
1905 		r->res_first_lkid = 0;
1906 		r->res_nodeid = 0;
1907 		lkb->lkb_nodeid = 0;
1908 	} else {
1909 		r->res_first_lkid = lkb->lkb_id;
1910 		r->res_nodeid = ret_nodeid;
1911 		lkb->lkb_nodeid = ret_nodeid;
1912 	}
1913 	return 0;
1914 }
1915 
1916 static void process_lookup_list(struct dlm_rsb *r)
1917 {
1918 	struct dlm_lkb *lkb, *safe;
1919 
1920 	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
1921 		list_del_init(&lkb->lkb_rsb_lookup);
1922 		_request_lock(r, lkb);
1923 		schedule();
1924 	}
1925 }
1926 
1927 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
1928 
1929 static void confirm_master(struct dlm_rsb *r, int error)
1930 {
1931 	struct dlm_lkb *lkb;
1932 
1933 	if (!r->res_first_lkid)
1934 		return;
1935 
1936 	switch (error) {
1937 	case 0:
1938 	case -EINPROGRESS:
1939 		r->res_first_lkid = 0;
1940 		process_lookup_list(r);
1941 		break;
1942 
1943 	case -EAGAIN:
1944 		/* the remote master didn't queue our NOQUEUE request;
1945 		   make a waiting lkb the first_lkid */
1946 
1947 		r->res_first_lkid = 0;
1948 
1949 		if (!list_empty(&r->res_lookup)) {
1950 			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
1951 					 lkb_rsb_lookup);
1952 			list_del_init(&lkb->lkb_rsb_lookup);
1953 			r->res_first_lkid = lkb->lkb_id;
1954 			_request_lock(r, lkb);
1955 		} else
1956 			r->res_nodeid = -1;
1957 		break;
1958 
1959 	default:
1960 		log_error(r->res_ls, "confirm_master unknown error %d", error);
1961 	}
1962 }
1963 
1964 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
1965 			 int namelen, unsigned long timeout_cs, void *ast,
1966 			 void *astarg, void *bast, struct dlm_args *args)
1967 {
1968 	int rv = -EINVAL;
1969 
1970 	/* check for invalid arg usage */
1971 
1972 	if (mode < 0 || mode > DLM_LOCK_EX)
1973 		goto out;
1974 
1975 	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
1976 		goto out;
1977 
1978 	if (flags & DLM_LKF_CANCEL)
1979 		goto out;
1980 
1981 	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
1982 		goto out;
1983 
1984 	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
1985 		goto out;
1986 
1987 	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
1988 		goto out;
1989 
1990 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
1991 		goto out;
1992 
1993 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
1994 		goto out;
1995 
1996 	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
1997 		goto out;
1998 
1999 	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2000 		goto out;
2001 
2002 	if (!ast || !lksb)
2003 		goto out;
2004 
2005 	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2006 		goto out;
2007 
2008 	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2009 		goto out;
2010 
2011 	/* these args will be copied to the lkb in validate_lock_args;
2012 	   it cannot be done now because, when converting locks, fields in
2013 	   an active lkb cannot be modified before locking the rsb */
2014 
2015 	args->flags = flags;
2016 	args->astaddr = ast;
2017 	args->astparam = (long) astarg;
2018 	args->bastaddr = bast;
2019 	args->timeout = timeout_cs;
2020 	args->mode = mode;
2021 	args->lksb = lksb;
2022 	rv = 0;
2023  out:
2024 	return rv;
2025 }
2026 
2027 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2028 {
2029 	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2030 		      DLM_LKF_FORCEUNLOCK))
2031 		return -EINVAL;
2032 
2033 	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2034 		return -EINVAL;
2035 
2036 	args->flags = flags;
2037 	args->astparam = (long) astarg;
2038 	return 0;
2039 }
2040 
2041 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2042 			      struct dlm_args *args)
2043 {
2044 	int rv = -EINVAL;
2045 
2046 	if (args->flags & DLM_LKF_CONVERT) {
2047 		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2048 			goto out;
2049 
2050 		if (args->flags & DLM_LKF_QUECVT &&
2051 		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2052 			goto out;
2053 
2054 		rv = -EBUSY;
2055 		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2056 			goto out;
2057 
2058 		if (lkb->lkb_wait_type)
2059 			goto out;
2060 
2061 		if (is_overlap(lkb))
2062 			goto out;
2063 	}
2064 
2065 	lkb->lkb_exflags = args->flags;
2066 	lkb->lkb_sbflags = 0;
2067 	lkb->lkb_astaddr = args->astaddr;
2068 	lkb->lkb_astparam = args->astparam;
2069 	lkb->lkb_bastaddr = args->bastaddr;
2070 	lkb->lkb_rqmode = args->mode;
2071 	lkb->lkb_lksb = args->lksb;
2072 	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2073 	lkb->lkb_ownpid = (int) current->pid;
2074 	lkb->lkb_timeout_cs = args->timeout;
2075 	rv = 0;
2076  out:
2077 	return rv;
2078 }
2079 
2080 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2081    for success */
2082 
2083 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2084    because a lookup may be in progress, and it's valid to do a
2085    cancel or force-unlock on such a lock */
2086 
2087 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2088 {
2089 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2090 	int rv = -EINVAL;
2091 
2092 	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2093 		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2094 		dlm_print_lkb(lkb);
2095 		goto out;
2096 	}
2097 
2098 	/* an lkb may still exist even though the lock is EOL'ed due to a
2099 	   cancel, unlock or failed noqueue request; an app can't use these
2100 	   locks; return the same error as if the lkid had not been found */
2101 
2102 	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2103 		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2104 		rv = -ENOENT;
2105 		goto out;
2106 	}
2107 
2108 	/* an lkb may be waiting for an rsb lookup to complete where the
2109 	   lookup was initiated by another lock */
2110 
2111 	if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2112 		if (!list_empty(&lkb->lkb_rsb_lookup)) {
2113 			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2114 			list_del_init(&lkb->lkb_rsb_lookup);
2115 			queue_cast(lkb->lkb_resource, lkb,
2116 				   args->flags & DLM_LKF_CANCEL ?
2117 				   -DLM_ECANCEL : -DLM_EUNLOCK);
2118 			unhold_lkb(lkb); /* undoes create_lkb() */
2119 			rv = -EBUSY;
2120 			goto out;
2121 		}
2122 	}
2123 
2124 	/* cancel not allowed with another cancel/unlock in progress */
2125 
2126 	if (args->flags & DLM_LKF_CANCEL) {
2127 		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2128 			goto out;
2129 
2130 		if (is_overlap(lkb))
2131 			goto out;
2132 
2133 		/* don't let scand try to do a cancel */
2134 		del_timeout(lkb);
2135 
2136 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2137 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2138 			rv = -EBUSY;
2139 			goto out;
2140 		}
2141 
2142 		switch (lkb->lkb_wait_type) {
2143 		case DLM_MSG_LOOKUP:
2144 		case DLM_MSG_REQUEST:
2145 			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2146 			rv = -EBUSY;
2147 			goto out;
2148 		case DLM_MSG_UNLOCK:
2149 		case DLM_MSG_CANCEL:
2150 			goto out;
2151 		}
2152 		/* add_to_waiters() will set OVERLAP_CANCEL */
2153 		goto out_ok;
2154 	}
2155 
2156 	/* do we need to allow a force-unlock if there's a normal unlock
2157 	   already in progress?  under what conditions could the normal unlock
2158 	   fail such that we'd want to send a force-unlock to be sure? */
2159 
2160 	if (args->flags & DLM_LKF_FORCEUNLOCK) {
2161 		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2162 			goto out;
2163 
2164 		if (is_overlap_unlock(lkb))
2165 			goto out;
2166 
2167 		/* don't let scand try to do a cancel */
2168 		del_timeout(lkb);
2169 
2170 		if (lkb->lkb_flags & DLM_IFL_RESEND) {
2171 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2172 			rv = -EBUSY;
2173 			goto out;
2174 		}
2175 
2176 		switch (lkb->lkb_wait_type) {
2177 		case DLM_MSG_LOOKUP:
2178 		case DLM_MSG_REQUEST:
2179 			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2180 			rv = -EBUSY;
2181 			goto out;
2182 		case DLM_MSG_UNLOCK:
2183 			goto out;
2184 		}
2185 		/* add_to_waiters() will set OVERLAP_UNLOCK */
2186 		goto out_ok;
2187 	}
2188 
2189 	/* normal unlock not allowed if there's any op in progress */
2190 	rv = -EBUSY;
2191 	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2192 		goto out;
2193 
2194  out_ok:
2195 	/* an overlapping op shouldn't blow away exflags from other op */
2196 	lkb->lkb_exflags |= args->flags;
2197 	lkb->lkb_sbflags = 0;
2198 	lkb->lkb_astparam = args->astparam;
2199 	rv = 0;
2200  out:
2201 	if (rv)
2202 		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
2203 			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
2204 			  args->flags, lkb->lkb_wait_type,
2205 			  lkb->lkb_resource->res_name);
2206 	return rv;
2207 }
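
/* Summary of the cancel/force-unlock handling above (a reading aid, not
   new behavior), keyed on the op already in flight (lkb_wait_type):

   in flight         CANCEL arrives               FORCEUNLOCK arrives
   lookup/request    OVERLAP_CANCEL set, -EBUSY   OVERLAP_UNLOCK set, -EBUSY
   unlock            -EINVAL                      -EINVAL
   cancel            -EINVAL                      proceeds to out_ok
   none              proceeds to out_ok           proceeds to out_ok

   An lkb flagged RESEND is treated like lookup/request. */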
2208 
2209 /*
2210  * Four stage 4 varieties:
2211  * do_request(), do_convert(), do_unlock(), do_cancel()
2212  * These are called on the master node for the given lock and
2213  * from the central locking logic.
2214  */
2215 
2216 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2217 {
2218 	int error = 0;
2219 
2220 	if (can_be_granted(r, lkb, 1, NULL)) {
2221 		grant_lock(r, lkb);
2222 		queue_cast(r, lkb, 0);
2223 		goto out;
2224 	}
2225 
2226 	if (can_be_queued(lkb)) {
2227 		error = -EINPROGRESS;
2228 		add_lkb(r, lkb, DLM_LKSTS_WAITING);
2229 		send_blocking_asts(r, lkb);
2230 		add_timeout(lkb);
2231 		goto out;
2232 	}
2233 
2234 	error = -EAGAIN;
2235 	if (force_blocking_asts(lkb))
2236 		send_blocking_asts_all(r, lkb);
2237 	queue_cast(r, lkb, -EAGAIN);
2238 
2239  out:
2240 	return error;
2241 }
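
/* do_request() produces one of three results that send_request_reply()
   relays to the requesting node: 0 (granted immediately), -EINPROGRESS
   (queued on the wait queue) or -EAGAIN (a NOQUEUE request that can't be
   granted).  For 0 and -EAGAIN the completion ast is queued here; for
   -EINPROGRESS it's queued later when the lock is finally granted. */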
2242 
2243 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2244 {
2245 	int error = 0;
2246 	int deadlk = 0;
2247 
2248 	/* changing an existing lock may allow others to be granted */
2249 
2250 	if (can_be_granted(r, lkb, 1, &deadlk)) {
2251 		grant_lock(r, lkb);
2252 		queue_cast(r, lkb, 0);
2253 		grant_pending_locks(r);
2254 		goto out;
2255 	}
2256 
2257 	/* can_be_granted() detected that this lock would block in a conversion
2258 	   deadlock, so we leave it on the granted queue and return EDEADLK in
2259 	   the ast for the convert. */
2260 
2261 	if (deadlk) {
2262 		/* it's left on the granted queue */
2263 		log_debug(r->res_ls, "deadlock %x node %d sts%d g%d r%d %s",
2264 			  lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_status,
2265 			  lkb->lkb_grmode, lkb->lkb_rqmode, r->res_name);
2266 		revert_lock(r, lkb);
2267 		queue_cast(r, lkb, -EDEADLK);
2268 		error = -EDEADLK;
2269 		goto out;
2270 	}
2271 
2272 	/* is_demoted() means the can_be_granted() above set the grmode
2273 	   to NL, and left us on the granted queue.  This auto-demotion
2274 	   (due to CONVDEADLK) might mean other locks, and/or this lock, are
2275 	   now grantable.  We have to try to grant other converting locks
2276 	   before we try again to grant this one. */
2277 
2278 	if (is_demoted(lkb)) {
2279 		grant_pending_convert(r, DLM_LOCK_IV, NULL);
2280 		if (_can_be_granted(r, lkb, 1)) {
2281 			grant_lock(r, lkb);
2282 			queue_cast(r, lkb, 0);
2283 			grant_pending_locks(r);
2284 			goto out;
2285 		}
2286 		/* else fall through and move to convert queue */
2287 	}
2288 
2289 	if (can_be_queued(lkb)) {
2290 		error = -EINPROGRESS;
2291 		del_lkb(r, lkb);
2292 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2293 		send_blocking_asts(r, lkb);
2294 		add_timeout(lkb);
2295 		goto out;
2296 	}
2297 
2298 	error = -EAGAIN;
2299 	if (force_blocking_asts(lkb))
2300 		send_blocking_asts_all(r, lkb);
2301 	queue_cast(r, lkb, -EAGAIN);
2302 
2303  out:
2304 	return error;
2305 }
2306 
2307 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2308 {
2309 	remove_lock(r, lkb);
2310 	queue_cast(r, lkb, -DLM_EUNLOCK);
2311 	grant_pending_locks(r);
2312 	return -DLM_EUNLOCK;
2313 }
2314 
2315 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
2316 
2317 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2318 {
2319 	int error;
2320 
2321 	error = revert_lock(r, lkb);
2322 	if (error) {
2323 		queue_cast(r, lkb, -DLM_ECANCEL);
2324 		grant_pending_locks(r);
2325 		return -DLM_ECANCEL;
2326 	}
2327 	return 0;
2328 }
2329 
2330 /*
2331  * Four stage 3 varieties:
2332  * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
2333  */
2334 
2335 /* add a new lkb to a possibly new rsb, called by requesting process */
2336 
2337 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2338 {
2339 	int error;
2340 
2341 	/* set_master: sets lkb nodeid from r */
2342 
2343 	error = set_master(r, lkb);
2344 	if (error < 0)
2345 		goto out;
2346 	if (error) {
2347 		error = 0;
2348 		goto out;
2349 	}
2350 
2351 	if (is_remote(r))
2352 		/* receive_request() calls do_request() on remote node */
2353 		error = send_request(r, lkb);
2354 	else
2355 		error = do_request(r, lkb);
2356  out:
2357 	return error;
2358 }
2359 
2360 /* change some property of an existing lkb, e.g. mode */
2361 
2362 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2363 {
2364 	int error;
2365 
2366 	if (is_remote(r))
2367 		/* receive_convert() calls do_convert() on remote node */
2368 		error = send_convert(r, lkb);
2369 	else
2370 		error = do_convert(r, lkb);
2371 
2372 	return error;
2373 }
2374 
2375 /* remove an existing lkb from the granted queue */
2376 
2377 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2378 {
2379 	int error;
2380 
2381 	if (is_remote(r))
2382 		/* receive_unlock() calls do_unlock() on remote node */
2383 		error = send_unlock(r, lkb);
2384 	else
2385 		error = do_unlock(r, lkb);
2386 
2387 	return error;
2388 }
2389 
2390 /* remove an existing lkb from the convert or wait queue */
2391 
2392 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2393 {
2394 	int error;
2395 
2396 	if (is_remote(r))
2397 		/* receive_cancel() calls do_cancel() on remote node */
2398 		error = send_cancel(r, lkb);
2399 	else
2400 		error = do_cancel(r, lkb);
2401 
2402 	return error;
2403 }
2404 
2405 /*
2406  * Four stage 2 varieties:
2407  * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
2408  */
2409 
2410 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
2411 			int len, struct dlm_args *args)
2412 {
2413 	struct dlm_rsb *r;
2414 	int error;
2415 
2416 	error = validate_lock_args(ls, lkb, args);
2417 	if (error)
2418 		goto out;
2419 
2420 	error = find_rsb(ls, name, len, R_CREATE, &r);
2421 	if (error)
2422 		goto out;
2423 
2424 	lock_rsb(r);
2425 
2426 	attach_lkb(r, lkb);
2427 	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
2428 
2429 	error = _request_lock(r, lkb);
2430 
2431 	unlock_rsb(r);
2432 	put_rsb(r);
2433 
2434  out:
2435 	return error;
2436 }
2437 
2438 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2439 			struct dlm_args *args)
2440 {
2441 	struct dlm_rsb *r;
2442 	int error;
2443 
2444 	r = lkb->lkb_resource;
2445 
2446 	hold_rsb(r);
2447 	lock_rsb(r);
2448 
2449 	error = validate_lock_args(ls, lkb, args);
2450 	if (error)
2451 		goto out;
2452 
2453 	error = _convert_lock(r, lkb);
2454  out:
2455 	unlock_rsb(r);
2456 	put_rsb(r);
2457 	return error;
2458 }
2459 
2460 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2461 		       struct dlm_args *args)
2462 {
2463 	struct dlm_rsb *r;
2464 	int error;
2465 
2466 	r = lkb->lkb_resource;
2467 
2468 	hold_rsb(r);
2469 	lock_rsb(r);
2470 
2471 	error = validate_unlock_args(lkb, args);
2472 	if (error)
2473 		goto out;
2474 
2475 	error = _unlock_lock(r, lkb);
2476  out:
2477 	unlock_rsb(r);
2478 	put_rsb(r);
2479 	return error;
2480 }
2481 
2482 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2483 		       struct dlm_args *args)
2484 {
2485 	struct dlm_rsb *r;
2486 	int error;
2487 
2488 	r = lkb->lkb_resource;
2489 
2490 	hold_rsb(r);
2491 	lock_rsb(r);
2492 
2493 	error = validate_unlock_args(lkb, args);
2494 	if (error)
2495 		goto out;
2496 
2497 	error = _cancel_lock(r, lkb);
2498  out:
2499 	unlock_rsb(r);
2500 	put_rsb(r);
2501 	return error;
2502 }
2503 
2504 /*
2505  * Two stage 1 varieties:  dlm_lock() and dlm_unlock()
2506  */
2507 
2508 int dlm_lock(dlm_lockspace_t *lockspace,
2509 	     int mode,
2510 	     struct dlm_lksb *lksb,
2511 	     uint32_t flags,
2512 	     void *name,
2513 	     unsigned int namelen,
2514 	     uint32_t parent_lkid,
2515 	     void (*ast) (void *astarg),
2516 	     void *astarg,
2517 	     void (*bast) (void *astarg, int mode))
2518 {
2519 	struct dlm_ls *ls;
2520 	struct dlm_lkb *lkb;
2521 	struct dlm_args args;
2522 	int error, convert = flags & DLM_LKF_CONVERT;
2523 
2524 	ls = dlm_find_lockspace_local(lockspace);
2525 	if (!ls)
2526 		return -EINVAL;
2527 
2528 	dlm_lock_recovery(ls);
2529 
2530 	if (convert)
2531 		error = find_lkb(ls, lksb->sb_lkid, &lkb);
2532 	else
2533 		error = create_lkb(ls, &lkb);
2534 
2535 	if (error)
2536 		goto out;
2537 
2538 	error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
2539 			      astarg, bast, &args);
2540 	if (error)
2541 		goto out_put;
2542 
2543 	if (convert)
2544 		error = convert_lock(ls, lkb, &args);
2545 	else
2546 		error = request_lock(ls, lkb, name, namelen, &args);
2547 
2548 	if (error == -EINPROGRESS)
2549 		error = 0;
2550  out_put:
2551 	if (convert || error)
2552 		__put_lkb(ls, lkb);
2553 	if (error == -EAGAIN || error == -EDEADLK)
2554 		error = 0;
2555  out:
2556 	dlm_unlock_recovery(ls);
2557 	dlm_put_lockspace(ls);
2558 	return error;
2559 }
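
/* Illustrative sketch of a kernel caller (an assumption for
   documentation, not part of the dlm): request an EX lock on "example"
   and wait for the completion ast.  The struct, the function names and
   the completion-based wait below are hypothetical; real callers choose
   their own wait mechanism. */

#if 0	/* example only, not built */
struct example_lock {
	struct dlm_lksb lksb;
	struct completion done;
};

static void example_ast(void *astarg)
{
	struct example_lock *el = astarg;

	/* el->lksb.sb_status now holds the result: 0, -EAGAIN, ... */
	complete(&el->done);
}

static int example_request(dlm_lockspace_t *lockspace)
{
	struct example_lock el;
	int error;

	init_completion(&el.done);

	error = dlm_lock(lockspace, DLM_LOCK_EX, &el.lksb, 0, "example", 7,
			 0, example_ast, &el, NULL);
	if (error)
		return error;		/* rejected before being queued */

	wait_for_completion(&el.done);
	return el.lksb.sb_status;	/* delivered via example_ast */
}
#endif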
2560 
2561 int dlm_unlock(dlm_lockspace_t *lockspace,
2562 	       uint32_t lkid,
2563 	       uint32_t flags,
2564 	       struct dlm_lksb *lksb,
2565 	       void *astarg)
2566 {
2567 	struct dlm_ls *ls;
2568 	struct dlm_lkb *lkb;
2569 	struct dlm_args args;
2570 	int error;
2571 
2572 	ls = dlm_find_lockspace_local(lockspace);
2573 	if (!ls)
2574 		return -EINVAL;
2575 
2576 	dlm_lock_recovery(ls);
2577 
2578 	error = find_lkb(ls, lkid, &lkb);
2579 	if (error)
2580 		goto out;
2581 
2582 	error = set_unlock_args(flags, astarg, &args);
2583 	if (error)
2584 		goto out_put;
2585 
2586 	if (flags & DLM_LKF_CANCEL)
2587 		error = cancel_lock(ls, lkb, &args);
2588 	else
2589 		error = unlock_lock(ls, lkb, &args);
2590 
2591 	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
2592 		error = 0;
2593 	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
2594 		error = 0;
2595  out_put:
2596 	dlm_put_lkb(lkb);
2597  out:
2598 	dlm_unlock_recovery(ls);
2599 	dlm_put_lockspace(ls);
2600 	return error;
2601 }
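
/* Companion sketch (again hypothetical, building on the example above):
   releasing the lock.  The unlock result arrives through the same
   completion ast, with sb_status set by queue_cast(). */

#if 0	/* example only, not built */
static int example_release(dlm_lockspace_t *lockspace,
			   struct example_lock *el)
{
	int error;

	init_completion(&el->done);

	error = dlm_unlock(lockspace, el->lksb.sb_lkid, 0, &el->lksb, el);
	if (error)
		return error;

	wait_for_completion(&el->done);
	return 0;	/* el->lksb.sb_status holds -DLM_EUNLOCK */
}
#endif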
2602 
2603 /*
2604  * send/receive routines for remote operations and replies
2605  *
2606  * send_args
2607  * send_common
2608  * send_request			receive_request
2609  * send_convert			receive_convert
2610  * send_unlock			receive_unlock
2611  * send_cancel			receive_cancel
2612  * send_grant			receive_grant
2613  * send_bast			receive_bast
2614  * send_lookup			receive_lookup
2615  * send_remove			receive_remove
2616  *
2617  * 				send_common_reply
2618  * receive_request_reply	send_request_reply
2619  * receive_convert_reply	send_convert_reply
2620  * receive_unlock_reply		send_unlock_reply
2621  * receive_cancel_reply		send_cancel_reply
2622  * receive_lookup_reply		send_lookup_reply
2623  */
2624 
2625 static int _create_message(struct dlm_ls *ls, int mb_len,
2626 			   int to_nodeid, int mstype,
2627 			   struct dlm_message **ms_ret,
2628 			   struct dlm_mhandle **mh_ret)
2629 {
2630 	struct dlm_message *ms;
2631 	struct dlm_mhandle *mh;
2632 	char *mb;
2633 
2634 	/* dlm_lowcomms_get_buffer() gives us a message handle (mh) that we
2635 	   need to pass into dlm_lowcomms_commit_buffer() and a message
2636 	   buffer (mb) that we write our data into */
2637 
2638 	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, ls->ls_allocation, &mb);
2639 	if (!mh)
2640 		return -ENOBUFS;
2641 
2642 	memset(mb, 0, mb_len);
2643 
2644 	ms = (struct dlm_message *) mb;
2645 
2646 	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
2647 	ms->m_header.h_lockspace = ls->ls_global_id;
2648 	ms->m_header.h_nodeid = dlm_our_nodeid();
2649 	ms->m_header.h_length = mb_len;
2650 	ms->m_header.h_cmd = DLM_MSG;
2651 
2652 	ms->m_type = mstype;
2653 
2654 	*mh_ret = mh;
2655 	*ms_ret = ms;
2656 	return 0;
2657 }
2658 
2659 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
2660 			  int to_nodeid, int mstype,
2661 			  struct dlm_message **ms_ret,
2662 			  struct dlm_mhandle **mh_ret)
2663 {
2664 	int mb_len = sizeof(struct dlm_message);
2665 
2666 	switch (mstype) {
2667 	case DLM_MSG_REQUEST:
2668 	case DLM_MSG_LOOKUP:
2669 	case DLM_MSG_REMOVE:
2670 		mb_len += r->res_length;
2671 		break;
2672 	case DLM_MSG_CONVERT:
2673 	case DLM_MSG_UNLOCK:
2674 	case DLM_MSG_REQUEST_REPLY:
2675 	case DLM_MSG_CONVERT_REPLY:
2676 	case DLM_MSG_GRANT:
2677 		if (lkb && lkb->lkb_lvbptr)
2678 			mb_len += r->res_ls->ls_lvblen;
2679 		break;
2680 	}
2681 
2682 	return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
2683 			       ms_ret, mh_ret);
2684 }
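
/* Sizing example for the switch above: a DLM_MSG_REQUEST carrying a
   7-byte resource name allocates sizeof(struct dlm_message) + 7; the
   name (or the lvb, for the other message types) travels in m_extra
   and is copied in by send_args(). */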
2685 
2686 /* further lowcomms enhancements or alternate implementations may make
2687    the return value from this function useful at some point */
2688 
2689 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
2690 {
2691 	dlm_message_out(ms);
2692 	dlm_lowcomms_commit_buffer(mh);
2693 	return 0;
2694 }
2695 
2696 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2697 		      struct dlm_message *ms)
2698 {
2699 	ms->m_nodeid   = lkb->lkb_nodeid;
2700 	ms->m_pid      = lkb->lkb_ownpid;
2701 	ms->m_lkid     = lkb->lkb_id;
2702 	ms->m_remid    = lkb->lkb_remid;
2703 	ms->m_exflags  = lkb->lkb_exflags;
2704 	ms->m_sbflags  = lkb->lkb_sbflags;
2705 	ms->m_flags    = lkb->lkb_flags;
2706 	ms->m_lvbseq   = lkb->lkb_lvbseq;
2707 	ms->m_status   = lkb->lkb_status;
2708 	ms->m_grmode   = lkb->lkb_grmode;
2709 	ms->m_rqmode   = lkb->lkb_rqmode;
2710 	ms->m_hash     = r->res_hash;
2711 
2712 	/* m_result and m_bastmode are set from function args,
2713 	   not from lkb fields */
2714 
2715 	if (lkb->lkb_bastaddr)
2716 		ms->m_asts |= AST_BAST;
2717 	if (lkb->lkb_astaddr)
2718 		ms->m_asts |= AST_COMP;
2719 
2720 	/* compare with switch in create_message; send_remove() doesn't
2721 	   use send_args() */
2722 
2723 	switch (ms->m_type) {
2724 	case DLM_MSG_REQUEST:
2725 	case DLM_MSG_LOOKUP:
2726 		memcpy(ms->m_extra, r->res_name, r->res_length);
2727 		break;
2728 	case DLM_MSG_CONVERT:
2729 	case DLM_MSG_UNLOCK:
2730 	case DLM_MSG_REQUEST_REPLY:
2731 	case DLM_MSG_CONVERT_REPLY:
2732 	case DLM_MSG_GRANT:
2733 		if (!lkb->lkb_lvbptr)
2734 			break;
2735 		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2736 		break;
2737 	}
2738 }
2739 
2740 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2741 {
2742 	struct dlm_message *ms;
2743 	struct dlm_mhandle *mh;
2744 	int to_nodeid, error;
2745 
2746 	error = add_to_waiters(lkb, mstype);
2747 	if (error)
2748 		return error;
2749 
2750 	to_nodeid = r->res_nodeid;
2751 
2752 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2753 	if (error)
2754 		goto fail;
2755 
2756 	send_args(r, lkb, ms);
2757 
2758 	error = send_message(mh, ms);
2759 	if (error)
2760 		goto fail;
2761 	return 0;
2762 
2763  fail:
2764 	remove_from_waiters(lkb, msg_reply_type(mstype));
2765 	return error;
2766 }
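
/* The lkb is put on the waiters list before the message is sent so a
   fast reply can't arrive before we're ready to match it; on a send
   failure it's removed again using the reply type the message would
   have produced (msg_reply_type). */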
2767 
2768 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2769 {
2770 	return send_common(r, lkb, DLM_MSG_REQUEST);
2771 }
2772 
2773 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2774 {
2775 	int error;
2776 
2777 	error = send_common(r, lkb, DLM_MSG_CONVERT);
2778 
2779 	/* down conversions go without a reply from the master */
2780 	if (!error && down_conversion(lkb)) {
2781 		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
2782 		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
2783 		r->res_ls->ls_stub_ms.m_result = 0;
2784 		r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
2785 		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2786 	}
2787 
2788 	return error;
2789 }
2790 
2791 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
2792    MASTER_UNCERTAIN to force the next request on the rsb to confirm
2793    that the master is still correct. */
2794 
2795 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2796 {
2797 	return send_common(r, lkb, DLM_MSG_UNLOCK);
2798 }
2799 
2800 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2801 {
2802 	return send_common(r, lkb, DLM_MSG_CANCEL);
2803 }
2804 
2805 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
2806 {
2807 	struct dlm_message *ms;
2808 	struct dlm_mhandle *mh;
2809 	int to_nodeid, error;
2810 
2811 	to_nodeid = lkb->lkb_nodeid;
2812 
2813 	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
2814 	if (error)
2815 		goto out;
2816 
2817 	send_args(r, lkb, ms);
2818 
2819 	ms->m_result = 0;
2820 
2821 	error = send_message(mh, ms);
2822  out:
2823 	return error;
2824 }
2825 
2826 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
2827 {
2828 	struct dlm_message *ms;
2829 	struct dlm_mhandle *mh;
2830 	int to_nodeid, error;
2831 
2832 	to_nodeid = lkb->lkb_nodeid;
2833 
2834 	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
2835 	if (error)
2836 		goto out;
2837 
2838 	send_args(r, lkb, ms);
2839 
2840 	ms->m_bastmode = mode;
2841 
2842 	error = send_message(mh, ms);
2843  out:
2844 	return error;
2845 }
2846 
2847 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
2848 {
2849 	struct dlm_message *ms;
2850 	struct dlm_mhandle *mh;
2851 	int to_nodeid, error;
2852 
2853 	error = add_to_waiters(lkb, DLM_MSG_LOOKUP);
2854 	if (error)
2855 		return error;
2856 
2857 	to_nodeid = dlm_dir_nodeid(r);
2858 
2859 	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
2860 	if (error)
2861 		goto fail;
2862 
2863 	send_args(r, lkb, ms);
2864 
2865 	error = send_message(mh, ms);
2866 	if (error)
2867 		goto fail;
2868 	return 0;
2869 
2870  fail:
2871 	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
2872 	return error;
2873 }
2874 
2875 static int send_remove(struct dlm_rsb *r)
2876 {
2877 	struct dlm_message *ms;
2878 	struct dlm_mhandle *mh;
2879 	int to_nodeid, error;
2880 
2881 	to_nodeid = dlm_dir_nodeid(r);
2882 
2883 	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
2884 	if (error)
2885 		goto out;
2886 
2887 	memcpy(ms->m_extra, r->res_name, r->res_length);
2888 	ms->m_hash = r->res_hash;
2889 
2890 	error = send_message(mh, ms);
2891  out:
2892 	return error;
2893 }
2894 
2895 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
2896 			     int mstype, int rv)
2897 {
2898 	struct dlm_message *ms;
2899 	struct dlm_mhandle *mh;
2900 	int to_nodeid, error;
2901 
2902 	to_nodeid = lkb->lkb_nodeid;
2903 
2904 	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2905 	if (error)
2906 		goto out;
2907 
2908 	send_args(r, lkb, ms);
2909 
2910 	ms->m_result = rv;
2911 
2912 	error = send_message(mh, ms);
2913  out:
2914 	return error;
2915 }
2916 
2917 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2918 {
2919 	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
2920 }
2921 
2922 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2923 {
2924 	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
2925 }
2926 
2927 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2928 {
2929 	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
2930 }
2931 
2932 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2933 {
2934 	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
2935 }
2936 
2937 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
2938 			     int ret_nodeid, int rv)
2939 {
2940 	struct dlm_rsb *r = &ls->ls_stub_rsb;
2941 	struct dlm_message *ms;
2942 	struct dlm_mhandle *mh;
2943 	int error, nodeid = ms_in->m_header.h_nodeid;
2944 
2945 	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
2946 	if (error)
2947 		goto out;
2948 
2949 	ms->m_lkid = ms_in->m_lkid;
2950 	ms->m_result = rv;
2951 	ms->m_nodeid = ret_nodeid;
2952 
2953 	error = send_message(mh, ms);
2954  out:
2955 	return error;
2956 }
2957 
2958 /* which args we save from a received message depends heavily on the type
2959    of message, unlike the send side where we can safely send everything about
2960    the lkb for any type of message */
2961 
2962 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
2963 {
2964 	lkb->lkb_exflags = ms->m_exflags;
2965 	lkb->lkb_sbflags = ms->m_sbflags;
2966 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2967 		         (ms->m_flags & 0x0000FFFF);
2968 }
2969 
2970 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2971 {
2972 	lkb->lkb_sbflags = ms->m_sbflags;
2973 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2974 		         (ms->m_flags & 0x0000FFFF);
2975 }
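
/* Only the low 16 bits of lkb_flags are shared between nodes in
   m_flags; the high 16 bits (the internal DLM_IFL_* flags) are local
   state, which is why both functions above preserve them when taking
   flags from a received message. */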
2976 
2977 static int receive_extralen(struct dlm_message *ms)
2978 {
2979 	return (ms->m_header.h_length - sizeof(struct dlm_message));
2980 }
2981 
2982 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
2983 		       struct dlm_message *ms)
2984 {
2985 	int len;
2986 
2987 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
2988 		if (!lkb->lkb_lvbptr)
2989 			lkb->lkb_lvbptr = allocate_lvb(ls);
2990 		if (!lkb->lkb_lvbptr)
2991 			return -ENOMEM;
2992 		len = receive_extralen(ms);
2993 		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2994 	}
2995 	return 0;
2996 }
2997 
2998 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2999 				struct dlm_message *ms)
3000 {
3001 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3002 	lkb->lkb_ownpid = ms->m_pid;
3003 	lkb->lkb_remid = ms->m_lkid;
3004 	lkb->lkb_grmode = DLM_LOCK_IV;
3005 	lkb->lkb_rqmode = ms->m_rqmode;
3006 	lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
3007 	lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);
3008 
3009 	DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
3010 
3011 	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3012 		/* lkb was just created so there won't be an lvb yet */
3013 		lkb->lkb_lvbptr = allocate_lvb(ls);
3014 		if (!lkb->lkb_lvbptr)
3015 			return -ENOMEM;
3016 	}
3017 
3018 	return 0;
3019 }
3020 
3021 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3022 				struct dlm_message *ms)
3023 {
3024 	if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
3025 		log_error(ls, "convert_args nodeid %d %d lkid %x %x",
3026 			  lkb->lkb_nodeid, ms->m_header.h_nodeid,
3027 			  lkb->lkb_id, lkb->lkb_remid);
3028 		return -EINVAL;
3029 	}
3030 
3031 	if (!is_master_copy(lkb))
3032 		return -EINVAL;
3033 
3034 	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3035 		return -EBUSY;
3036 
3037 	if (receive_lvb(ls, lkb, ms))
3038 		return -ENOMEM;
3039 
3040 	lkb->lkb_rqmode = ms->m_rqmode;
3041 	lkb->lkb_lvbseq = ms->m_lvbseq;
3042 
3043 	return 0;
3044 }
3045 
3046 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3047 			       struct dlm_message *ms)
3048 {
3049 	if (!is_master_copy(lkb))
3050 		return -EINVAL;
3051 	if (receive_lvb(ls, lkb, ms))
3052 		return -ENOMEM;
3053 	return 0;
3054 }
3055 
3056 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3057    uses to send a reply and that the remote end uses to process the reply. */
3058 
3059 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3060 {
3061 	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3062 	lkb->lkb_nodeid = ms->m_header.h_nodeid;
3063 	lkb->lkb_remid = ms->m_lkid;
3064 }
3065 
3066 static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
3067 {
3068 	struct dlm_lkb *lkb;
3069 	struct dlm_rsb *r;
3070 	int error, namelen;
3071 
3072 	error = create_lkb(ls, &lkb);
3073 	if (error)
3074 		goto fail;
3075 
3076 	receive_flags(lkb, ms);
3077 	lkb->lkb_flags |= DLM_IFL_MSTCPY;
3078 	error = receive_request_args(ls, lkb, ms);
3079 	if (error) {
3080 		__put_lkb(ls, lkb);
3081 		goto fail;
3082 	}
3083 
3084 	namelen = receive_extralen(ms);
3085 
3086 	error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
3087 	if (error) {
3088 		__put_lkb(ls, lkb);
3089 		goto fail;
3090 	}
3091 
3092 	lock_rsb(r);
3093 
3094 	attach_lkb(r, lkb);
3095 	error = do_request(r, lkb);
3096 	send_request_reply(r, lkb, error);
3097 
3098 	unlock_rsb(r);
3099 	put_rsb(r);
3100 
3101 	if (error == -EINPROGRESS)
3102 		error = 0;
3103 	if (error)
3104 		dlm_put_lkb(lkb);
3105 	return;
3106 
3107  fail:
3108 	setup_stub_lkb(ls, ms);
3109 	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3110 }
3111 
3112 static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
3113 {
3114 	struct dlm_lkb *lkb;
3115 	struct dlm_rsb *r;
3116 	int error, reply = 1;
3117 
3118 	error = find_lkb(ls, ms->m_remid, &lkb);
3119 	if (error)
3120 		goto fail;
3121 
3122 	r = lkb->lkb_resource;
3123 
3124 	hold_rsb(r);
3125 	lock_rsb(r);
3126 
3127 	receive_flags(lkb, ms);
3128 	error = receive_convert_args(ls, lkb, ms);
3129 	if (error)
3130 		goto out;
3131 	reply = !down_conversion(lkb);
3132 
3133 	error = do_convert(r, lkb);
3134  out:
3135 	if (reply)
3136 		send_convert_reply(r, lkb, error);
3137 
3138 	unlock_rsb(r);
3139 	put_rsb(r);
3140 	dlm_put_lkb(lkb);
3141 	return;
3142 
3143  fail:
3144 	setup_stub_lkb(ls, ms);
3145 	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3146 }
3147 
3148 static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
3149 {
3150 	struct dlm_lkb *lkb;
3151 	struct dlm_rsb *r;
3152 	int error;
3153 
3154 	error = find_lkb(ls, ms->m_remid, &lkb);
3155 	if (error)
3156 		goto fail;
3157 
3158 	r = lkb->lkb_resource;
3159 
3160 	hold_rsb(r);
3161 	lock_rsb(r);
3162 
3163 	receive_flags(lkb, ms);
3164 	error = receive_unlock_args(ls, lkb, ms);
3165 	if (error)
3166 		goto out;
3167 
3168 	error = do_unlock(r, lkb);
3169  out:
3170 	send_unlock_reply(r, lkb, error);
3171 
3172 	unlock_rsb(r);
3173 	put_rsb(r);
3174 	dlm_put_lkb(lkb);
3175 	return;
3176 
3177  fail:
3178 	setup_stub_lkb(ls, ms);
3179 	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3180 }
3181 
3182 static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
3183 {
3184 	struct dlm_lkb *lkb;
3185 	struct dlm_rsb *r;
3186 	int error;
3187 
3188 	error = find_lkb(ls, ms->m_remid, &lkb);
3189 	if (error)
3190 		goto fail;
3191 
3192 	receive_flags(lkb, ms);
3193 
3194 	r = lkb->lkb_resource;
3195 
3196 	hold_rsb(r);
3197 	lock_rsb(r);
3198 
3199 	error = do_cancel(r, lkb);
3200 	send_cancel_reply(r, lkb, error);
3201 
3202 	unlock_rsb(r);
3203 	put_rsb(r);
3204 	dlm_put_lkb(lkb);
3205 	return;
3206 
3207  fail:
3208 	setup_stub_lkb(ls, ms);
3209 	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3210 }
3211 
3212 static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
3213 {
3214 	struct dlm_lkb *lkb;
3215 	struct dlm_rsb *r;
3216 	int error;
3217 
3218 	error = find_lkb(ls, ms->m_remid, &lkb);
3219 	if (error) {
3220 		log_error(ls, "receive_grant no lkb");
3221 		return;
3222 	}
3223 	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3224 
3225 	r = lkb->lkb_resource;
3226 
3227 	hold_rsb(r);
3228 	lock_rsb(r);
3229 
3230 	receive_flags_reply(lkb, ms);
3231 	if (is_altmode(lkb))
3232 		munge_altmode(lkb, ms);
3233 	grant_lock_pc(r, lkb, ms);
3234 	queue_cast(r, lkb, 0);
3235 
3236 	unlock_rsb(r);
3237 	put_rsb(r);
3238 	dlm_put_lkb(lkb);
3239 }
3240 
3241 static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
3242 {
3243 	struct dlm_lkb *lkb;
3244 	struct dlm_rsb *r;
3245 	int error;
3246 
3247 	error = find_lkb(ls, ms->m_remid, &lkb);
3248 	if (error) {
3249 		log_error(ls, "receive_bast no lkb");
3250 		return;
3251 	}
3252 	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3253 
3254 	r = lkb->lkb_resource;
3255 
3256 	hold_rsb(r);
3257 	lock_rsb(r);
3258 
3259 	queue_bast(r, lkb, ms->m_bastmode);
3260 
3261 	unlock_rsb(r);
3262 	put_rsb(r);
3263 	dlm_put_lkb(lkb);
3264 }
3265 
3266 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
3267 {
3268 	int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
3269 
3270 	from_nodeid = ms->m_header.h_nodeid;
3271 	our_nodeid = dlm_our_nodeid();
3272 
3273 	len = receive_extralen(ms);
3274 
3275 	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3276 	if (dir_nodeid != our_nodeid) {
3277 		log_error(ls, "lookup dir_nodeid %d from %d",
3278 			  dir_nodeid, from_nodeid);
3279 		error = -EINVAL;
3280 		ret_nodeid = -1;
3281 		goto out;
3282 	}
3283 
3284 	error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
3285 
3286 	/* Optimization: we're master so treat lookup as a request */
3287 	if (!error && ret_nodeid == our_nodeid) {
3288 		receive_request(ls, ms);
3289 		return;
3290 	}
3291  out:
3292 	send_lookup_reply(ls, ms, ret_nodeid, error);
3293 }
3294 
3295 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
3296 {
3297 	int len, dir_nodeid, from_nodeid;
3298 
3299 	from_nodeid = ms->m_header.h_nodeid;
3300 
3301 	len = receive_extralen(ms);
3302 
3303 	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3304 	if (dir_nodeid != dlm_our_nodeid()) {
3305 		log_error(ls, "remove dir entry dir_nodeid %d from %d",
3306 			  dir_nodeid, from_nodeid);
3307 		return;
3308 	}
3309 
3310 	dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
3311 }
3312 
3313 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
3314 {
3315 	do_purge(ls, ms->m_nodeid, ms->m_pid);
3316 }
3317 
3318 static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
3319 {
3320 	struct dlm_lkb *lkb;
3321 	struct dlm_rsb *r;
3322 	int error, mstype, result;
3323 
3324 	error = find_lkb(ls, ms->m_remid, &lkb);
3325 	if (error) {
3326 		log_error(ls, "receive_request_reply no lkb");
3327 		return;
3328 	}
3329 	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3330 
3331 	r = lkb->lkb_resource;
3332 	hold_rsb(r);
3333 	lock_rsb(r);
3334 
3335 	mstype = lkb->lkb_wait_type;
3336 	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
3337 	if (error)
3338 		goto out;
3339 
3340 	/* Optimization: the dir node was also the master, so it took our
3341 	   lookup as a request and sent a request reply, not a lookup reply */
3342 	if (mstype == DLM_MSG_LOOKUP) {
3343 		r->res_nodeid = ms->m_header.h_nodeid;
3344 		lkb->lkb_nodeid = r->res_nodeid;
3345 	}
3346 
3347 	/* this is the value returned from do_request() on the master */
3348 	result = ms->m_result;
3349 
3350 	switch (result) {
3351 	case -EAGAIN:
3352 		/* request would block (be queued) on remote master */
3353 		queue_cast(r, lkb, -EAGAIN);
3354 		confirm_master(r, -EAGAIN);
3355 		unhold_lkb(lkb); /* undoes create_lkb() */
3356 		break;
3357 
3358 	case -EINPROGRESS:
3359 	case 0:
3360 		/* request was queued or granted on remote master */
3361 		receive_flags_reply(lkb, ms);
3362 		lkb->lkb_remid = ms->m_lkid;
3363 		if (is_altmode(lkb))
3364 			munge_altmode(lkb, ms);
3365 		if (result) {
3366 			add_lkb(r, lkb, DLM_LKSTS_WAITING);
3367 			add_timeout(lkb);
3368 		} else {
3369 			grant_lock_pc(r, lkb, ms);
3370 			queue_cast(r, lkb, 0);
3371 		}
3372 		confirm_master(r, result);
3373 		break;
3374 
3375 	case -EBADR:
3376 	case -ENOTBLK:
3377 		/* find_rsb failed to find rsb or rsb wasn't master */
3378 		log_debug(ls, "receive_request_reply %x %x master diff %d %d",
3379 			  lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
3380 		r->res_nodeid = -1;
3381 		lkb->lkb_nodeid = -1;
3382 
3383 		if (is_overlap(lkb)) {
3384 			/* we'll ignore error in cancel/unlock reply */
3385 			queue_cast_overlap(r, lkb);
3386 			unhold_lkb(lkb); /* undoes create_lkb() */
3387 		} else
3388 			_request_lock(r, lkb);
3389 		break;
3390 
3391 	default:
3392 		log_error(ls, "receive_request_reply %x error %d",
3393 			  lkb->lkb_id, result);
3394 	}
3395 
3396 	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
3397 		log_debug(ls, "receive_request_reply %x result %d unlock",
3398 			  lkb->lkb_id, result);
3399 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3400 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3401 		send_unlock(r, lkb);
3402 	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
3403 		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
3404 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3405 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3406 		send_cancel(r, lkb);
3407 	} else {
3408 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3409 		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3410 	}
3411  out:
3412 	unlock_rsb(r);
3413 	put_rsb(r);
3414 	dlm_put_lkb(lkb);
3415 }
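
/* The tail of receive_request_reply() above acts on an unlock or cancel
   the user issued while the request was still in flight: a granted or
   queued request is unlocked, a queued request is cancelled, and in all
   other cases the stale overlap flags are simply cleared. */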
3416 
3417 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3418 				    struct dlm_message *ms)
3419 {
3420 	/* this is the value returned from do_convert() on the master */
3421 	switch (ms->m_result) {
3422 	case -EAGAIN:
3423 		/* convert would block (be queued) on remote master */
3424 		queue_cast(r, lkb, -EAGAIN);
3425 		break;
3426 
3427 	case -EDEADLK:
3428 		receive_flags_reply(lkb, ms);
3429 		revert_lock_pc(r, lkb);
3430 		queue_cast(r, lkb, -EDEADLK);
3431 		break;
3432 
3433 	case -EINPROGRESS:
3434 		/* convert was queued on remote master */
3435 		receive_flags_reply(lkb, ms);
3436 		if (is_demoted(lkb))
3437 			munge_demoted(lkb, ms);
3438 		del_lkb(r, lkb);
3439 		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3440 		add_timeout(lkb);
3441 		break;
3442 
3443 	case 0:
3444 		/* convert was granted on remote master */
3445 		receive_flags_reply(lkb, ms);
3446 		if (is_demoted(lkb))
3447 			munge_demoted(lkb, ms);
3448 		grant_lock_pc(r, lkb, ms);
3449 		queue_cast(r, lkb, 0);
3450 		break;
3451 
3452 	default:
3453 		log_error(r->res_ls, "receive_convert_reply %x error %d",
3454 			  lkb->lkb_id, ms->m_result);
3455 	}
3456 }
3457 
3458 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3459 {
3460 	struct dlm_rsb *r = lkb->lkb_resource;
3461 	int error;
3462 
3463 	hold_rsb(r);
3464 	lock_rsb(r);
3465 
3466 	/* stub reply can happen with waiters_mutex held */
3467 	error = remove_from_waiters_ms(lkb, ms);
3468 	if (error)
3469 		goto out;
3470 
3471 	__receive_convert_reply(r, lkb, ms);
3472  out:
3473 	unlock_rsb(r);
3474 	put_rsb(r);
3475 }
3476 
3477 static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
3478 {
3479 	struct dlm_lkb *lkb;
3480 	int error;
3481 
3482 	error = find_lkb(ls, ms->m_remid, &lkb);
3483 	if (error) {
3484 		log_error(ls, "receive_convert_reply no lkb");
3485 		return;
3486 	}
3487 	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3488 
3489 	_receive_convert_reply(lkb, ms);
3490 	dlm_put_lkb(lkb);
3491 }
3492 
3493 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3494 {
3495 	struct dlm_rsb *r = lkb->lkb_resource;
3496 	int error;
3497 
3498 	hold_rsb(r);
3499 	lock_rsb(r);
3500 
3501 	/* stub reply can happen with waiters_mutex held */
3502 	error = remove_from_waiters_ms(lkb, ms);
3503 	if (error)
3504 		goto out;
3505 
3506 	/* this is the value returned from do_unlock() on the master */
3507 
3508 	switch (ms->m_result) {
3509 	case -DLM_EUNLOCK:
3510 		receive_flags_reply(lkb, ms);
3511 		remove_lock_pc(r, lkb);
3512 		queue_cast(r, lkb, -DLM_EUNLOCK);
3513 		break;
3514 	case -ENOENT:
3515 		break;
3516 	default:
3517 		log_error(r->res_ls, "receive_unlock_reply %x error %d",
3518 			  lkb->lkb_id, ms->m_result);
3519 	}
3520  out:
3521 	unlock_rsb(r);
3522 	put_rsb(r);
3523 }
3524 
3525 static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
3526 {
3527 	struct dlm_lkb *lkb;
3528 	int error;
3529 
3530 	error = find_lkb(ls, ms->m_remid, &lkb);
3531 	if (error) {
3532 		log_error(ls, "receive_unlock_reply no lkb");
3533 		return;
3534 	}
3535 	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3536 
3537 	_receive_unlock_reply(lkb, ms);
3538 	dlm_put_lkb(lkb);
3539 }
3540 
3541 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3542 {
3543 	struct dlm_rsb *r = lkb->lkb_resource;
3544 	int error;
3545 
3546 	hold_rsb(r);
3547 	lock_rsb(r);
3548 
3549 	/* stub reply can happen with waiters_mutex held */
3550 	error = remove_from_waiters_ms(lkb, ms);
3551 	if (error)
3552 		goto out;
3553 
3554 	/* this is the value returned from do_cancel() on the master */
3555 
3556 	switch (ms->m_result) {
3557 	case -DLM_ECANCEL:
3558 		receive_flags_reply(lkb, ms);
3559 		revert_lock_pc(r, lkb);
3560 		queue_cast(r, lkb, -DLM_ECANCEL);
3561 		break;
3562 	case 0:
3563 		break;
3564 	default:
3565 		log_error(r->res_ls, "receive_cancel_reply %x error %d",
3566 			  lkb->lkb_id, ms->m_result);
3567 	}
3568  out:
3569 	unlock_rsb(r);
3570 	put_rsb(r);
3571 }
3572 
3573 static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
3574 {
3575 	struct dlm_lkb *lkb;
3576 	int error;
3577 
3578 	error = find_lkb(ls, ms->m_remid, &lkb);
3579 	if (error) {
3580 		log_error(ls, "receive_cancel_reply no lkb");
3581 		return;
3582 	}
3583 	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3584 
3585 	_receive_cancel_reply(lkb, ms);
3586 	dlm_put_lkb(lkb);
3587 }
3588 
3589 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
3590 {
3591 	struct dlm_lkb *lkb;
3592 	struct dlm_rsb *r;
3593 	int error, ret_nodeid;
3594 
3595 	error = find_lkb(ls, ms->m_lkid, &lkb);
3596 	if (error) {
3597 		log_error(ls, "receive_lookup_reply no lkb");
3598 		return;
3599 	}
3600 
3601 	/* ms->m_result is the value returned by dlm_dir_lookup on dir node
3602 	   FIXME: will a non-zero error ever be returned? */
3603 
3604 	r = lkb->lkb_resource;
3605 	hold_rsb(r);
3606 	lock_rsb(r);
3607 
3608 	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3609 	if (error)
3610 		goto out;
3611 
3612 	ret_nodeid = ms->m_nodeid;
3613 	if (ret_nodeid == dlm_our_nodeid()) {
3614 		r->res_nodeid = 0;
3615 		ret_nodeid = 0;
3616 		r->res_first_lkid = 0;
3617 	} else {
3618 		/* set_master() will copy res_nodeid to lkb_nodeid */
3619 		r->res_nodeid = ret_nodeid;
3620 	}
3621 
3622 	if (is_overlap(lkb)) {
3623 		log_debug(ls, "receive_lookup_reply %x unlock %x",
3624 			  lkb->lkb_id, lkb->lkb_flags);
3625 		queue_cast_overlap(r, lkb);
3626 		unhold_lkb(lkb); /* undoes create_lkb() */
3627 		goto out_list;
3628 	}
3629 
3630 	_request_lock(r, lkb);
3631 
3632  out_list:
3633 	if (!ret_nodeid)
3634 		process_lookup_list(r);
3635  out:
3636 	unlock_rsb(r);
3637 	put_rsb(r);
3638 	dlm_put_lkb(lkb);
3639 }
3640 
3641 int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
3642 {
3643 	struct dlm_message *ms = (struct dlm_message *) hd;
3644 	struct dlm_ls *ls;
3645 	int error = 0;
3646 
3647 	if (!recovery)
3648 		dlm_message_in(ms);
3649 
3650 	ls = dlm_find_lockspace_global(hd->h_lockspace);
3651 	if (!ls) {
3652 		log_print("drop message %d from %d for unknown lockspace %d",
3653 			  ms->m_type, nodeid, hd->h_lockspace);
3654 		return -EINVAL;
3655 	}
3656 
3657 	/* recovery may have just ended leaving a bunch of backed-up requests
3658 	   in the requestqueue; wait while dlm_recoverd clears them */
3659 
3660 	if (!recovery)
3661 		dlm_wait_requestqueue(ls);
3662 
3663 	/* recovery may have just started while there were a bunch of
3664 	   in-flight requests -- save them in requestqueue to be processed
3665 	   after recovery.  we can't let dlm_recvd block on the recovery
3666 	   lock.  if dlm_recoverd is calling this function to clear the
3667 	   requestqueue, it needs to be interrupted (-EINTR) if another
3668 	   recovery operation is starting. */
3669 
3670 	while (1) {
3671 		if (dlm_locking_stopped(ls)) {
3672 			if (recovery) {
3673 				error = -EINTR;
3674 				goto out;
3675 			}
3676 			error = dlm_add_requestqueue(ls, nodeid, hd);
3677 			if (error == -EAGAIN)
3678 				continue;
3679 			else {
3680 				error = -EINTR;
3681 				goto out;
3682 			}
3683 		}
3684 
3685 		if (dlm_lock_recovery_try(ls))
3686 			break;
3687 		schedule();
3688 	}
3689 
3690 	switch (ms->m_type) {
3691 
3692 	/* messages sent to a master node */
3693 
3694 	case DLM_MSG_REQUEST:
3695 		receive_request(ls, ms);
3696 		break;
3697 
3698 	case DLM_MSG_CONVERT:
3699 		receive_convert(ls, ms);
3700 		break;
3701 
3702 	case DLM_MSG_UNLOCK:
3703 		receive_unlock(ls, ms);
3704 		break;
3705 
3706 	case DLM_MSG_CANCEL:
3707 		receive_cancel(ls, ms);
3708 		break;
3709 
3710 	/* messages sent from a master node (replies to above) */
3711 
3712 	case DLM_MSG_REQUEST_REPLY:
3713 		receive_request_reply(ls, ms);
3714 		break;
3715 
3716 	case DLM_MSG_CONVERT_REPLY:
3717 		receive_convert_reply(ls, ms);
3718 		break;
3719 
3720 	case DLM_MSG_UNLOCK_REPLY:
3721 		receive_unlock_reply(ls, ms);
3722 		break;
3723 
3724 	case DLM_MSG_CANCEL_REPLY:
3725 		receive_cancel_reply(ls, ms);
3726 		break;
3727 
3728 	/* messages sent from a master node (only two types of async msg) */
3729 
3730 	case DLM_MSG_GRANT:
3731 		receive_grant(ls, ms);
3732 		break;
3733 
3734 	case DLM_MSG_BAST:
3735 		receive_bast(ls, ms);
3736 		break;
3737 
3738 	/* messages sent to a dir node */
3739 
3740 	case DLM_MSG_LOOKUP:
3741 		receive_lookup(ls, ms);
3742 		break;
3743 
3744 	case DLM_MSG_REMOVE:
3745 		receive_remove(ls, ms);
3746 		break;
3747 
3748 	/* messages sent from a dir node (remove has no reply) */
3749 
3750 	case DLM_MSG_LOOKUP_REPLY:
3751 		receive_lookup_reply(ls, ms);
3752 		break;
3753 
3754 	/* other messages */
3755 
3756 	case DLM_MSG_PURGE:
3757 		receive_purge(ls, ms);
3758 		break;
3759 
3760 	default:
3761 		log_error(ls, "unknown message type %d", ms->m_type);
3762 	}
3763 
3764 	dlm_unlock_recovery(ls);
3765  out:
3766 	dlm_put_lockspace(ls);
3767 	dlm_astd_wake();
3768 	return error;
3769 }
3770 
3771 
3772 /*
3773  * Recovery related
3774  */
3775 
3776 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
3777 {
3778 	if (middle_conversion(lkb)) {
3779 		hold_lkb(lkb);
3780 		ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3781 		ls->ls_stub_ms.m_result = -EINPROGRESS;
3782 		ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3783 		_receive_convert_reply(lkb, &ls->ls_stub_ms);
3784 
3785 		/* Same special case as in receive_rcom_lock_args() */
3786 		lkb->lkb_grmode = DLM_LOCK_IV;
3787 		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
3788 		unhold_lkb(lkb);
3789 
3790 	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
3791 		lkb->lkb_flags |= DLM_IFL_RESEND;
3792 	}
3793 
3794 	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
3795 	   conversions are async; there's no reply from the remote master */
3796 }
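
/* middle_conversion() matches PR<->CW converts: the two modes conflict
   with each other, so the convert is neither a pure upgrade nor a pure
   downgrade, and the failed master could have granted, demoted or queued
   it.  Faking an -EINPROGRESS reply parks the lkb on the convert queue,
   and the IV grmode plus RSB_RECOVER_CONVERT tell lock recovery to
   rebuild its state. */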
3797 
3798 /* A waiting lkb needs recovery if the master node has failed, or
3799    the master node is changing (only when no directory is used) */
3800 
3801 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
3802 {
3803 	if (dlm_is_removed(ls, lkb->lkb_nodeid))
3804 		return 1;
3805 
3806 	if (!dlm_no_directory(ls))
3807 		return 0;
3808 
3809 	if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
3810 		return 1;
3811 
3812 	return 0;
3813 }
3814 
3815 /* Recovery for locks that are waiting for replies from nodes that are now
3816    gone.  We can just complete unlocks and cancels by faking a reply from the
3817    dead node.  Requests and up-conversions we flag to be resent after
3818    recovery.  Down-conversions can just be completed with a fake reply like
3819    unlocks.  Conversions between PR and CW need special attention. */
3820 
3821 void dlm_recover_waiters_pre(struct dlm_ls *ls)
3822 {
3823 	struct dlm_lkb *lkb, *safe;
3824 
3825 	mutex_lock(&ls->ls_waiters_mutex);
3826 
3827 	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
3828 		log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
3829 			  lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);
3830 
3831 		/* all outstanding lookups, regardless of destination, will be
3832 		   resent after recovery is done */
3833 
3834 		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
3835 			lkb->lkb_flags |= DLM_IFL_RESEND;
3836 			continue;
3837 		}
3838 
3839 		if (!waiter_needs_recovery(ls, lkb))
3840 			continue;
3841 
3842 		switch (lkb->lkb_wait_type) {
3843 
3844 		case DLM_MSG_REQUEST:
3845 			lkb->lkb_flags |= DLM_IFL_RESEND;
3846 			break;
3847 
3848 		case DLM_MSG_CONVERT:
3849 			recover_convert_waiter(ls, lkb);
3850 			break;
3851 
3852 		case DLM_MSG_UNLOCK:
3853 			hold_lkb(lkb);
3854 			ls->ls_stub_ms.m_type = DLM_MSG_UNLOCK_REPLY;
3855 			ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
3856 			ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3857 			_receive_unlock_reply(lkb, &ls->ls_stub_ms);
3858 			dlm_put_lkb(lkb);
3859 			break;
3860 
3861 		case DLM_MSG_CANCEL:
3862 			hold_lkb(lkb);
3863 			ls->ls_stub_ms.m_type = DLM_MSG_CANCEL_REPLY;
3864 			ls->ls_stub_ms.m_result = -DLM_ECANCEL;
3865 			ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3866 			_receive_cancel_reply(lkb, &ls->ls_stub_ms);
3867 			dlm_put_lkb(lkb);
3868 			break;
3869 
3870 		default:
3871 			log_error(ls, "invalid lkb wait_type %d",
3872 				  lkb->lkb_wait_type);
3873 		}
3874 		schedule();
3875 	}
3876 	mutex_unlock(&ls->ls_waiters_mutex);
3877 }
3878 
static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	int found = 0;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (lkb->lkb_flags & DLM_IFL_RESEND) {
			hold_lkb(lkb);
			found = 1;
			break;
		}
	}
	mutex_unlock(&ls->ls_waiters_mutex);

	if (!found)
		lkb = NULL;
	return lkb;
}

/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
   master or dir-node for r.  Processing the lkb may result in it being placed
   back on waiters. */

/* We do this after normal locking has been enabled and any saved messages
   (in requestqueue) have been processed.  We should be confident that at
   this point we won't get or process a reply to any of these waiting
   operations.  But, new ops may be coming in on the rsbs/locks here from
   userspace or remotely. */

/* there may have been an overlap unlock/cancel prior to recovery or after
   recovery.  if before, the lkb may still have a positive wait_count; if
   after, the overlap flag would just have been set and nothing new sent.
   we can be confident here that any replies to either the initial op or
   overlap ops prior to recovery have been received. */

int dlm_recover_waiters_post(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error = 0, mstype, err, oc, ou;

	while (1) {
		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "recover_waiters_post aborted");
			error = -EINTR;
			break;
		}

		lkb = find_resend_waiter(ls);
		if (!lkb)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		mstype = lkb->lkb_wait_type;
		oc = is_overlap_cancel(lkb);
		ou = is_overlap_unlock(lkb);
		err = 0;

		log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
			  lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);

		/* At this point we assume that we won't get a reply to any
		   previous op or overlap op on this lock.  First, do a big
		   remove_from_waiters() for all previous ops. */

		lkb->lkb_flags &= ~DLM_IFL_RESEND;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_type = 0;
		lkb->lkb_wait_count = 0;
		mutex_lock(&ls->ls_waiters_mutex);
		list_del_init(&lkb->lkb_wait_reply);
		mutex_unlock(&ls->ls_waiters_mutex);
		unhold_lkb(lkb); /* for waiters list */

		if (oc || ou) {
			/* do an unlock or cancel instead of resending */
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
							-DLM_ECANCEL);
				unhold_lkb(lkb); /* undoes create_lkb() */
				break;
			case DLM_MSG_CONVERT:
				if (oc) {
					queue_cast(r, lkb, -DLM_ECANCEL);
				} else {
					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
					_unlock_lock(r, lkb);
				}
				break;
			default:
				err = 1;
			}
		} else {
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				_request_lock(r, lkb);
				if (is_master(r))
					confirm_master(r, 0);
				break;
			case DLM_MSG_CONVERT:
				_convert_lock(r, lkb);
				break;
			default:
				err = 1;
			}
		}

		if (err)
			log_error(ls, "recover_waiters_post %x %d %x %d %d",
				  lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou);
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
	}

	return error;
}

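/* Remove from one rsb queue every lkb that the test function matches,
   dropping the queue's reference on each.  The rsb is flagged LOCKS_PURGED
   so dlm_grant_after_purge() knows to retry granting on it. */
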
static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
			int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
		if (test(ls, lkb)) {
			rsb_set_flag(r, RSB_LOCKS_PURGED);
			del_lkb(r, lkb);
			/* this put should free the lkb */
			if (!dlm_put_lkb(lkb))
				log_error(ls, "purged lkb not released");
		}
	}
}

static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
}

static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	return is_master_copy(lkb);
}

static void purge_dead_locks(struct dlm_rsb *r)
{
	purge_queue(r, &r->res_grantqueue, &purge_dead_test);
	purge_queue(r, &r->res_convertqueue, &purge_dead_test);
	purge_queue(r, &r->res_waitqueue, &purge_dead_test);
}

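/* Remove all master-copy lkbs from an rsb, regardless of which node they
   came from; used by the recovery code when an rsb's locks must be
   rebuilt from scratch. */
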
void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
{
	purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
	purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
	purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
}

/* Get rid of locks held by nodes that are gone. */

int dlm_purge_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;

	log_debug(ls, "dlm_purge_locks");

	down_write(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		hold_rsb(r);
		lock_rsb(r);
		if (is_master(r))
			purge_dead_locks(r);
		unlock_rsb(r);
		unhold_rsb(r);

		schedule();
	}
	up_write(&ls->ls_root_sem);

	return 0;
}

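/* Scan one hash bucket for an rsb with LOCKS_PURGED set; clear the flag
   and return the rsb with a reference held, or NULL if none is found. */
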
static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
{
	struct dlm_rsb *r, *r_ret = NULL;

	read_lock(&ls->ls_rsbtbl[bucket].lock);
	list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
		if (!rsb_flag(r, RSB_LOCKS_PURGED))
			continue;
		hold_rsb(r);
		rsb_clear_flag(r, RSB_LOCKS_PURGED);
		r_ret = r;
		break;
	}
	read_unlock(&ls->ls_rsbtbl[bucket].lock);
	return r_ret;
}

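/* Purging dead locks may have made other locks grantable: walk every hash
   bucket and, for each purged rsb we master, grant whatever is now pending
   and re-confirm the master. */
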
void dlm_grant_after_purge(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int bucket = 0;

	while (1) {
		r = find_purged_rsb(ls, bucket);
		if (!r) {
			if (bucket == ls->ls_rsbtbl_size - 1)
				break;
			bucket++;
			continue;
		}
		lock_rsb(r);
		if (is_master(r)) {
			grant_pending_locks(r);
			confirm_master(r, 0);
		}
		unlock_rsb(r);
		put_rsb(r);
		schedule();
	}
}

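/* Find the lkb owned by the given node with the given remote lkid, on a
   single queue (search_remid_list) or on any of the rsb's three queues
   (search_remid). */
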
static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
					 uint32_t remid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
			return lkb;
	}
	return NULL;
}

static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
				    uint32_t remid)
{
	struct dlm_lkb *lkb;

	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
	if (lkb)
		return lkb;
	return NULL;
}

static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				  struct dlm_rsb *r, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	int lvblen;

	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
	lkb->lkb_ownpid = rl->rl_ownpid;
	lkb->lkb_remid = rl->rl_lkid;
	lkb->lkb_exflags = rl->rl_exflags;
	lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
	lkb->lkb_flags |= DLM_IFL_MSTCPY;
	lkb->lkb_lvbseq = rl->rl_lvbseq;
	lkb->lkb_rqmode = rl->rl_rqmode;
	lkb->lkb_grmode = rl->rl_grmode;
	/* don't set lkb_status because add_lkb wants to set it itself */

	lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
	lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		lkb->lkb_lvbptr = allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
			 sizeof(struct rcom_lock);
		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
	}

	/* Conversions between PR and CW (middle modes) need special handling.
	   The real granted mode of these converting locks cannot be determined
	   until all locks have been rebuilt on the rsb (recover_conversion) */

	if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
		rl->rl_status = DLM_LKSTS_CONVERT;
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(r, RSB_RECOVER_CONVERT);
	}

	return 0;
}

/* This lkb may have been recovered in a previous aborted recovery so we need
   to check if the rsb already has an lkb with the given remote nodeid/lkid.
   If so we just send back a standard reply.  If not, we create a new lkb with
   the given values and send back our lkid.  We send back our lkid by sending
   back the rcom_lock struct we got but with the remid field filled in. */

int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int error;

	if (rl->rl_parent_lkid) {
		error = -EOPNOTSUPP;
		goto out;
	}

	error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
	if (error)
		goto out;

	lock_rsb(r);

	lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
	if (lkb) {
		error = -EEXIST;
		goto out_remid;
	}

	error = create_lkb(ls, &lkb);
	if (error)
		goto out_unlock;

	error = receive_rcom_lock_args(ls, lkb, r, rc);
	if (error) {
		__put_lkb(ls, lkb);
		goto out_unlock;
	}

	attach_lkb(r, lkb);
	add_lkb(r, lkb, rl->rl_status);
	error = 0;

 out_remid:
	/* this is the new value returned to the lock holder for
	   saving in its process-copy lkb */
	rl->rl_remid = lkb->lkb_id;

 out_unlock:
	unlock_rsb(r);
	put_rsb(r);
 out:
	if (error)
		log_print("recover_master_copy %d %x", error, rl->rl_lkid);
	rl->rl_result = error;
	return error;
}

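/* Process the reply to the rcom lock message we sent to the new master
   during recovery.  On success save the master's lkid as our remid; on
   -EBADR the new master isn't ready for the lock yet, so resend it. */
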
int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, rl->rl_lkid, &lkb);
	if (error) {
		log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
		return error;
	}

	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	error = rl->rl_result;

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	switch (error) {
	case -EBADR:
		/* There's a chance the new master received our lock before
		   dlm_recover_master_reply(), this wouldn't happen if we did
		   a barrier between recover_masters and recover_locks. */
		log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
			  (unsigned long)r, r->res_name);
		dlm_send_rcom_lock(r, lkb);
		goto out;
	case -EEXIST:
		log_debug(ls, "master copy exists %x", lkb->lkb_id);
		/* fall through */
	case 0:
		lkb->lkb_remid = rl->rl_remid;
		break;
	default:
		log_error(ls, "dlm_recover_process_copy unknown error %d %x",
			  error, lkb->lkb_id);
	}

	/* an ack for dlm_recover_locks() which waits for replies from
	   all the locks it sends to new masters */
	dlm_recovered_lock(r);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);

	return 0;
}

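/* The dlm_user_* functions below are the entry points for operations
   arriving from userspace through the dlm character device (user.c); each
   wraps one of the four main operations for a lock owned by a user
   process.  A rough, hypothetical sketch of a caller -- the names proc,
   user_lksb, name and namelen are illustrative, and the real marshalling
   in device_write() differs in detail:

	struct dlm_user_args *ua;
	int error;

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
	if (!ua)
		return -ENOMEM;
	ua->proc = proc;
	ua->user_lksb = user_lksb;

	error = dlm_user_request(ls, ua, DLM_LOCK_EX, 0, name, namelen, 0);

   Note that dlm_user_request() takes ownership of ua: on early errors it
   kfrees ua itself, and once ua is attached to the lkb it is freed later
   by free_lkb(). */
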
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	dlm_lock_recovery(ls);

	error = create_lkb(ls, &lkb);
	if (error) {
		kfree(ua);
		goto out;
	}

	if (flags & DLM_LKF_VALBLK) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
		if (!ua->lksb.sb_lvbptr) {
			kfree(ua);
			__put_lkb(ls, lkb);
			error = -ENOMEM;
			goto out;
		}
	}

	/* After ua is attached to lkb it will be freed by free_lkb().
	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
	   lock and that lkb_astparam is the dlm_user_args structure. */

	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
			      DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
	lkb->lkb_flags |= DLM_IFL_USER;
	ua->old_mode = DLM_LOCK_IV;

	if (error) {
		__put_lkb(ls, lkb);
		goto out;
	}

	error = request_lock(ls, lkb, name, namelen, &args);

	switch (error) {
	case 0:
		break;
	case -EINPROGRESS:
		error = 0;
		break;
	case -EAGAIN:
		error = 0;
		/* fall through */
	default:
		__put_lkb(ls, lkb);
		goto out;
	}

	/* add this new lkb to the per-process list of locks */
	spin_lock(&ua->proc->locks_spin);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	dlm_unlock_recovery(ls);
	return error;
}

int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	/* user can change the params on its lock when it converts it, or
	   add an lvb that didn't exist before */

	ua = (struct dlm_user_args *)lkb->lkb_astparam;

	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
		if (!ua->lksb.sb_lvbptr) {
			error = -ENOMEM;
			goto out_put;
		}
	}
	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);

	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;
	ua->old_mode = lkb->lkb_grmode;

	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
			      DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
	if (error)
		goto out_put;

	error = convert_lock(ls, lkb, &args);

	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid, char *lvb_in)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = (struct dlm_user_args *)lkb->lkb_astparam;

	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
	ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = unlock_lock(ls, lkb, &args);

	if (error == -DLM_EUNLOCK)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
		error = 0;
	if (error)
		goto out_put;

	spin_lock(&ua->proc->locks_spin);
	/* dlm_user_add_ast() may have already taken lkb off the proc list */
	if (!list_empty(&lkb->lkb_ownqueue))
		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
	spin_unlock(&ua->proc->locks_spin);
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = (struct dlm_user_args *)lkb->lkb_astparam;
	ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = cancel_lock(ls, lkb, &args);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

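/* Cancel a lock on behalf of userspace deadlock handling: the same as a
   user cancel, except DLM_IFL_DEADLOCK_CANCEL is set under the rsb lock
   so the cancel reply can be recognized as a deadlock cancellation. */
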
int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	struct dlm_rsb *r;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = (struct dlm_user_args *)lkb->lkb_astparam;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	error = validate_unlock_args(lkb, &args);
	if (error)
		goto out_r;
	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;

	error = _cancel_lock(r, lkb);
 out_r:
	unlock_rsb(r);
	put_rsb(r);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	return error;
}

/* lkb's that are removed from the waiters list by revert are just left on the
   orphans list with the granted orphan locks, to be freed by purge */

static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
	struct dlm_args args;
	int error;

	hold_lkb(lkb);
	mutex_lock(&ls->ls_orphans_mutex);
	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
	mutex_unlock(&ls->ls_orphans_mutex);

	set_unlock_args(0, ua, &args);

	error = cancel_lock(ls, lkb, &args);
	if (error == -DLM_ECANCEL)
		error = 0;
	return error;
}

/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
   Regardless of what rsb queue the lock is on, it's removed and freed. */

static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
	struct dlm_args args;
	int error;

	set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args);

	error = unlock_lock(ls, lkb, &args);
	if (error == -DLM_EUNLOCK)
		error = 0;
	return error;
}

/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
   (which does lock_rsb) due to deadlock with receiving a message that does
   lock_rsb followed by dlm_user_add_ast() */

static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
				     struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb = NULL;

	mutex_lock(&ls->ls_clear_proc_locks);
	if (list_empty(&proc->locks))
		goto out;

	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
	list_del_init(&lkb->lkb_ownqueue);

	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
		lkb->lkb_flags |= DLM_IFL_ORPHAN;
	else
		lkb->lkb_flags |= DLM_IFL_DEAD;
 out:
	mutex_unlock(&ls->ls_clear_proc_locks);
	return lkb;
}

/* The ls_clear_proc_locks mutex protects against dlm_user_add_ast() which
   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
   which we clear here. */

/* proc CLOSING flag is set so no more device_reads should look at proc->asts
   list, and no more device_writes should add lkb's to proc->locks list; so we
   shouldn't need to take asts_spin or locks_spin here.  this assumes that
   device reads/writes/closes are serialized -- FIXME: we may need to
   serialize them ourselves. */

void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	dlm_lock_recovery(ls);

	while (1) {
		lkb = del_proc_lock(ls, proc);
		if (!lkb)
			break;
		del_timeout(lkb);
		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
			orphan_proc_lock(ls, lkb);
		else
			unlock_proc_lock(ls, lkb);

		/* this removes the reference for the proc->locks list
		   added by dlm_user_request, it may result in the lkb
		   being freed */

		dlm_put_lkb(lkb);
	}

	mutex_lock(&ls->ls_clear_proc_locks);

	/* in-progress unlocks */
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}

	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
		list_del(&lkb->lkb_astqueue);
		dlm_put_lkb(lkb);
	}

	mutex_unlock(&ls->ls_clear_proc_locks);
	dlm_unlock_recovery(ls);
}

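/* Like dlm_clear_proc_locks(), but used when the process itself asks for
   its locks to be purged: force-unlock everything remaining on the proc's
   locks, unlocking and asts lists. */
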
static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	while (1) {
		lkb = NULL;
		spin_lock(&proc->locks_spin);
		if (!list_empty(&proc->locks)) {
			lkb = list_entry(proc->locks.next, struct dlm_lkb,
					 lkb_ownqueue);
			list_del_init(&lkb->lkb_ownqueue);
		}
		spin_unlock(&proc->locks_spin);

		if (!lkb)
			break;

		lkb->lkb_flags |= DLM_IFL_DEAD;
		unlock_proc_lock(ls, lkb);
		dlm_put_lkb(lkb); /* ref from proc->locks list */
	}

	spin_lock(&proc->locks_spin);
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->locks_spin);

	spin_lock(&proc->asts_spin);
	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
		list_del(&lkb->lkb_astqueue);
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->asts_spin);
}

/* pid of 0 means purge all orphans */

static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_lkb *lkb, *safe;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
		if (pid && lkb->lkb_ownpid != pid)
			continue;
		unlock_proc_lock(ls, lkb);
		list_del_init(&lkb->lkb_ownqueue);
		dlm_put_lkb(lkb);
	}
	mutex_unlock(&ls->ls_orphans_mutex);
}

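/* Ask another node to purge orphan locks for the given pid; the PURGE
   message is handled on the receiving node by do_purge(). */
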
static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error;

	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
				DLM_MSG_PURGE, &ms, &mh);
	if (error)
		return error;
	ms->m_nodeid = nodeid;
	ms->m_pid = pid;

	return send_message(mh, ms);
}

int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
		   int nodeid, int pid)
{
	int error = 0;

	if (nodeid != dlm_our_nodeid()) {
		error = send_purge(ls, nodeid, pid);
	} else {
		dlm_lock_recovery(ls);
		if (pid == current->pid)
			purge_proc_locks(ls, proc);
		else
			do_purge(ls, nodeid, pid);
		dlm_unlock_recovery(ls);
	}
	return error;
}