/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static void	nlmclnt_unlock_callback(struct rpc_task *);
static void	nlmclnt_cancel_callback(struct rpc_task *);
static int	nlm_stat_to_errno(u32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);

/*
 * Cookie counter for NLM requests
 */
static u32	nlm_cookie = 0x1234;

static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	memcpy(c->data, &nlm_cookie, 4);
	memset(c->data+4, 0, 4);
	c->len = 4;
	nlm_cookie++;
}
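
/* Beware: nlm_cookie is read and bumped without any locking, so two
 * concurrent requests can in principle be sent with the same cookie. */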

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	atomic_inc(&lockowner->count);
	return lockowner;
}

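/* Drop a reference to a lockowner.  atomic_dec_and_lock() takes
 * h_lock only when the count actually hits zero, so the common path
 * stays lock-free while teardown runs under the host lock. */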
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlm_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}

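/* Look up the lockowner for this fl_owner_t, creating one if needed.
 * The host lock is dropped around kmalloc(), so the lookup must be
 * redone afterwards in case another task inserted the same owner
 * first; the unused allocation is then simply freed. */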
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			atomic_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
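 * The owner handle (oh) is rendered as "<pid>@<nodename>", e.g.
 * "1234@clienthost", so the server can tell lock owners apart.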
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

	nlmclnt_next_cookie(&argp->cookie);
	argp->state   = nsm_local_state;
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller  = system_utsname.nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%d@%s",
				current->pid, system_utsname.nodename);
	locks_copy_lock(&lock->fl, fl);
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	struct file_lock *fl = &req->a_args.lock.fl;

	if (fl->fl_ops && fl->fl_ops->fl_release_private)
		fl->fl_ops->fl_release_private(fl);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
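 * Owner handles larger than NLMCLNT_OHSIZE are copied into a separate
 * kmalloc()ed buffer, which nlmclnt_freegrantargs() releases again.
 * Returns 1 on success, 0 on allocation failure.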
 */
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = system_utsname.nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data) {
			nlmclnt_freegrantargs(call);
			return 0;
		}
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

void
nlmclnt_freegrantargs(struct nlm_rqst *call)
{
	struct file_lock *fl = &call->a_args.lock.fl;
	/*
	 * Check whether we allocated memory for the owner.
	 */
	if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
		kfree(call->a_args.lock.oh.data);
	}
	if (fl->fl_ops && fl->fl_ops->fl_release_private)
		fl->fl_ops->fl_release_private(fl);
}

/*
 * This is the main entry point for the NLM client.
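 * The NLM protocol version is derived from the NFS version: NFSv2
 * mounts use NLM version 1, NFSv3 mounts NLM version 4.  NFSv4 does
 * its own locking and is rejected below.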
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
	struct nfs_server	*nfssrv = NFS_SERVER(inode);
	struct nlm_host		*host;
	struct nlm_rqst		reqst, *call = &reqst;
	sigset_t		oldset;
	unsigned long		flags;
	int			status, proto, vers;

	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
	if (NFS_PROTO(inode)->version > 3) {
		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
		return -ENOLCK;
	}

	/* Retrieve transport protocol from NFS client */
	proto = NFS_CLIENT(inode)->cl_xprt->prot;

	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
		return -ENOLCK;

	/* Create RPC client handle if not there, and copy soft
	 * and intr flags from NFS client. */
	if (host->h_rpcclnt == NULL) {
		struct rpc_clnt	*clnt;

		/* Bind an rpc client to this host handle (does not
		 * perform a portmapper lookup) */
		if (!(clnt = nlm_bind_host(host))) {
			status = -ENOLCK;
			goto done;
		}
		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
		clnt->cl_intr     = nfssrv->client->cl_intr;
		clnt->cl_chatty   = nfssrv->client->cl_chatty;
	}

	/* Keep the old signal mask */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;

	/* If we're cleaning up locks because the process is exiting,
	 * perform the RPC call asynchronously. */
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
	    && fl->fl_type == F_UNLCK
	    && (current->flags & PF_EXITING)) {
		sigfillset(&current->blocked);	/* Mask all signals */
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		call = nlmclnt_alloc_call();
		if (!call) {
			status = -ENOMEM;
			goto out_restore;
		}
		call->a_flags = RPC_TASK_ASYNC;
	} else {
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
		memset(call, 0, sizeof(*call));
		locks_init_lock(&call->a_args.lock.fl);
		locks_init_lock(&call->a_res.lock.fl);
	}
	call->a_host = host;

	nlmclnt_locks_init_private(fl, host);

	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

 out_restore:
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

done:
	dprintk("lockd: clnt proc returns %d\n", status);
	nlm_release_host(host);
	return status;
}
EXPORT_SYMBOL(nlmclnt_proc);
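
/* Typical caller, as a rough sketch: the NFS ->lock() method simply
 * forwards to nlmclnt_proc(), along the lines of
 *
 *	static int nfs_proc_lock(struct file *filp, int cmd,
 *				 struct file_lock *fl)
 *	{
 *		return nlmclnt_proc(filp->f_dentry->d_inode, cmd, fl);
 *	}
 */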

/*
 * Allocate an NLM RPC call struct
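 * On memory pressure this sleeps and retries every few seconds instead
 * of failing; it returns NULL only if a signal interrupts the wait.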
 */
struct nlm_rqst *
nlmclnt_alloc_call(void)
{
	struct nlm_rqst	*call;

	while (!signalled()) {
		call = kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
		if (call) {
			memset(call, 0, sizeof(*call));
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			return call;
		}
		printk("nlmclnt_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

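/* Wait up to NLMCLNT_GRACE_WAIT on the per-host wait queue; a pending
 * signal turns the wait into -EINTR.  nlmclnt_call() wakes all waiters
 * once a reply shows the server has left its grace period. */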
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
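 * Transport errors rebind the host and fail the call with -EAGAIN;
 * while the server is in its post-reboot grace period we back off in
 * nlm_wait_on_grace() and retry the call here.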
 */
static int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* bootstrap and kick off the async RPC call */
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);

	return status;
}

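/* Like nlmsvc_async_call(), but pins the host with nlm_get_host() so
 * that the completion callback can drop the reference when the RPC
 * task finishes. */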
static int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* Increment host refcount */
	nlm_get_host(host);
	/* bootstrap and kick off the async RPC call */
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
	if (status < 0)
		nlm_release_host(host);
	return status;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(req, NLMPROC_TEST);
	nlmclnt_release_lockargs(req);
	if (status < 0)
		return status;

	status = req->a_res.status;
	if (status == NLM_LCK_GRANTED) {
		fl->fl_type = F_UNLCK;
	} else if (status == NLM_LCK_DENIED) {
		/*
		 * Report the conflicting lock back to the application.
		 */
		locks_copy_lock(fl, &req->a_res.lock.fl);
		fl->fl_pid = 0;
	} else {
		return nlm_stat_to_errno(req->a_res.status);
	}

	return 0;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
	nlm_get_lockowner(new->fl_u.nfs_fl.owner);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
	fl->fl_ops = NULL;
}

static struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.flags = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	fl->fl_ops = &nlmclnt_lock_ops;
}

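/* Mirror the server's verdict in the local VFS lock tables.  By the
 * time this runs the server has already granted or released the lock,
 * so a local failure means the two views have diverged. */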
static void do_vfs_lock(struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(fl->fl_file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(fl->fl_file, fl);
			break;
		default:
			BUG();
	}
	if (res < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
				__FUNCTION__);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
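/* The loop below is solution A: resend NLM_LOCK, then wait up to
 * NLMCLNT_POLL_TIMEOUT for a GRANTED callback before asking the
 * server again. */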
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	long timeout;
	int status;

	if (!host->h_monitored && nsm_monitor(host) < 0) {
		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
					host->h_name);
		status = -ENOLCK;
		goto out;
	}

	if (req->a_args.block) {
		status = nlmclnt_prepare_block(req, host, fl);
		if (status < 0)
			goto out;
	}
	for (;;) {
		status = nlmclnt_call(req, NLMPROC_LOCK);
		if (status < 0)
			goto out_unblock;
		if (resp->status != NLM_LCK_BLOCKED)
			break;
		/* Wait on an NLM blocking lock */
		timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT);
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD)
			continue;
		if (resp->status != NLM_LCK_BLOCKED)
			break;
		if (timeout >= 0)
			continue;
		/* We were interrupted. Send a CANCEL request to the server
		 * and exit
		 */
		status = (int)timeout;
		goto out_unblock;
	}

	if (resp->status == NLM_LCK_GRANTED) {
		fl->fl_u.nfs_fl.state = host->h_state;
		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
		fl->fl_flags |= FL_SLEEP;
		do_vfs_lock(fl);
	}
	status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(req);
	/* Cancel the blocked request if it is still pending */
	if (resp->status == NLM_LCK_BLOCKED)
		nlmclnt_cancel(host, fl);
out:
	nlmclnt_release_lockargs(req);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
	 && req->a_res.status == NLM_LCK_GRANTED)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, req->a_res.status);

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_res	*resp = &req->a_res;
	int		status;

	/* Clear the GRANTED flag now so the lock doesn't get
	 * reclaimed while we're stuck in the unlock call. */
	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

	if (req->a_flags & RPC_TASK_ASYNC) {
		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
					nlmclnt_unlock_callback);
		/* Hrmf... Do the unlock early since locks_remove_posix()
		 * really expects us to free the lock synchronously */
		do_vfs_lock(fl);
		if (status < 0) {
			nlmclnt_release_lockargs(req);
			kfree(req);
		}
		return status;
	}

	status = nlmclnt_call(req, NLMPROC_UNLOCK);
	nlmclnt_release_lockargs(req);
	if (status < 0)
		return status;

	do_vfs_lock(fl);
	if (resp->status == NLM_LCK_GRANTED)
		return 0;

	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
		printk("lockd: unexpected unlock status: %d\n", resp->status);

	/* What to do now? I'm out of my depth... */

	return -ENOLCK;
}

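/* Completion handler for the async UNLOCK: transport errors rebind the
 * host and restart the call, a grace-period reject delays and retries,
 * and everything else tears down the request and its host reference. */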
static void
nlmclnt_unlock_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	int		status = req->a_res.status;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
int
nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	unsigned long	flags;
	sigset_t	oldset;
	int		status;

	/* Block all signals while setting up call */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	req = nlmclnt_alloc_call();
	if (!req)
		return -ENOMEM;
	req->a_host  = host;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);

	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
					nlmclnt_cancel_callback);
	if (status < 0) {
		nlmclnt_release_lockargs(req);
		kfree(req);
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	return status;
}

static void
nlmclnt_cancel_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %d (task %d)\n",
			req->a_res.status, task->tk_pid);

	switch (req->a_res.status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			req->a_res.status);
	}

die:
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;

retry_cancel:
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(u32 status)
{
	switch (status) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
	return -ENOLCK;
}
843