/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static void	nlmclnt_unlock_callback(struct rpc_task *);
static void	nlmclnt_cancel_callback(struct rpc_task *);
static int	nlm_stat_to_errno(u32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);

/*
 * Cookie counter for NLM requests
 */
static u32	nlm_cookie = 0x1234;

static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	memcpy(c->data, &nlm_cookie, 4);
	memset(c->data+4, 0, 4);
	c->len = 4;
	nlm_cookie++;
}

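/*
 * The NLM protocol identifies lock owners by a 32-bit pid on the wire,
 * while the VFS identifies them by fl_owner_t. A struct nlm_lockowner
 * maps one onto the other; it is reference counted and lives on the
 * host's h_lockowners list.
 */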
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	atomic_inc(&lockowner->count);
	return lockowner;
}

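/*
 * Drop a reference to a lockowner. The final put unlinks it from the
 * host's list under h_lock, releases its reference to the host, and
 * frees it.
 */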
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlm_release_host(lockowner->host);
	kfree(lockowner);
}

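/*
 * Check whether an NLM pid is already in use by a lockowner on this
 * host. Caller must hold host->h_lock.
 */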
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

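/*
 * Allocate the next unused NLM pid for this host. Caller must hold
 * host->h_lock.
 */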
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

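/*
 * Look up the lockowner for a given fl_owner_t and take a reference
 * to it. Caller must hold host->h_lock.
 */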
static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}

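/*
 * Find or create the lockowner for an fl_owner_t. Note the classic
 * allocation pattern: h_lock is dropped around the GFP_KERNEL kmalloc,
 * so the lookup must be redone afterwards, and our copy is discarded
 * if someone else raced in and inserted one first.
 */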
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			atomic_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

	nlmclnt_next_cookie(&argp->cookie);
	argp->state   = nsm_local_state;
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller  = system_utsname.nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = sprintf(req->a_owner, "%d@%s",
				current->pid, system_utsname.nodename);
	locks_copy_lock(&lock->fl, fl);
}

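/*
 * Release the private state attached to the file_lock copy made by
 * nlmclnt_setlockargs().
 */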
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	struct file_lock *fl = &req->a_args.lock.fl;

	if (fl->fl_ops && fl->fl_ops->fl_release_private)
		fl->fl_ops->fl_release_private(fl);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = system_utsname.nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data) {
			nlmclnt_freegrantargs(call);
			return 0;
		}
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

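/*
 * Free arguments allocated by nlmclnt_setgrantargs()
 */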
void
nlmclnt_freegrantargs(struct nlm_rqst *call)
{
	struct file_lock *fl = &call->a_args.lock.fl;
	/*
	 * Check whether we allocated memory for the owner.
	 */
	if (call->a_args.lock.oh.data != (u8 *) call->a_owner)
		kfree(call->a_args.lock.oh.data);
	if (fl->fl_ops && fl->fl_ops->fl_release_private)
		fl->fl_ops->fl_release_private(fl);
}

/*
 * This is the main entry point for the NLM client.
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
	struct nfs_server	*nfssrv = NFS_SERVER(inode);
	struct nlm_host		*host;
	struct nlm_rqst		reqst, *call = &reqst;
	sigset_t		oldset;
	unsigned long		flags;
	int			status, proto, vers;

	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
	if (NFS_PROTO(inode)->version > 3) {
		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
		return -ENOLCK;
	}

	/* Retrieve transport protocol from NFS client */
	proto = NFS_CLIENT(inode)->cl_xprt->prot;

	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
		return -ENOLCK;

	/* Create RPC client handle if not there, and copy soft
	 * and intr flags from NFS client. */
	if (host->h_rpcclnt == NULL) {
		struct rpc_clnt	*clnt;

		/* Bind an rpc client to this host handle (does not
		 * perform a portmapper lookup) */
		if (!(clnt = nlm_bind_host(host))) {
			status = -ENOLCK;
			goto done;
		}
		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
		clnt->cl_intr     = nfssrv->client->cl_intr;
		clnt->cl_chatty   = nfssrv->client->cl_chatty;
	}

	/* Keep the old signal mask */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;

	/* If we're cleaning up locks because the process is exiting,
	 * perform the RPC call asynchronously. */
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
	    && fl->fl_type == F_UNLCK
	    && (current->flags & PF_EXITING)) {
		sigfillset(&current->blocked);	/* Mask all signals */
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		call = nlmclnt_alloc_call();
		if (!call) {
			status = -ENOMEM;
			goto out_restore;
		}
		call->a_flags = RPC_TASK_ASYNC;
	} else {
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
		memset(call, 0, sizeof(*call));
		locks_init_lock(&call->a_args.lock.fl);
		locks_init_lock(&call->a_res.lock.fl);
	}
	call->a_host = host;

	nlmclnt_locks_init_private(fl, host);

	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

 out_restore:
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

done:
	dprintk("lockd: clnt proc returns %d\n", status);
	nlm_release_host(host);
	return status;
}
EXPORT_SYMBOL(nlmclnt_proc);
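
/*
 * Example caller (sketch): the NFS client's file locking method is
 * expected to forward fcntl() requests here, along the lines of
 *
 *	status = nlmclnt_proc(filp->f_dentry->d_inode, cmd, fl);
 *
 * so that F_GETLK maps to an NLM TEST call and F_SETLK/F_SETLKW map
 * to the LOCK/UNLOCK traffic dispatched above.
 */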

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *
nlmclnt_alloc_call(void)
{
	struct nlm_rqst	*call;

	while (!signalled()) {
		call = kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
		if (call) {
			memset(call, 0, sizeof(*call));
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			return call;
		}
		printk(KERN_WARNING "nlmclnt_alloc_call: failed, waiting for memory\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(5*HZ);
	}
	return NULL;
}

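/*
 * Wait on the host's grace-period queue for up to NLMCLNT_GRACE_WAIT.
 * Returns 0 if it is safe to retry the call, or -EINTR if a signal
 * was received while waiting.
 */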
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* bootstrap and kick off the async RPC call */
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);

	return status;
}

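/*
 * Same as nlmsvc_async_call(), but takes an extra reference to the
 * host for the duration of the call; the callback is expected to drop
 * it via nlm_release_host(), and we drop it ourselves if the call
 * could not be started.
 */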
static int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* Increment host refcount */
	nlm_get_host(host);
	/* bootstrap and kick off the async RPC call */
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
	if (status < 0)
		nlm_release_host(host);
	return status;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(req, NLMPROC_TEST);
	nlmclnt_release_lockargs(req);
	if (status < 0)
		return status;

	status = req->a_res.status;
	if (status == NLM_LCK_GRANTED) {
		fl->fl_type = F_UNLCK;
	} else if (status == NLM_LCK_DENIED) {
		/*
		 * Report the conflicting lock back to the application.
		 */
		locks_copy_lock(fl, &req->a_res.lock.fl);
		fl->fl_pid = 0;
	} else {
		return nlm_stat_to_errno(req->a_res.status);
	}

	return 0;
}

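/*
 * file_lock operations for NLM client locks: copying a lock takes an
 * extra reference to the lockowner, releasing the private state drops
 * that reference again.
 */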
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
	nlm_get_lockowner(new->fl_u.nfs_fl.owner);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
	fl->fl_ops = NULL;
}

static struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.flags = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	fl->fl_ops = &nlmclnt_lock_ops;
}

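/*
 * Apply the result of a granted or removed lock to the local VFS lock
 * state, so the client's view stays in sync with the server's.
 */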
static void do_vfs_lock(struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(fl->fl_file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(fl->fl_file, fl);
			break;
		default:
			BUG();
	}
	if (res < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
				__FUNCTION__);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	long timeout;
	int status;

	if (!host->h_monitored && nsm_monitor(host) < 0) {
		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
					host->h_name);
		status = -ENOLCK;
		goto out;
	}

	if (req->a_args.block) {
		status = nlmclnt_prepare_block(req, host, fl);
		if (status < 0)
			goto out;
	}
	for (;;) {
		status = nlmclnt_call(req, NLMPROC_LOCK);
		if (status < 0)
			goto out_unblock;
		if (resp->status != NLM_LCK_BLOCKED)
			break;
		/* Wait on an NLM blocking lock */
		timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT);
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD)
			continue;
		if (resp->status != NLM_LCK_BLOCKED)
			break;
		if (timeout >= 0)
			continue;
		/* We were interrupted. Send a CANCEL request to the server
		 * and exit
		 */
		status = (int)timeout;
		goto out_unblock;
	}

	if (resp->status == NLM_LCK_GRANTED) {
		fl->fl_u.nfs_fl.state = host->h_state;
		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
		fl->fl_flags |= FL_SLEEP;
		do_vfs_lock(fl);
	}
	status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(req);
	/* Cancel the blocked request if it is still pending */
	if (resp->status == NLM_LCK_BLOCKED)
		nlmclnt_cancel(host, fl);
out:
	nlmclnt_release_lockargs(req);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
	 && req->a_res.status == NLM_LCK_GRANTED)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, req->a_res.status);

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_res	*resp = &req->a_res;
	int		status;

	/* Clean the GRANTED flag now so the lock doesn't get
	 * reclaimed while we're stuck in the unlock call. */
	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

	if (req->a_flags & RPC_TASK_ASYNC) {
		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
					nlmclnt_unlock_callback);
		/* Hrmf... Do the unlock early since locks_remove_posix()
		 * really expects us to free the lock synchronously */
		do_vfs_lock(fl);
		if (status < 0) {
			nlmclnt_release_lockargs(req);
			kfree(req);
		}
		return status;
	}

	status = nlmclnt_call(req, NLMPROC_UNLOCK);
	nlmclnt_release_lockargs(req);
	if (status < 0)
		return status;

	do_vfs_lock(fl);
	if (resp->status == NLM_LCK_GRANTED)
		return 0;

	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n",
				resp->status);

	/* What to do now? I'm out of my depth... */

	return -ENOLCK;
}

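/*
 * Completion handler for async UNLOCK calls: rebind and retry on RPC
 * errors, back off and retry while the server is in its grace period,
 * and free the request once the reply is final.
 */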
static void
nlmclnt_unlock_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	int		status = req->a_res.status;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
int
nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	unsigned long	flags;
	sigset_t	oldset;
	int		status;

	/* Block all signals while setting up call */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	req = nlmclnt_alloc_call();
	if (!req) {
		/* Don't bail out with the signal mask still blocked */
		status = -ENOMEM;
		goto out_restore;
	}
	req->a_host  = host;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);

	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
					nlmclnt_cancel_callback);
	if (status < 0) {
		nlmclnt_release_lockargs(req);
		kfree(req);
	}

out_restore:
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	return status;
}

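/*
 * Completion handler for async CANCEL calls: retries on RPC errors
 * and on NLM_LCK_DENIED_NOLOCKS, otherwise releases the host and
 * frees the request.
 */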
static void
nlmclnt_cancel_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %d (task %d)\n",
			req->a_res.status, task->tk_pid);

	switch (req->a_res.status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			req->a_res.status);
	}

die:
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);
	return;

retry_cancel:
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(u32 status)
{
	switch (status) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
	return -ENOLCK;
}