xref: /linux/fs/lockd/svclock.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/fs/lockd/svclock.c
4  *
5  * Handling of server-side locks, mostly of the blocked variety.
6  * This is the ugliest part of lockd because we tread on very thin ice.
7  * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
8  * IMNSHO introducing the grant callback into the NLM protocol was one
9  * of the worst ideas Sun ever had. Except maybe for the idea of doing
10  * NFS file locking at all.
11  *
12  * I'm trying hard to avoid race conditions by protecting most accesses
13  * to a file's list of blocked locks through a semaphore. The global
14  * list of blocked locks is not protected in this fashion however.
15  * Therefore, some functions (such as the RPC callback for the async grant
16  * call) move blocked locks towards the head of the list *while some other
17  * process might be traversing it*. This should not be a problem in
18  * practice, because this will only cause functions traversing the list
19  * to visit some blocks twice.
20  *
21  * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
22  */
23 
24 #include <linux/types.h>
25 #include <linux/slab.h>
26 #include <linux/errno.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/sunrpc/clnt.h>
30 #include <linux/sunrpc/svc_xprt.h>
31 
32 #include "lockd.h"
33 
34 #define NLMDBG_FACILITY		NLMDBG_SVCLOCK
35 
36 static void nlmsvc_release_block(struct nlm_block *block);
37 static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
38 static void	nlmsvc_remove_block(struct nlm_block *block);
39 
40 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
41 static void nlmsvc_freegrantargs(struct nlm_rqst *call);
42 static const struct rpc_call_ops nlmsvc_grant_ops;
43 
44 /*
45  * The list of blocked locks to retry
46  */
47 static LIST_HEAD(nlm_blocked);
48 static DEFINE_SPINLOCK(nlm_blocked_lock);
49 
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
/*
 * Render an NLM cookie as a hex string for dprintk output.
 * Truncated output is terminated with "..."; a too-small buffer
 * yields "???".  Returns a pointer to a static buffer.
 */
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			/* Out of room: overwrite the last emitted hex
			 * digits with "..." to flag truncation */
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#else
/* SUNRPC debugging disabled: no cookie formatting available */
static inline const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	return "???";
}
#endif
83 
/*
 * Insert a blocked lock into the global list, keeping the list sorted
 * by absolute expiry time (entries with b_when == NLM_NEVER sort last).
 * @when is a relative timeout in jiffies, or NLM_NEVER.
 * Caller must hold nlm_blocked_lock.
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		/* Not yet queued: the list takes its own reference */
		kref_get(&block->b_count);
	} else {
		/* Already queued: just reposition it */
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		/* Convert to absolute jiffies, stepping over the
		 * NLM_NEVER sentinel value if we happen to hit it */
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}
117 
/* Locked wrapper around nlmsvc_insert_block_locked() */
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}
124 
/*
 * Remove a block from the global list and drop the reference that the
 * list held (taken in nlmsvc_insert_block_locked()).  No-op if the
 * block is not currently queued.
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	spin_lock(&nlm_blocked_lock);
	if (!list_empty(&block->b_list)) {
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		/* Drop the list's reference outside nlm_blocked_lock,
		 * since the final put may need to take f_mutex */
		nlmsvc_release_block(block);
		return;
	}
	spin_unlock(&nlm_blocked_lock);
}
140 
/*
 * Find a block for a given lock
 *
 * Scans the global nlm_blocked list for a block on @file whose lock
 * matches @lock.  Returns the block with an extra reference held, or
 * NULL if no match is found.
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				lock->fl.c.flc_type);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->c.flc_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->c.flc_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			/* Pin the block for the caller before dropping
			 * the list lock */
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;
}
173 
174 static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
175 {
176 	if (a->len != b->len)
177 		return 0;
178 	if (memcmp(a->data, b->data, a->len))
179 		return 0;
180 	return 1;
181 }
182 
/*
 * Find a block with a given NLM cookie.
 * Returns the block with an extra reference held, or NULL.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
			goto found;
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	/* Take a reference for the caller before dropping the lock */
	kref_get(&block->b_count);
	spin_unlock(&nlm_blocked_lock);
	return block;
}
206 
/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 *
 * Returns a block with one reference for the caller, or NULL on
 * allocation failure.  The block is added to file->f_blocks (callers
 * in this file hold file->f_mutex around this).
 * NOTE(review): @cookie is currently unreferenced in the body; a fresh
 * cookie is generated via nlmclnt_next_cookie() instead.
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_rqst		*call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc_obj(*block);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	/* Copy the lock into the call (may allocate an owner handle) */
	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.c.flc_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host   = host;
	block->b_file   = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags   = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}
274 
/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 *
 * Cancels the pending VFS wait and takes the block off the global
 * list; returns the status of locks_delete_block().
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}
290 
/*
 * Final kref release for a block.  Invoked via kref_put_mutex() with
 * block->b_file->f_mutex held; drops that mutex itself after removing
 * the block from the file's list.
 */
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file		*file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}
307 
/* Drop a reference on @block; the last put frees it via
 * nlmsvc_free_block() with the file's f_mutex taken. NULL is a no-op. */
static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}
313 
/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 *
 * Restarts the scan from the top after each destroyed block, because
 * both f_mutex and nlm_blocked_lock must be dropped before unlinking.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		/* Pin the block so it survives dropping both locks */
		kref_get(&block->b_count);
		spin_unlock(&nlm_blocked_lock);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	spin_unlock(&nlm_blocked_lock);
	mutex_unlock(&file->f_mutex);
}
344 
/* Take a reference on a lockowner and return it */
static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}
351 
/*
 * Drop a reference on a lockowner.  On the final put (detected under
 * the host's h_lock), unhash it from h_lockowners, release the host
 * reference it held, and free it.
 */
void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}
361 
362 static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
363 {
364 	struct nlm_lockowner *lockowner;
365 	list_for_each_entry(lockowner, &host->h_lockowners, list) {
366 		if (lockowner->pid != pid)
367 			continue;
368 		return nlmsvc_get_lockowner(lockowner);
369 	}
370 	return NULL;
371 }
372 
/*
 * Look up (or create) the lockowner for @pid on @host.
 *
 * The allocation happens with h_lock dropped, so the lookup is redone
 * afterwards in case another task raced in and inserted the same
 * owner.  Returns a referenced owner, or NULL on allocation failure.
 */
static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc_obj(*res);
		spin_lock(&host->h_lock);
		/* Re-check: someone may have added it while unlocked */
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	/* Either NULL, or unused because we lost the insertion race */
	kfree(new);
	return res;
}
400 
/* Release the lockowner reference attached to an nlm_lock, if any */
void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.c.flc_owner)
		nlmsvc_put_lockowner(lock->fl.c.flc_owner);
}
407 
/*
 * Attach a referenced lockowner for @pid on @host as the file lock's
 * owner.  NOTE(review): nlmsvc_find_lockowner() can return NULL on
 * allocation failure, leaving flc_owner NULL — callers apparently
 * tolerate this; confirm.
 */
void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->c.flc_owner = nlmsvc_find_lockowner(host, pid);
}
413 
/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 *
 * Returns 1 on success, 0 if the owner-handle buffer could not be
 * allocated.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *) lock->fl.c.flc_owner)->pid;

	/* Owner handle too big for the embedded buffer: allocate one */
	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}
439 
/* Undo nlmsvc_setgrantargs(): free the owner handle if it was
 * separately allocated, and release the lock's private data */
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}
447 
448 /*
449  * Deferred lock request handling for non-blocking lock
450  */
451 static __be32
452 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
453 {
454 	__be32 status = nlm_lck_denied_nolocks;
455 
456 	block->b_flags |= B_QUEUED;
457 
458 	nlmsvc_insert_block(block, NLM_TIMEOUT);
459 
460 	block->b_cache_req = &rqstp->rq_chandle;
461 	if (rqstp->rq_chandle.defer) {
462 		block->b_deferred_req =
463 			rqstp->rq_chandle.defer(block->b_cache_req);
464 		if (block->b_deferred_req != NULL)
465 			status = nlm__int__drop_reply;
466 	}
467 	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
468 		block, block->b_flags, ntohl(status));
469 
470 	return status;
471 }
472 
473 /*
474  * Attempt to establish a lock, and if it can't be granted, block it
475  * if required.
476  */
477 __be32
478 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
479 	    struct nlm_host *host, struct nlm_lock *lock, int wait,
480 	    struct nlm_cookie *cookie, int reclaim)
481 {
482 	struct inode		*inode __maybe_unused = nlmsvc_file_inode(file);
483 	struct nlm_block	*block = NULL;
484 	int			error;
485 	int			mode;
486 	int			async_block = 0;
487 	__be32			ret;
488 
489 	dprintk("lockd: nlmsvc_lock(%s/%llu, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
490 				inode->i_sb->s_id, inode->i_ino,
491 				lock->fl.c.flc_type,
492 				lock->fl.c.flc_pid,
493 				(long long)lock->fl.fl_start,
494 				(long long)lock->fl.fl_end,
495 				wait);
496 
497 	if (nlmsvc_file_cannot_lock(file))
498 		return nlm_lck_denied_nolocks;
499 
500 	if (!locks_can_async_lock(nlmsvc_file_file(file)->f_op)) {
501 		async_block = wait;
502 		wait = 0;
503 	}
504 
505 	/* Lock file against concurrent access */
506 	mutex_lock(&file->f_mutex);
507 	/* Get existing block (in case client is busy-waiting)
508 	 * or create new block
509 	 */
510 	block = nlmsvc_lookup_block(file, lock);
511 	if (block == NULL) {
512 		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
513 		ret = nlm_lck_denied_nolocks;
514 		if (block == NULL)
515 			goto out;
516 		lock = &block->b_call->a_args.lock;
517 	} else
518 		lock->fl.c.flc_flags &= ~FL_SLEEP;
519 
520 	if (block->b_flags & B_QUEUED) {
521 		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
522 							block, block->b_flags);
523 		if (block->b_granted) {
524 			nlmsvc_unlink_block(block);
525 			ret = nlm_granted;
526 			goto out;
527 		}
528 		if (block->b_flags & B_TIMED_OUT) {
529 			nlmsvc_unlink_block(block);
530 			ret = nlm_lck_denied;
531 			goto out;
532 		}
533 		ret = nlm__int__drop_reply;
534 		goto out;
535 	}
536 
537 	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
538 		ret = nlm_lck_denied_grace_period;
539 		goto out;
540 	}
541 	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
542 		ret = nlm_lck_denied_grace_period;
543 		goto out;
544 	}
545 
546 	spin_lock(&nlm_blocked_lock);
547 	/*
548 	 * If this is a lock request for an already pending
549 	 * lock request we return nlm_lck_blocked without calling
550 	 * vfs_lock_file() again. Otherwise we have two pending
551 	 * requests on the underlaying ->lock() implementation but
552 	 * only one nlm_block to being granted by lm_grant().
553 	 */
554 	if (locks_can_async_lock(nlmsvc_file_file(file)->f_op) &&
555 	    !list_empty(&block->b_list)) {
556 		spin_unlock(&nlm_blocked_lock);
557 		ret = nlm_lck_blocked;
558 		goto out;
559 	}
560 
561 	/* Append to list of blocked */
562 	nlmsvc_insert_block_locked(block, NLM_NEVER);
563 	spin_unlock(&nlm_blocked_lock);
564 
565 	if (!wait)
566 		lock->fl.c.flc_flags &= ~FL_SLEEP;
567 	mode = lock_to_openmode(&lock->fl);
568 	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
569 	lock->fl.c.flc_flags &= ~FL_SLEEP;
570 
571 	dprintk("lockd: vfs_lock_file returned %d\n", error);
572 	switch (error) {
573 		case 0:
574 			nlmsvc_remove_block(block);
575 			ret = nlm_granted;
576 			goto out;
577 		case -EAGAIN:
578 			if (!wait)
579 				nlmsvc_remove_block(block);
580 			ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
581 			goto out;
582 		case FILE_LOCK_DEFERRED:
583 			if (wait)
584 				break;
585 			/* Filesystem lock operation is in progress
586 			   Add it to the queue waiting for callback */
587 			ret = nlmsvc_defer_lock_rqst(rqstp, block);
588 			goto out;
589 		case -EDEADLK:
590 			nlmsvc_remove_block(block);
591 			ret = nlm__int__deadlock;
592 			goto out;
593 		default:			/* includes ENOLCK */
594 			nlmsvc_remove_block(block);
595 			ret = nlm_lck_denied_nolocks;
596 			goto out;
597 	}
598 
599 	ret = nlm_lck_blocked;
600 out:
601 	mutex_unlock(&file->f_mutex);
602 	nlmsvc_release_block(block);
603 	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
604 	return ret;
605 }
606 
/*
 * Test for presence of a conflicting lock.
 * On conflict, @conflock is filled in with the blocking lock's range,
 * type and svid and nlm_lck_denied is returned; nlm_granted means no
 * conflict was found.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock)
{
	int			error;
	int			mode;
	__be32			ret;

	dprintk("lockd: nlmsvc_testlock(%s/%llu, ty=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (nlmsvc_file_cannot_lock(file))
		return nlm_lck_denied_nolocks;

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	mode = lock_to_openmode(&lock->fl);
	locks_init_lock(&conflock->fl);
	/* vfs_test_lock only uses start, end, and owner, but tests flc_file */
	conflock->fl.c.flc_file = lock->fl.c.flc_file;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	conflock->fl.c.flc_owner = lock->fl.c.flc_owner;
	error = vfs_test_lock(file->f_file[mode], &conflock->fl);
	if (error) {
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	/* F_UNLCK in the result means nothing conflicts */
	if (conflock->fl.c.flc_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		conflock->fl.c.flc_type, (long long)conflock->fl.fl_start,
		(long long)conflock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = conflock->fl.c.flc_pid;
	locks_release_private(&conflock->fl);

	ret = nlm_lck_denied;
out:
	return ret;
}
665 
/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 *
 * The unlock is applied through both the read-only and write-only
 * open files, whichever of them exist.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int	error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%llu, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (nlmsvc_file_cannot_lock(file))
		return nlm_lck_denied_nolocks;

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.c.flc_type = F_UNLCK;
	lock->fl.c.flc_file = file->f_file[O_RDONLY];
	if (lock->fl.c.flc_file)
		error = vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);
	lock->fl.c.flc_file = file->f_file[O_WRONLY];
	if (lock->fl.c.flc_file)
		error |= vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);

	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
}
703 
/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	int status = 0;
	int mode;

	dprintk("lockd: nlmsvc_cancel(%s/%llu, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (nlmsvc_file_cannot_lock(file))
		return nlm_lck_denied_nolocks;

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		struct file_lock *fl = &block->b_call->a_args.lock.fl;

		/* Tell the filesystem to abandon the pending request,
		 * then unlink the block and drop our lookup reference */
		mode = lock_to_openmode(fl);
		vfs_cancel_lock(block->b_file->f_file[mode], fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}
744 
745 /*
746  * This is a callback from the filesystem for VFS file lock requests.
747  * It will be used if lm_grant is defined and the filesystem can not
748  * respond to the request immediately.
749  * For SETLK or SETLKW request it will get the local posix lock.
750  * In all cases it will move the block to the head of nlm_blocked q where
751  * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
752  * deferred rpc for GETLK and SETLK.
753  */
754 static void
755 nlmsvc_update_deferred_block(struct nlm_block *block, int result)
756 {
757 	block->b_flags |= B_GOT_CALLBACK;
758 	if (result == 0)
759 		block->b_granted = 1;
760 	else
761 		block->b_flags |= B_TIMED_OUT;
762 }
763 
/*
 * lm_grant callback: the filesystem has resolved a deferred lock
 * request (result == 0 means granted).  Find the matching block,
 * record the outcome, and requeue it at the head of nlm_blocked so
 * lockd handles it promptly.  Returns 0 on success, -ENOLCK if the
 * block already timed out, -ENOENT if no matching block exists.
 */
static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			/* Requeue with immediate expiry and wake lockd */
			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}
794 
/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	*block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			/* Requeue with immediate expiry and wake lockd */
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
820 
/* lm_get_owner callback: take a reference on the NLM lockowner */
static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}
825 
/* lm_put_owner callback: drop a reference on the NLM lockowner */
static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}
830 
/* Lock-manager callbacks the VFS uses for server-side NLM locks */
const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};
837 
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			mode;
	int			error;
	loff_t			fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	/* Hold a reference across the grant attempt; dropped either on
	 * the early-return paths below or by the RPC release op */
	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.c.flc_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		/* Back off and retry in ten seconds */
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}
922 
/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	/* Let lockd process the requeued block */
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}
966 
/*
 * FIXME: nlmsvc_release_block() grabs a mutex.  This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst		*call = data;
	/* Drop the reference taken in nlmsvc_grant_blocked() */
	nlmsvc_release_block(call->a_block);
}
976 
/* RPC callbacks for the async GRANTED_MSG call */
static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};
981 
/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block by cookie and act on the client's reported status.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block	*block;
	struct file_lock	*fl;
	int			error;

	dprintk("grant_reply: looking for cookie %x, s=%d\n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	switch (status) {
	case nlm_lck_denied_grace_period:
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
		break;
	case nlm_lck_denied:
		/* Client doesn't want it, just unlock it */
		nlmsvc_unlink_block(block);
		fl = &block->b_call->a_args.lock.fl;
		fl->c.flc_type = F_UNLCK;
		error = vfs_lock_file(fl->c.flc_file, F_SETLK, fl, NULL);
		if (error)
			pr_warn("lockd: unable to unlock lock rejected by client!\n");
		break;
	default:
		/*
		 * Either it was accepted or the status makes no sense
		 * just unlink it either way.
		 */
		nlmsvc_unlink_block(block);
	}
	/* Drop the reference taken by nlmsvc_find_block() */
	nlmsvc_release_block(block);
}
1021 
/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	/* No lm_grant callback arrived in time: mark it timed out */
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n",	block, block->b_flags);
	if (block->b_deferred_req) {
		/* Replay the deferred RPC request once, then forget it */
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}
1038 
/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 *
 * nlm_blocked is time-ordered (see nlmsvc_insert_block_locked()), so
 * we stop at the first entry that is not yet due and rearm the retry
 * timer for it.
 */
void
nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		/* Drop the spinlock: the handlers below may sleep */
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	if (timeout < MAX_SCHEDULE_TIMEOUT)
		mod_timer(&nlmsvc_retry, jiffies + timeout);
}
1077