// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/exportfs.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void	nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

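/*
 * Locking order note (inferred from the code below, not from any formal
 * documentation): where both locks are needed, a file's f_mutex is taken
 * before nlm_blocked_lock, as in nlmsvc_traverse_blocks(). The spinlock
 * alone protects walks of nlm_blocked, e.g. in nlmsvc_grant_deferred().
 */
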
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0; i < cookie->len; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

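/*
 * For illustration (hypothetical values, a reading aid only): a four-byte
 * cookie {0xde, 0xad, 0xbe, 0xef} is rendered by nlmdbg_cookie2a() as the
 * string "deadbeef"; a cookie too long for the buffer is truncated and
 * ends in "...".
 */
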
/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

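/*
 * A sketch of the "when" convention, as used by the callers below:
 *
 *	nlmsvc_insert_block(block, NLM_NEVER);	// park until explicitly woken
 *	nlmsvc_insert_block(block, 10 * HZ);	// retry in ~10 seconds
 *
 * A relative timeout is converted to an absolute jiffies value, and the
 * list is kept sorted by b_when, so nlmsvc_retry_blocked() only has to
 * look at the head of nlm_blocked.
 */
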
/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	spin_lock(&nlm_blocked_lock);
	if (!list_empty(&block->b_list)) {
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
		return;
	}
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				lock->fl.c.flc_type);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->c.flc_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->c.flc_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	spin_unlock(&nlm_blocked_lock);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_rqst		*call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.c.flc_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host   = host;
	block->b_file   = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags   = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}

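/*
 * Reference-counting sketch (a reading aid, not new behaviour): a block
 * starts with b_count == 1 from nlmsvc_create_block(). nlmsvc_lookup_block()
 * takes an extra reference on a match, and nlmsvc_insert_block() takes one
 * when it first queues the block on nlm_blocked. nlmsvc_remove_block()
 * drops the queue's reference when it unlinks the block, and every
 * nlmsvc_release_block() drops one more; the block is freed when the
 * count reaches zero.
 */
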
/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;

	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file		*file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

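/*
 * Note on the mutex dance above: kref_put_mutex() acquires f_mutex only
 * when the refcount is about to drop to zero, so nlmsvc_free_block() runs
 * with f_mutex held and must drop it itself once the block is off the
 * file's f_blocks list.
 */
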
/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		spin_unlock(&nlm_blocked_lock);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	spin_unlock(&nlm_blocked_lock);
	mutex_unlock(&file->f_mutex);
}

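/*
 * Why the restart: nlmsvc_unlink_block() and nlmsvc_release_block() may
 * sleep (the latter can take f_mutex), so both locks must be dropped
 * first; once they are dropped the list may change under us, and the
 * only safe continuation is to start the walk over.
 */
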
static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

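/*
 * The shape above is the usual "allocate outside the lock, then re-check"
 * pattern: kmalloc(GFP_KERNEL) may sleep, so the spinlock is dropped for
 * the allocation and the lookup is repeated afterwards in case a
 * concurrent caller added the same owner first; the loser simply
 * kfree()s its unused allocation.
 */
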
void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.c.flc_owner)
		nlmsvc_put_lockowner(lock->fl.c.flc_owner);
}

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->c.flc_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *) lock->fl.c.flc_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);

		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}

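/*
 * In other words: the request is parked as B_QUEUED, the sunrpc layer is
 * asked to defer the RPC, and nlm_drop_reply tells the caller to send no
 * answer now; the reply goes out later, when the deferred request is
 * revisited from retry_deferred_block() below.
 */
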
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct inode		*inode = nlmsvc_file_inode(file);
	struct nlm_block	*block = NULL;
	int			error;
	int			mode;
	int			async_block = 0;
	__be32			ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				inode->i_sb->s_id, inode->i_ino,
				lock->fl.c.flc_type,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	if (!exportfs_lock_op_is_async(inode->i_sb->s_export_op)) {
		async_block = wait;
		wait = 0;
	}

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.c.flc_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	spin_lock(&nlm_blocked_lock);
	/*
	 * If this is a lock request for an already pending
	 * lock request we return nlm_lck_blocked without calling
	 * vfs_lock_file() again. Otherwise we would have two pending
	 * requests on the underlying ->lock() implementation but
	 * only one nlm_block to be granted by lm_grant().
	 */
	if (exportfs_lock_op_is_async(inode->i_sb->s_export_op) &&
	    !list_empty(&block->b_list)) {
		spin_unlock(&nlm_blocked_lock);
		ret = nlm_lck_blocked;
		goto out;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block_locked(block, NLM_NEVER);
	spin_unlock(&nlm_blocked_lock);

	if (!wait)
		lock->fl.c.flc_flags &= ~FL_SLEEP;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
	case 0:
		nlmsvc_remove_block(block);
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		if (!wait)
			nlmsvc_remove_block(block);
		ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
		goto out;
	case FILE_LOCK_DEFERRED:
		if (wait)
			break;
		/* Filesystem lock operation is in progress.
		 * Add it to the queue waiting for callback. */
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	case -EDEADLK:
		nlmsvc_remove_block(block);
		ret = nlm_deadlock;
		goto out;
	default:			/* includes ENOLCK */
		nlmsvc_remove_block(block);
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_blocked;
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

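/*
 * Summary of the vfs_lock_file() result mapping above (descriptive only):
 *
 *	0			-> nlm_granted
 *	-EAGAIN			-> nlm_lck_denied (or nlm_lck_blocked when
 *				   emulating a blocking lock via async_block)
 *	FILE_LOCK_DEFERRED	-> nlm_lck_blocked, or a deferred reply for
 *				   non-waiting requests
 *	-EDEADLK		-> nlm_deadlock (nlm_lck_denied for NLM v1-3)
 *	anything else		-> nlm_lck_denied_nolocks
 */
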
/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int			error;
	int			mode;
	__be32			ret;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	mode = lock_to_openmode(&lock->fl);
	error = vfs_test_lock(file->f_file[mode], &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.c.flc_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.c.flc_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.c.flc_pid;
	conflock->fl.c.flc_type = lock->fl.c.flc_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	ret = nlm_lck_denied;
out:
	return ret;
}

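/*
 * vfs_test_lock() reports "no conflict" by rewriting the probe lock to
 * F_UNLCK; otherwise the probe is overwritten with the details of the
 * conflicting lock, which is why those fields can simply be copied into
 * conflock for the NLM reply above.
 */
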
/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int	error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.c.flc_type = F_UNLCK;
	lock->fl.c.flc_file = file->f_file[O_RDONLY];
	if (lock->fl.c.flc_file)
		error = vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);
	lock->fl.c.flc_file = file->f_file[O_WRONLY];
	if (lock->fl.c.flc_file)
		error |= vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	int status = 0;
	int mode;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		struct file_lock *fl = &block->b_call->a_args.lock.fl;

		mode = lock_to_openmode(fl);
		vfs_cancel_lock(block->b_file->f_file[mode], fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem cannot
 * respond to the request immediately.
 * For SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of the nlm_blocked queue
 * where nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit
 * the deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	*block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};

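/*
 * A minimal sketch (hypothetical filesystem side, not part of this file)
 * of how these callbacks are reached: a filesystem whose ->lock() method
 * returned FILE_LOCK_DEFERRED later completes the request by calling
 * lm_grant(), which lands in nlmsvc_grant_deferred() above:
 *
 *	int ret = fl->fl_lmops->lm_grant(fl, result);
 *
 * while a posix unlock that unblocks a waiter invokes lm_notify(),
 * i.e. nlmsvc_notify_blocked().
 */
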
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			mode;
	int			error;
	loff_t			fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.c.flc_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst		*call = data;

	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block	*block;
	struct file_lock	*fl;
	int			error;

	dprintk("grant_reply: looking for cookie %x, s=%d\n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	switch (status) {
	case nlm_lck_denied_grace_period:
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
		break;
	case nlm_lck_denied:
		/* Client doesn't want it, just unlock it */
		nlmsvc_unlink_block(block);
		fl = &block->b_call->a_args.lock.fl;
		fl->c.flc_type = F_UNLCK;
		error = vfs_lock_file(fl->c.flc_file, F_SETLK, fl, NULL);
		if (error)
			pr_warn("lockd: unable to unlock lock rejected by client!\n");
		break;
	default:
		/*
		 * Either it was accepted or the status makes no sense;
		 * just unlink it either way.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
void
nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	if (timeout < MAX_SCHEDULE_TIMEOUT)
		mod_timer(&nlmsvc_retry, jiffies + timeout);
}