// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void	nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void	nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
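
/*
 * Lock ordering, as used below: a file's f_mutex is taken outside
 * nlm_blocked_lock (see nlmsvc_traverse_blocks() and the kref_put_mutex()
 * in nlmsvc_release_block()).  The spinlock itself only guards nlm_blocked
 * and each block's b_list linkage.
 */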
55 
56 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
nlmdbg_cookie2a(const struct nlm_cookie * cookie)57 static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
58 {
59 	/*
60 	 * We can get away with a static buffer because this is only called
61 	 * from lockd, which is single-threaded.
62 	 */
63 	static char buf[2*NLM_MAXCOOKIELEN+1];
64 	unsigned int i, len = sizeof(buf);
65 	char *p = buf;
66 
67 	len--;	/* allow for trailing \0 */
68 	if (len < 3)
69 		return "???";
70 	for (i = 0 ; i < cookie->len ; i++) {
71 		if (len < 2) {
72 			strcpy(p-3, "...");
73 			break;
74 		}
75 		sprintf(p, "%02x", cookie->data[i]);
76 		p += 2;
77 		len -= 2;
78 	}
79 	*p = '\0';
80 
81 	return buf;
82 }
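
/*
 * For example, a four-byte cookie { 0xde, 0xad, 0xbe, 0xef } is rendered
 * as "deadbeef"; a cookie too large for buf is truncated with a
 * trailing "...".
 */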
#endif

/*
 * Insert a blocked lock into the global list
 */
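/*
 * The list is kept sorted by b_when: blocks with a timeout in ascending
 * order of expiry, followed by NLM_NEVER entries (blocks that are only
 * woken by a callback) at the tail, roughly:
 *
 *	nlm_blocked: [now+10*HZ] -> [now+30*HZ] -> [NLM_NEVER] -> ...
 *
 * nlmsvc_retry_blocked() relies on this ordering and stops scanning at
 * the first entry that is not yet (or never) due.
 */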
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	spin_lock(&nlm_blocked_lock);
	if (!list_empty(&block->b_list)) {
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
		return;
	}
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				lock->fl.c.flc_type);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->c.flc_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->c.flc_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
			goto found;
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	spin_unlock(&nlm_blocked_lock);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_rqst		*call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.c.flc_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host   = host;
	block->b_file   = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags   = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}
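
/*
 * On success the caller receives the block with one reference held
 * (from kref_init); the block in turn holds the nlm_rqst from
 * nlm_alloc_call() and pins the file via f_count.  All of this is
 * undone in nlmsvc_free_block() when the last reference is dropped.
 */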

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file		*file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}
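
/*
 * Note the kref_put_mutex() above: f_mutex is acquired only when the
 * final reference is being dropped, so nlmsvc_free_block() runs with the
 * file locked and can safely unlink the block from file->f_blocks; it
 * releases the mutex itself before freeing the block.
 */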

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		spin_unlock(&nlm_blocked_lock);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	spin_unlock(&nlm_blocked_lock);
	mutex_unlock(&file->f_mutex);
}
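
/*
 * The goto restart above is deliberate: both locks must be dropped
 * before calling nlmsvc_unlink_block(), which may sleep, and once they
 * are dropped the list may have changed under us, so the scan is
 * restarted from scratch each time a block is destroyed.
 */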

static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}
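
/*
 * The unlock/allocate/relock sequence above is the usual pattern for
 * allocating under a spinlock: kmalloc(GFP_KERNEL) may sleep, so h_lock
 * is dropped around it and the list is searched a second time in case
 * another task created the same lockowner while we slept.
 */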

void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.c.flc_owner)
		nlmsvc_put_lockowner(lock->fl.c.flc_owner);
}

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->c.flc_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *) lock->fl.c.flc_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}
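
/*
 * Returning nlm_drop_reply tells the caller to send no reply for now:
 * the request has been saved via the defer channel and is replayed
 * later, when retry_deferred_block() calls ->revisit() on it.
 */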

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct inode		*inode __maybe_unused = nlmsvc_file_inode(file);
	struct nlm_block	*block = NULL;
	int			error;
	int			mode;
	int			async_block = 0;
	__be32			ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				inode->i_sb->s_id, inode->i_ino,
				lock->fl.c.flc_type,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	if (nlmsvc_file_cannot_lock(file))
		return nlm_lck_denied_nolocks;

	if (!locks_can_async_lock(nlmsvc_file_file(file)->f_op)) {
		async_block = wait;
		wait = 0;
	}

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.c.flc_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	spin_lock(&nlm_blocked_lock);
	/*
	 * If this is a lock request for an already pending
	 * lock request we return nlm_lck_blocked without calling
	 * vfs_lock_file() again. Otherwise we have two pending
	 * requests on the underlying ->lock() implementation but
	 * only one nlm_block to be granted by lm_grant().
	 */
	if (locks_can_async_lock(nlmsvc_file_file(file)->f_op) &&
	    !list_empty(&block->b_list)) {
		spin_unlock(&nlm_blocked_lock);
		ret = nlm_lck_blocked;
		goto out;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block_locked(block, NLM_NEVER);
	spin_unlock(&nlm_blocked_lock);

	if (!wait)
		lock->fl.c.flc_flags &= ~FL_SLEEP;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
		case 0:
			nlmsvc_remove_block(block);
			ret = nlm_granted;
			goto out;
		case -EAGAIN:
			if (!wait)
				nlmsvc_remove_block(block);
			ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
			goto out;
		case FILE_LOCK_DEFERRED:
			if (wait)
				break;
			/* Filesystem lock operation is in progress.
			   Add it to the queue waiting for callback */
			ret = nlmsvc_defer_lock_rqst(rqstp, block);
			goto out;
		case -EDEADLK:
			nlmsvc_remove_block(block);
			ret = nlm_deadlock;
			goto out;
		default:			/* includes ENOLCK */
			nlmsvc_remove_block(block);
			ret = nlm_lck_denied_nolocks;
			goto out;
	}

	ret = nlm_lck_blocked;
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}
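
/*
 * To summarize, vfs_lock_file() results map onto NLM status codes as:
 * 0 -> granted; -EAGAIN -> denied, or blocked when a blocking request
 * was downgraded because the filesystem cannot async-lock (async_block);
 * FILE_LOCK_DEFERRED -> blocked for a wait, or a deferred, dropped reply
 * otherwise; -EDEADLK -> deadlock; anything else (including -ENOLCK) ->
 * denied_nolocks.
 */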

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock)
{
	int			error;
	int			mode;
	__be32			ret;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (nlmsvc_file_cannot_lock(file))
		return nlm_lck_denied_nolocks;

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	mode = lock_to_openmode(&lock->fl);
	error = vfs_test_lock(file->f_file[mode], &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.c.flc_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.c.flc_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.c.flc_pid;
	conflock->fl.c.flc_type = lock->fl.c.flc_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int	error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (nlmsvc_file_cannot_lock(file))
		return nlm_lck_denied_nolocks;

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.c.flc_type = F_UNLCK;
	lock->fl.c.flc_file = file->f_file[O_RDONLY];
	if (lock->fl.c.flc_file)
		error = vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);
	lock->fl.c.flc_file = file->f_file[O_WRONLY];
	if (lock->fl.c.flc_file)
		error |= vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);

	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	int status = 0;
	int mode;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.c.flc_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (nlmsvc_file_cannot_lock(file))
		return nlm_lck_denied_nolocks;

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		struct file_lock *fl = &block->b_call->a_args.lock.fl;

		mode = lock_to_openmode(fl);
		vfs_cancel_lock(block->b_file->f_file[mode], fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem can not
 * respond to the request immediately.
 * For SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of nlm_blocked q where
 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
 * deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	*block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};
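
/*
 * Of these, ->lm_notify fires when a conflicting lock goes away and a
 * blocked request may now succeed, while ->lm_grant is only called by
 * filesystems whose ->lock() returned FILE_LOCK_DEFERRED.  Both simply
 * requeue the block at the head of nlm_blocked and wake lockd; the real
 * work happens later in nlmsvc_retry_blocked().
 */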

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			mode;
	int			error;
	loff_t			fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.c.flc_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex.  This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst		*call = data;
	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block	*block;
	struct file_lock	*fl;
	int			error;

	dprintk("grant_reply: looking for cookie %x, s=%d\n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	switch (status) {
	case nlm_lck_denied_grace_period:
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
		break;
	case nlm_lck_denied:
		/* Client doesn't want it, just unlock it */
		nlmsvc_unlink_block(block);
		fl = &block->b_call->a_args.lock.fl;
		fl->c.flc_type = F_UNLCK;
		error = vfs_lock_file(fl->c.flc_file, F_SETLK, fl, NULL);
		if (error)
			pr_warn("lockd: unable to unlock lock rejected by client!\n");
		break;
	default:
		/*
		 * Either it was accepted or the status makes no sense;
		 * just unlink it either way.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n",	block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
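/*
 * This is expected to be driven from lockd's main service loop, which
 * calls it whenever it is woken, either by one of the svc_wake_up()
 * calls above or by expiry of the nlmsvc_retry timer re-armed at the
 * bottom of this function.
 */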
void
nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	if (timeout < MAX_SCHEDULE_TIMEOUT)
		mod_timer(&nlmsvc_retry, jiffies + timeout);
}