xref: /linux/fs/nfs/delegation.c (revision c43990162fc7f9d2f15a12797fdc6f9c0905f704)
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

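/*
 * Release the delegation's credential reference and free the structure.
 * The RCU callback below lets lockless walkers of clp->cl_delegations
 * finish their traversal before the memory is reclaimed.
 */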
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_free_delegation(delegation);
}

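/*
 * Reclaim the POSIX and flock locks that were held under the delegation
 * by re-sending them to the server.  On an unrecoverable error, state
 * recovery is scheduled for the whole client.
 */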
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

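/*
 * Walk the inode's open contexts and convert any open state that still
 * relies on the delegation stateid back to ordinary open stateids, then
 * reclaim the associated locks.  The scan restarts from the head of the
 * list after each successful claim, because inode->i_lock is dropped
 * while talking to the server.
 */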
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation with the results of a reclaimed open
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	delegation->flags = 0;	/* kmalloc() leaves this uninitialized */

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) == NULL) {
		list_add_rcu(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation_state = delegation->type;
		rcu_assign_pointer(nfsi->delegation, delegation);
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out "
					"a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

	spin_unlock(&clp->cl_lock);
	kfree(delegation);
	return status;
}

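/*
 * Send DELEGRETURN for the stateid, then defer freeing the delegation
 * until after an RCU grace period so that lockless list walkers can
 * finish with it.
 */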
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation);
}

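/*
 * Unlink the delegation from both the inode and the per-client list.
 * The caller must hold clp->cl_lock.  If a stateid is supplied, the
 * delegation is only detached when the stateids match.
 */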
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch;
	list_del_rcu(&delegation->super_list);
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	return delegation;
nomatch:
	return NULL;
}

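/*
 * Give an inode's delegation back to the server voluntarily, e.g. when
 * the client no longer needs it.  Returns the DELEGRETURN status, or 0
 * if no delegation was held.
 */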
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}

/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

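/*
 * Kernel thread body used after a lease expiry: walks the client's
 * delegation list and returns every delegation, restarting the scan
 * from the head each time rcu_read_lock() has to be dropped.  Bails
 * out early if state recovery is already running or the lease turns
 * out not to be expired after all.
 */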
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	nfs_put_client(clp);
	module_put_and_exit(0);
}

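/*
 * Spawn the delegreturn thread above.  A module reference and a client
 * reference are taken for the thread's lifetime; they are dropped here
 * only if the thread could not be started.
 */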
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

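/*
 * Arguments handed to the thread that performs a delegation recall.
 * The recall thread detaches the delegation, signals the waiter via
 * the completion, and then finishes the open/lock reclaims and the
 * DELEGRETURN on its own.
 */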
struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			call_rcu(&delegation->rcu, nfs_free_delegation_callback);
		goto restart;
	}
	rcu_read_unlock();
}

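/*
 * Copy the delegation stateid into @dst, so it can be used in place of
 * an open stateid.  Returns 1 if the inode currently holds a
 * delegation, 0 otherwise.
 */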
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}