/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/config.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"

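/*
 * Allocation and release of nfs_delegation structures. nfs_free_delegation()
 * also drops the RPC credential reference held by the delegation.
 */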
static struct nfs_delegation *nfs_alloc_delegation(void)
{
	return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

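/*
 * Reclaim any POSIX or flock locks that were held under a delegation:
 * each lock belonging to @ctx is re-established on the server via
 * nfs4_lock_delegation_recall() before the delegation is returned.
 */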
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

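/*
 * Walk the open contexts attached to the inode and claim the open state
 * (and any delegated locks) back from the server for every context that
 * is still marked NFS_DELEGATED_STATE. Called while the delegation is
 * being returned or recalled.
 */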
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation when it is reclaimed after a server reboot
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	/* Take a reference to the new credential, then drop the old one */
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	/* Ensure we first revalidate the attributes and page cache! */
	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	delegation = nfs_alloc_delegation();
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->flags = 0;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	/* Release the unused delegation (and its credential) if one was already set */
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

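/*
 * Send DELEGRETURN for the given delegation and free it. The caller must
 * already have unlinked the delegation from the inode and the client list.
 */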
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

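/*
 * Callers elsewhere in this file use the nfs_inode_return_delegation()
 * wrapper (declared in delegation.h), which is expected to check that a
 * delegation is present before calling __nfs_inode_return_delegation().
 */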
/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated with a given superblock
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

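/*
 * Arguments handed to the delegation recall thread. The caller blocks on
 * @started until the thread has looked up (and unlinked) the delegation,
 * at which point @result holds 0 or -ENOENT.
 */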
struct recall_threadargs {
	struct inode *inode;
	struct nfs4_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

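/*
 * Body of the delegation recall thread: unlink the delegation matching the
 * recalled stateid, signal the waiting caller, reclaim the open and lock
 * state, and finally send DELEGRETURN to the server.
 */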
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);
	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}