// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY                NFSDDBG_PNFS

struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lease_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] =  {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	[LAYOUT_FLEX_FILES]	= &ff_layout_ops,
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	[LAYOUT_SCSI]		= &scsi_layout_ops,
#endif
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}

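/*
 * Allocate a device ID to fsid mapping for the export backing @fhp and
 * hang it off the export.  If another export already registered a mapping
 * for the same fsid, reuse that entry instead.  Allocation failure is not
 * fatal here; the caller checks ex_devid_map and reports -ENOMEM itself.
 */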
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}

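/*
 * Fill in the device ID for the export that @fhp refers to, allocating the
 * fsid mapping on first use.  The device_generation supplied by the caller
 * is copied into the ID unchanged.
 */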
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}

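/*
 * Decide which layout types an export advertises.  Flexfile layouts only
 * need the export to be flagged pNFS-capable; block and SCSI layouts
 * additionally require the filesystem's export_operations (and, for SCSI,
 * the underlying block device) to provide the needed callouts.
 */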
void nfsd4_setup_layout_type(struct svc_export *exp)
{
#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;
#endif

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	if (sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks &&
	    sb->s_bdev &&
	    sb->s_bdev->bd_disk->fops->pr_ops &&
	    sb->s_bdev->bd_disk->fops->get_unique_id)
		exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}

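/*
 * Detach the cached nfsd_file from a layout stateid and, unless the layout
 * driver disables recalls, remove the FL_LAYOUT lease that was set on it.
 */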
void nfsd4_close_layout(struct nfs4_layout_stateid *ls)
{
	struct nfsd_file *fl;

	spin_lock(&ls->ls_stid.sc_file->fi_lock);
	fl = ls->ls_file;
	ls->ls_file = NULL;
	spin_unlock(&ls->ls_stid.sc_file->fi_lock);

	if (fl) {
		if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
			kernel_setlease(fl->nf_file, F_UNLCK, NULL,
					(void **)&ls);
		nfsd_file_put(fl);
	}
}

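/*
 * sc_free callback for layout stateids: unhash the stateid from its client
 * and file, close out the lease and file reference, and free the memory.
 */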
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	nfsd4_close_layout(ls);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

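/*
 * Install an FL_LAYOUT read lease on the file backing the layout stateid,
 * so that a conflicting lease break triggers a layout recall via
 * nfsd4_layouts_lm_ops.  Layout drivers that set disable_recalls skip
 * this entirely.
 */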
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lease *fl;
	int status;

	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
		return 0;

	fl = locks_alloc_lease();
	if (!fl)
		return -ENOMEM;
	locks_init_lease(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->c.flc_flags = FL_LAYOUT;
	fl->c.flc_type = F_RDLCK;
	fl->c.flc_owner = ls;
	fl->c.flc_pid = current->tgid;
	fl->c.flc_file = ls->ls_file->nf_file;

	status = kernel_setlease(fl->c.flc_file, fl->c.flc_type, &fl, NULL);
	if (status) {
		locks_free_lease(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

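/*
 * Allocate and initialize a layout stateid derived from the open, lock or
 * delegation stateid in @parent, take a reference on the backing file and
 * set up the recall callback and layout lease.  Returns NULL on failure.
 */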
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
					nfsd4_free_layout_stateid);
	if (!stp)
		return NULL;

	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == SC_TYPE_DELEG)
		ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		nfsd_file_put(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = SC_TYPE_LAYOUT;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

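/*
 * Look up the layout stateid for a LAYOUT* operation.  If @create is set,
 * an open, lock or delegation stateid is also accepted and a fresh layout
 * stateid is spawned from it.  On success *lsp is returned with ls_mutex
 * held; the caller must unlock it when done.
 */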
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned short typemask = SC_TYPE_LAYOUT;
	__be32 status;

	if (create)
		typemask |= (SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, 0, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != SC_TYPE_LAYOUT) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}

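/*
 * Start a CB_LAYOUTRECALL for this stateid unless one is already in flight
 * or there are no outstanding layout segments to recall.
 */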
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);

	refcount_inc(&ls->ls_stid.sc_count);
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}

static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}

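/*
 * Recall layouts held by every other stateid on the same file and tell the
 * caller to return NFS4ERR_RECALLCONFLICT so the client retries later.
 * Must be called with fi_lock held.
 */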
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}

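/*
 * Record a granted layout segment on the stateid.  The segment is merged
 * into an existing layout where possible; otherwise fi_lock and ls_lock
 * are dropped for a GFP_KERNEL allocation and the conflict and merge
 * checks are repeated before the new entry is linked in.
 */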
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	refcount_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}

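/*
 * Apply a returned segment to a single layout: free it when it is fully
 * covered, otherwise trim it from whichever end overlaps.  A return that
 * would split the layout in two is not supported and leaves it untouched.
 */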
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

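/*
 * Handle LAYOUTRETURN4_FILE: drop or trim every layout on the stateid that
 * overlaps the returned range, bump the stateid if anything matched, and
 * mark the stateid closed once no layouts remain.
 */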
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_nfsd_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = true;
	} else {
		trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		ls->ls_stid.sc_status |= SC_STATUS_CLOSED;
		lrp->lrs_present = false;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

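/*
 * Handle LAYOUTRETURN4_FSID and LAYOUTRETURN4_ALL: walk every layout
 * stateid of the client, optionally restricted to the current filehandle's
 * fsid, and reap all layouts whose iomode matches the request.
 */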
__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = false;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

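/*
 * Fence a client that failed to return a recalled layout by invoking the
 * /sbin/nfsd-recall-failed usermode helper with the client address and the
 * superblock identifier (s_id) of the exported filesystem as arguments.
 */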
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"Fencing...\n", addr_str);

	argv[0] = (char *)nfsd_recall_failed;
	argv[1] = addr_str;
	argv[2] = file->nf_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(nfsd_recall_failed, argv, envp,
				    UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}

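/*
 * Completion handler for CB_LAYOUTRECALL.  On success or NFS4ERR_DELAY the
 * client is polled until the layouts are returned, for at most two lease
 * periods; after that, or on any other error, the client is fenced.
 * NFS4ERR_NOMATCHING_LAYOUT means the client no longer holds the layout
 * and counts as success.
 */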
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	const struct nfsd4_layout_ops *ops;
	struct nfsd_file *fl;

	trace_nfsd_cb_layout_done(&ls->ls_stid.sc_stateid, task);
	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
					 (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		fallthrough;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);
		rcu_read_lock();
		fl = nfsd_file_get(ls->ls_file);
		rcu_read_unlock();
		if (fl) {
			ops = nfsd4_layout_ops[ls->ls_layout_type];
			if (ops->fence_client)
				ops->fence_client(ls, fl);
			else
				nfsd4_cb_layout_fail(ls, fl);
			nfsd_file_put(fl);
		}
		return 1;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	}
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
	.opcode		= OP_CB_LAYOUTRECALL,
};

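/*
 * Lease-manager break callback for FL_LAYOUT leases: don't let the locks
 * code time the lease out, just fire a layout recall; the callback path
 * removes the lease itself if the layout is never returned.
 */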
static bool
nfsd4_layout_lm_break(struct file_lease *fl)
{
	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->c.flc_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lease *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = KMEM_CACHE(nfs4_layout, 0);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = KMEM_CACHE(nfs4_layout_stateid, 0);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}
807