1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Module for pnfs flexfile layout driver.
4  *
5  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6  *
7  * Tao Peng <bergwolf@primarydata.com>
8  */
9 
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/file.h>
15 #include <linux/sched/mm.h>
16 
17 #include <linux/sunrpc/metrics.h>
18 
19 #include "flexfilelayout.h"
20 #include "../nfs4session.h"
21 #include "../nfs4idmap.h"
22 #include "../internal.h"
23 #include "../delegation.h"
24 #include "../nfs4trace.h"
25 #include "../iostat.h"
26 #include "../nfs.h"
27 #include "../nfs42.h"
28 
29 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
30 
31 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
32 #define FF_LAYOUTRETURN_MAXERR 20
33 
34 enum nfs4_ff_op_type {
35 	NFS4_FF_OP_LAYOUTSTATS,
36 	NFS4_FF_OP_LAYOUTRETURN,
37 };
38 
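/*
 * Cap on pNFS I/O retries for "soft"/"softerr" mounts; 0 (the default)
 * leaves the retry count unlimited. Tunable at runtime via the
 * io_maxretrans module parameter (registered elsewhere in this file),
 * e.g. (assuming the usual module sysfs layout):
 *	echo 5 > /sys/module/nfs_layout_flexfiles/parameters/io_maxretrans
 */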
39 static unsigned short io_maxretrans;
40 
41 static const struct pnfs_commit_ops ff_layout_commit_ops;
42 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
43 		struct nfs_pgio_header *hdr);
44 static int
45 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
46 			       struct nfs42_layoutstat_devinfo *devinfo,
47 			       int dev_limit, enum nfs4_ff_op_type type);
48 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
49 			      const struct nfs42_layoutstat_devinfo *devinfo,
50 			      struct nfs4_ff_layout_mirror *mirror);
51 
52 static struct pnfs_layout_hdr *
53 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
54 {
55 	struct nfs4_flexfile_layout *ffl;
56 
57 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
58 	if (ffl) {
59 		pnfs_init_ds_commit_info(&ffl->commit_info);
60 		INIT_LIST_HEAD(&ffl->error_list);
61 		INIT_LIST_HEAD(&ffl->mirrors);
62 		ffl->last_report_time = ktime_get();
63 		ffl->commit_info.ops = &ff_layout_commit_ops;
64 		return &ffl->generic_hdr;
65 	} else
66 		return NULL;
67 }
68 
69 static void
70 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
71 {
72 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
73 	struct nfs4_ff_layout_ds_err *err, *n;
74 
75 	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
76 		list_del(&err->list);
77 		kfree(err);
78 	}
79 	kfree_rcu(ffl, generic_hdr.plh_rcu);
80 }
81 
82 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
83 {
84 	__be32 *p;
85 
86 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
87 	if (unlikely(p == NULL))
88 		return -ENOBUFS;
89 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
90 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
91 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
92 		p[0], p[1], p[2], p[3]);
93 	return 0;
94 }
95 
96 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
97 {
98 	__be32 *p;
99 
100 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
101 	if (unlikely(!p))
102 		return -ENOBUFS;
103 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
104 	nfs4_print_deviceid(devid);
105 	return 0;
106 }
107 
108 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
109 {
110 	__be32 *p;
111 
112 	p = xdr_inline_decode(xdr, 4);
113 	if (unlikely(!p))
114 		return -ENOBUFS;
115 	fh->size = be32_to_cpup(p++);
116 	if (fh->size > NFS_MAXFHSIZE) {
117 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
118 		       fh->size);
119 		return -EOVERFLOW;
120 	}
121 	/* fh.data */
122 	p = xdr_inline_decode(xdr, fh->size);
123 	if (unlikely(!p))
124 		return -ENOBUFS;
125 	memcpy(&fh->data, p, fh->size);
126 	dprintk("%s: fh len %d\n", __func__, fh->size);
127 
128 	return 0;
129 }
130 
131 /*
132  * Currently only stringified uids and gids are accepted.
133  * I.e., Kerberos is not supported for the DSes, so no principals.
134  *
135  * That means that one common function will suffice, but when
136  * principals are added, this should be split to accommodate
137  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
138  */
139 static int
140 decode_name(struct xdr_stream *xdr, u32 *id)
141 {
142 	__be32 *p;
143 	int len;
144 
145 	/* opaque_length(4) */
146 	p = xdr_inline_decode(xdr, 4);
147 	if (unlikely(!p))
148 		return -ENOBUFS;
149 	len = be32_to_cpup(p++);
150 	if (len < 0)
151 		return -EINVAL;
152 
153 	dprintk("%s: len %u\n", __func__, len);
154 
155 	/* opaque body */
156 	p = xdr_inline_decode(xdr, len);
157 	if (unlikely(!p))
158 		return -ENOBUFS;
159 
160 	if (!nfs_map_string_to_numeric((char *)p, len, id))
161 		return -EINVAL;
162 
163 	return 0;
164 }
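/*
 * For reference, a sketch (not normative) of the per-data-server portion
 * of the flexfiles layout body that the decoders above consume, following
 * RFC 8435:
 *
 *	ds_count	4 bytes (this driver requires 1, i.e. no striping)
 *	deviceid	NFS4_DEVICEID4_SIZE bytes, fixed
 *	efficiency	4 bytes
 *	stateid		NFS4_STATEID_SIZE bytes, fixed
 *	fh_count	4 bytes, then fh_count variable-length filehandles
 *	user, group	XDR opaques holding stringified ids, e.g. "0"
 *
 * ff_layout_alloc_lseg() below walks exactly this sequence per mirror.
 */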
165 
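/*
 * Try to open the DS file for local I/O (LOCALIO). A NULL return simply
 * means "no local bypass": either CONFIG_NFS_LOCALIO is disabled or the
 * DS is not this host, and callers fall back to ordinary RPC to the DS.
 */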
166 static struct nfsd_file *
167 ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx,
168 		 struct nfs_client *clp, const struct cred *cred,
169 		 struct nfs_fh *fh, fmode_t mode)
170 {
171 #if IS_ENABLED(CONFIG_NFS_LOCALIO)
172 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
173 
174 	return nfs_local_open_fh(clp, cred, fh, &mirror->nfl, mode);
175 #else
176 	return NULL;
177 #endif
178 }
179 
180 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
181 		const struct nfs4_ff_layout_mirror *m2)
182 {
183 	int i, j;
184 
185 	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
186 		return false;
187 	for (i = 0; i < m1->fh_versions_cnt; i++) {
188 		bool found_fh = false;
189 		for (j = 0; j < m2->fh_versions_cnt; j++) {
190 			if (nfs_compare_fh(&m1->fh_versions[i],
191 					&m2->fh_versions[j]) == 0) {
192 				found_fh = true;
193 				break;
194 			}
195 		}
196 		if (!found_fh)
197 			return false;
198 	}
199 	return true;
200 }
201 
202 static struct nfs4_ff_layout_mirror *
203 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
204 		struct nfs4_ff_layout_mirror *mirror)
205 {
206 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
207 	struct nfs4_ff_layout_mirror *pos;
208 	struct inode *inode = lo->plh_inode;
209 
210 	spin_lock(&inode->i_lock);
211 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
212 		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
213 			continue;
214 		if (!ff_mirror_match_fh(mirror, pos))
215 			continue;
216 		if (refcount_inc_not_zero(&pos->ref)) {
217 			spin_unlock(&inode->i_lock);
218 			return pos;
219 		}
220 	}
221 	list_add(&mirror->mirrors, &ff_layout->mirrors);
222 	mirror->layout = lo;
223 	spin_unlock(&inode->i_lock);
224 	return mirror;
225 }
226 
227 static void
228 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
229 {
230 	struct inode *inode;
231 	if (mirror->layout == NULL)
232 		return;
233 	inode = mirror->layout->plh_inode;
234 	spin_lock(&inode->i_lock);
235 	list_del(&mirror->mirrors);
236 	spin_unlock(&inode->i_lock);
237 	mirror->layout = NULL;
238 }
239 
240 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
241 {
242 	struct nfs4_ff_layout_mirror *mirror;
243 
244 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
245 	if (mirror != NULL) {
246 		spin_lock_init(&mirror->lock);
247 		refcount_set(&mirror->ref, 1);
248 		INIT_LIST_HEAD(&mirror->mirrors);
249 		nfs_localio_file_init(&mirror->nfl);
250 	}
251 	return mirror;
252 }
253 
254 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
255 {
256 	const struct cred *cred;
257 
258 	ff_layout_remove_mirror(mirror);
259 	kfree(mirror->fh_versions);
260 	nfs_close_local_fh(&mirror->nfl);
261 	cred = rcu_access_pointer(mirror->ro_cred);
262 	put_cred(cred);
263 	cred = rcu_access_pointer(mirror->rw_cred);
264 	put_cred(cred);
265 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
266 	kfree(mirror);
267 }
268 
269 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
270 {
271 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
272 		ff_layout_free_mirror(mirror);
273 }
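/*
 * Mirror lifetime in brief: ff_layout_alloc_lseg() allocates a candidate
 * mirror, ff_layout_add_mirror() either links it into the layout's list
 * or hands back an existing matching entry with an extra reference, and
 * ff_layout_put_mirror() drops a reference, freeing the mirror (and
 * unlinking it from the layout) once the count reaches zero.
 */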
274 
275 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
276 {
277 	u32 i;
278 
279 	for (i = 0; i < fls->mirror_array_cnt; i++)
280 		ff_layout_put_mirror(fls->mirror_array[i]);
281 }
282 
283 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
284 {
285 	if (fls) {
286 		ff_layout_free_mirror_array(fls);
287 		kfree(fls);
288 	}
289 }
290 
291 static bool
292 ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
293 		struct pnfs_layout_segment *l2)
294 {
295 	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
296 	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
297 	u32 i;
298 
299 	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
300 		return false;
301 	for (i = 0; i < fl1->mirror_array_cnt; i++) {
302 		if (fl1->mirror_array[i] != fl2->mirror_array[i])
303 			return false;
304 	}
305 	return true;
306 }
307 
308 static bool
309 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
310 		const struct pnfs_layout_range *l2)
311 {
312 	u64 end1, end2;
313 
314 	if (l1->iomode != l2->iomode)
315 		return l1->iomode != IOMODE_READ;
316 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
317 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
318 	if (end1 < l2->offset)
319 		return false;
320 	if (end2 < l1->offset)
321 		return true;
322 	return l2->offset <= l1->offset;
323 }
324 
325 static bool
326 ff_lseg_merge(struct pnfs_layout_segment *new,
327 		struct pnfs_layout_segment *old)
328 {
329 	u64 new_end, old_end;
330 
331 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
332 		return false;
333 	if (new->pls_range.iomode != old->pls_range.iomode)
334 		return false;
335 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
336 			old->pls_range.length);
337 	if (old_end < new->pls_range.offset)
338 		return false;
339 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
340 			new->pls_range.length);
341 	if (new_end < old->pls_range.offset)
342 		return false;
343 	if (!ff_lseg_match_mirrors(new, old))
344 		return false;
345 
346 	/* Mergeable: copy info from 'old' to 'new' */
347 	if (new_end < old_end)
348 		new_end = old_end;
349 	if (new->pls_range.offset < old->pls_range.offset)
350 		new->pls_range.offset = old->pls_range.offset;
351 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
352 			new_end);
353 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
354 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
355 	return true;
356 }
357 
358 static void
359 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
360 		struct pnfs_layout_segment *lseg,
361 		struct list_head *free_me)
362 {
363 	pnfs_generic_layout_insert_lseg(lo, lseg,
364 			ff_lseg_range_is_after,
365 			ff_lseg_merge,
366 			free_me);
367 }
368 
369 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
370 {
371 	int i, j;
372 
373 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
374 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
375 			if (fls->mirror_array[i]->efficiency <
376 			    fls->mirror_array[j]->efficiency)
377 				swap(fls->mirror_array[i],
378 				     fls->mirror_array[j]);
379 	}
380 }
381 
382 static struct pnfs_layout_segment *
383 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
384 		     struct nfs4_layoutget_res *lgr,
385 		     gfp_t gfp_flags)
386 {
387 	struct pnfs_layout_segment *ret;
388 	struct nfs4_ff_layout_segment *fls = NULL;
389 	struct xdr_stream stream;
390 	struct xdr_buf buf;
391 	struct page *scratch;
392 	u64 stripe_unit;
393 	u32 mirror_array_cnt;
394 	__be32 *p;
395 	int i, rc;
396 
397 	dprintk("--> %s\n", __func__);
398 	scratch = alloc_page(gfp_flags);
399 	if (!scratch)
400 		return ERR_PTR(-ENOMEM);
401 
402 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
403 			      lgr->layoutp->len);
404 	xdr_set_scratch_page(&stream, scratch);
405 
406 	/* stripe unit and mirror_array_cnt */
407 	rc = -EIO;
408 	p = xdr_inline_decode(&stream, 8 + 4);
409 	if (!p)
410 		goto out_err_free;
411 
412 	p = xdr_decode_hyper(p, &stripe_unit);
413 	mirror_array_cnt = be32_to_cpup(p++);
414 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
415 		stripe_unit, mirror_array_cnt);
416 
417 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
418 	    mirror_array_cnt == 0)
419 		goto out_err_free;
420 
421 	rc = -ENOMEM;
422 	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
423 			gfp_flags);
424 	if (!fls)
425 		goto out_err_free;
426 
427 	fls->mirror_array_cnt = mirror_array_cnt;
428 	fls->stripe_unit = stripe_unit;
429 
430 	for (i = 0; i < fls->mirror_array_cnt; i++) {
431 		struct nfs4_ff_layout_mirror *mirror;
432 		struct cred *kcred;
433 		const struct cred __rcu *cred;
434 		kuid_t uid;
435 		kgid_t gid;
436 		u32 ds_count, fh_count, id;
437 		int j;
438 
439 		rc = -EIO;
440 		p = xdr_inline_decode(&stream, 4);
441 		if (!p)
442 			goto out_err_free;
443 		ds_count = be32_to_cpup(p);
444 
445 		/* FIXME: allow for striping? */
446 		if (ds_count != 1)
447 			goto out_err_free;
448 
449 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
450 		if (fls->mirror_array[i] == NULL) {
451 			rc = -ENOMEM;
452 			goto out_err_free;
453 		}
454 
455 		fls->mirror_array[i]->ds_count = ds_count;
456 
457 		/* deviceid */
458 		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
459 		if (rc)
460 			goto out_err_free;
461 
462 		/* efficiency */
463 		rc = -EIO;
464 		p = xdr_inline_decode(&stream, 4);
465 		if (!p)
466 			goto out_err_free;
467 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
468 
469 		/* stateid */
470 		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
471 		if (rc)
472 			goto out_err_free;
473 
474 		/* fh */
475 		rc = -EIO;
476 		p = xdr_inline_decode(&stream, 4);
477 		if (!p)
478 			goto out_err_free;
479 		fh_count = be32_to_cpup(p);
480 
481 		fls->mirror_array[i]->fh_versions =
482 			kcalloc(fh_count, sizeof(struct nfs_fh),
483 				gfp_flags);
484 		if (fls->mirror_array[i]->fh_versions == NULL) {
485 			rc = -ENOMEM;
486 			goto out_err_free;
487 		}
488 
489 		for (j = 0; j < fh_count; j++) {
490 			rc = decode_nfs_fh(&stream,
491 					   &fls->mirror_array[i]->fh_versions[j]);
492 			if (rc)
493 				goto out_err_free;
494 		}
495 
496 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
497 
498 		/* user */
499 		rc = decode_name(&stream, &id);
500 		if (rc)
501 			goto out_err_free;
502 
503 		uid = make_kuid(&init_user_ns, id);
504 
505 		/* group */
506 		rc = decode_name(&stream, &id);
507 		if (rc)
508 			goto out_err_free;
509 
510 		gid = make_kgid(&init_user_ns, id);
511 
512 		if (gfp_flags & __GFP_FS)
513 			kcred = prepare_kernel_cred(&init_task);
514 		else {
515 			unsigned int nofs_flags = memalloc_nofs_save();
516 			kcred = prepare_kernel_cred(&init_task);
517 			memalloc_nofs_restore(nofs_flags);
518 		}
519 		rc = -ENOMEM;
520 		if (!kcred)
521 			goto out_err_free;
522 		kcred->fsuid = uid;
523 		kcred->fsgid = gid;
524 		cred = RCU_INITIALIZER(kcred);
525 
526 		if (lgr->range.iomode == IOMODE_READ)
527 			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
528 		else
529 			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
530 
531 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
532 		if (mirror != fls->mirror_array[i]) {
533 			/* swap cred ptrs so free_mirror will clean up old */
534 			if (lgr->range.iomode == IOMODE_READ) {
535 				cred = xchg(&mirror->ro_cred, cred);
536 				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
537 			} else {
538 				cred = xchg(&mirror->rw_cred, cred);
539 				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
540 			}
541 			ff_layout_free_mirror(fls->mirror_array[i]);
542 			fls->mirror_array[i] = mirror;
543 		}
544 
545 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
546 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
547 			from_kuid(&init_user_ns, uid),
548 			from_kgid(&init_user_ns, gid));
549 	}
550 
551 	p = xdr_inline_decode(&stream, 4);
552 	if (!p)
553 		goto out_sort_mirrors;
554 	fls->flags = be32_to_cpup(p);
555 
556 	p = xdr_inline_decode(&stream, 4);
557 	if (!p)
558 		goto out_sort_mirrors;
559 	for (i = 0; i < fls->mirror_array_cnt; i++)
560 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
561 
562 out_sort_mirrors:
563 	ff_layout_sort_mirrors(fls);
564 	ret = &fls->generic_hdr;
565 	dprintk("<-- %s (success)\n", __func__);
566 out_free_page:
567 	__free_page(scratch);
568 	return ret;
569 out_err_free:
570 	_ff_layout_free_lseg(fls);
571 	ret = ERR_PTR(rc);
572 	dprintk("<-- %s (%d)\n", __func__, rc);
573 	goto out_free_page;
574 }
575 
576 static void
577 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
578 {
579 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
580 
581 	dprintk("--> %s\n", __func__);
582 
583 	if (lseg->pls_range.iomode == IOMODE_RW) {
584 		struct nfs4_flexfile_layout *ffl;
585 		struct inode *inode;
586 
587 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
588 		inode = ffl->generic_hdr.plh_inode;
589 		spin_lock(&inode->i_lock);
590 		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
591 		spin_unlock(&inode->i_lock);
592 	}
593 	_ff_layout_free_lseg(fls);
594 }
595 
596 static void
597 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
598 {
599 	/* first IO request? */
600 	if (atomic_inc_return(&timer->n_ops) == 1) {
601 		timer->start_time = now;
602 	}
603 }
604 
605 static ktime_t
606 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
607 {
608 	ktime_t start;
609 
610 	if (atomic_dec_return(&timer->n_ops) < 0)
611 		WARN_ON_ONCE(1);
612 
613 	start = timer->start_time;
614 	timer->start_time = now;
615 	return ktime_sub(now, start);
616 }
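/*
 * Note on the busy timer: start_time is (re)stamped when the op count
 * rises from zero and again on every completion, so total_busy_time
 * accumulates roughly the wall-clock time the mirror had I/O outstanding,
 * while aggregate_completion_time sums the individual op latencies.
 */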
617 
618 static bool
619 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
620 			    struct nfs4_ff_layoutstat *layoutstat,
621 			    ktime_t now)
622 {
623 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
624 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
625 
626 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
627 	if (!mirror->start_time)
628 		mirror->start_time = now;
629 	if (mirror->report_interval != 0)
630 		report_interval = (s64)mirror->report_interval * 1000LL;
631 	else if (layoutstats_timer != 0)
632 		report_interval = (s64)layoutstats_timer * 1000LL;
633 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
634 			report_interval) {
635 		ffl->last_report_time = now;
636 		return true;
637 	}
638 
639 	return false;
640 }
641 
642 static void
643 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
644 		__u64 requested)
645 {
646 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
647 
648 	iostat->ops_requested++;
649 	iostat->bytes_requested += requested;
650 }
651 
652 static void
653 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
654 		__u64 requested,
655 		__u64 completed,
656 		ktime_t time_completed,
657 		ktime_t time_started)
658 {
659 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
660 	ktime_t completion_time = ktime_sub(time_completed, time_started);
661 	ktime_t timer;
662 
663 	iostat->ops_completed++;
664 	iostat->bytes_completed += completed;
665 	iostat->bytes_not_delivered += requested - completed;
666 
667 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
668 	iostat->total_busy_time =
669 			ktime_add(iostat->total_busy_time, timer);
670 	iostat->aggregate_completion_time =
671 			ktime_add(iostat->aggregate_completion_time,
672 					completion_time);
673 }
674 
675 static void
676 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
677 		struct nfs4_ff_layout_mirror *mirror,
678 		__u64 requested, ktime_t now)
679 {
680 	bool report;
681 
682 	spin_lock(&mirror->lock);
683 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
684 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
685 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
686 	spin_unlock(&mirror->lock);
687 
688 	if (report)
689 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
690 }
691 
692 static void
693 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
694 		struct nfs4_ff_layout_mirror *mirror,
695 		__u64 requested,
696 		__u64 completed)
697 {
698 	spin_lock(&mirror->lock);
699 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
700 			requested, completed,
701 			ktime_get(), task->tk_start);
702 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
703 	spin_unlock(&mirror->lock);
704 }
705 
706 static void
707 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
708 		struct nfs4_ff_layout_mirror *mirror,
709 		__u64 requested, ktime_t now)
710 {
711 	bool report;
712 
713 	spin_lock(&mirror->lock);
714 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
715 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
716 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
717 	spin_unlock(&mirror->lock);
718 
719 	if (report)
720 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
721 }
722 
723 static void
724 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
725 		struct nfs4_ff_layout_mirror *mirror,
726 		__u64 requested,
727 		__u64 completed,
728 		enum nfs3_stable_how committed)
729 {
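	/*
	 * Unstable writes are accounted when they are committed instead;
	 * see ff_layout_commit_record_layoutstats_done().
	 */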
730 	if (committed == NFS_UNSTABLE)
731 		requested = completed = 0;
732 
733 	spin_lock(&mirror->lock);
734 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
735 			requested, completed, ktime_get(), task->tk_start);
736 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
737 	spin_unlock(&mirror->lock);
738 }
739 
740 static void
741 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
742 {
743 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
744 
745 	if (devid)
746 		nfs4_mark_deviceid_unavailable(devid);
747 }
748 
749 static void
750 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
751 {
752 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
753 
754 	if (devid)
755 		nfs4_mark_deviceid_available(devid);
756 }
757 
758 static struct nfs4_pnfs_ds *
759 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
760 			     u32 start_idx, u32 *best_idx,
761 			     bool check_device)
762 {
763 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
764 	struct nfs4_ff_layout_mirror *mirror;
765 	struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
766 	u32 idx;
767 
768 	/* mirrors are initially sorted by efficiency */
769 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
770 		mirror = FF_LAYOUT_COMP(lseg, idx);
771 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
772 		if (IS_ERR(ds))
773 			continue;
774 
775 		if (check_device &&
776 		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) {
777 			/* reinitialize the error state in case this is the last iteration */
778 			ds = ERR_PTR(-EINVAL);
779 			continue;
780 		}
781 
782 		*best_idx = idx;
783 		break;
784 	}
785 
786 	return ds;
787 }
788 
789 static struct nfs4_pnfs_ds *
790 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
791 				 u32 start_idx, u32 *best_idx)
792 {
793 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
794 }
795 
796 static struct nfs4_pnfs_ds *
797 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
798 				   u32 start_idx, u32 *best_idx)
799 {
800 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
801 }
802 
803 static struct nfs4_pnfs_ds *
804 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
805 				  u32 start_idx, u32 *best_idx)
806 {
807 	struct nfs4_pnfs_ds *ds;
808 
809 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
810 	if (!IS_ERR(ds))
811 		return ds;
812 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
813 }
814 
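/*
 * Pick a DS for a read: scan the mirrors starting at the pageio
 * descriptor's current mirror index; if nothing usable is found there,
 * wrap around and retry the scan from mirror 0.
 */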
815 static struct nfs4_pnfs_ds *
816 ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
817 			  u32 *best_idx)
818 {
819 	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
820 	struct nfs4_pnfs_ds *ds;
821 
822 	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
823 					       best_idx);
824 	if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
825 		return ds;
826 	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
827 }
828 
829 static void
830 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
831 		      struct nfs_page *req,
832 		      bool strict_iomode)
833 {
834 	pnfs_put_lseg(pgio->pg_lseg);
835 	pgio->pg_lseg =
836 		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
837 				   req_offset(req), req->wb_bytes, IOMODE_READ,
838 				   strict_iomode, nfs_io_gfp_mask());
839 	if (IS_ERR(pgio->pg_lseg)) {
840 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
841 		pgio->pg_lseg = NULL;
842 	}
843 }
844 
845 static void
846 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
847 			struct nfs_page *req)
848 {
849 	struct nfs_pgio_mirror *pgm;
850 	struct nfs4_ff_layout_mirror *mirror;
851 	struct nfs4_pnfs_ds *ds;
852 	u32 ds_idx;
853 
854 	if (NFS_SERVER(pgio->pg_inode)->flags &
855 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
856 		pgio->pg_maxretrans = io_maxretrans;
857 retry:
858 	pnfs_generic_pg_check_layout(pgio, req);
859 	/* Use full layout for now */
860 	if (!pgio->pg_lseg) {
861 		ff_layout_pg_get_read(pgio, req, false);
862 		if (!pgio->pg_lseg)
863 			goto out_nolseg;
864 	}
865 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
866 		ff_layout_pg_get_read(pgio, req, true);
867 		if (!pgio->pg_lseg)
868 			goto out_nolseg;
869 	}
870 	/* Reset wb_nio, since getting layout segment was successful */
871 	req->wb_nio = 0;
872 
873 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
874 	if (IS_ERR(ds)) {
875 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
876 			goto out_mds;
877 		pnfs_generic_pg_cleanup(pgio);
878 		/* Sleep for 1 second before retrying */
879 		ssleep(1);
880 		goto retry;
881 	}
882 
883 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
884 	pgm = &pgio->pg_mirrors[0];
885 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
886 
887 	pgio->pg_mirror_idx = ds_idx;
888 	return;
889 out_nolseg:
890 	if (pgio->pg_error < 0) {
891 		if (pgio->pg_error != -EAGAIN)
892 			return;
893 		/* Retry getting layout segment if lower layer returned -EAGAIN */
894 		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
895 			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
896 				pgio->pg_error = -ETIMEDOUT;
897 			else
898 				pgio->pg_error = -EIO;
899 			return;
900 		}
901 		pgio->pg_error = 0;
902 		/* Sleep for 1 second before retrying */
903 		ssleep(1);
904 		goto retry;
905 	}
906 out_mds:
907 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
908 			0, NFS4_MAX_UINT64, IOMODE_READ,
909 			NFS_I(pgio->pg_inode)->layout,
910 			pgio->pg_lseg);
911 	pgio->pg_maxretrans = 0;
912 	nfs_pageio_reset_read_mds(pgio);
913 }
914 
915 static void
916 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
917 			struct nfs_page *req)
918 {
919 	struct nfs4_ff_layout_mirror *mirror;
920 	struct nfs_pgio_mirror *pgm;
921 	struct nfs4_pnfs_ds *ds;
922 	u32 i;
923 
924 retry:
925 	pnfs_generic_pg_check_layout(pgio, req);
926 	if (!pgio->pg_lseg) {
927 		pgio->pg_lseg =
928 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
929 					   req_offset(req), req->wb_bytes,
930 					   IOMODE_RW, false, nfs_io_gfp_mask());
931 		if (IS_ERR(pgio->pg_lseg)) {
932 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
933 			pgio->pg_lseg = NULL;
934 			return;
935 		}
936 	}
937 	/* If no lseg, fall back to write through mds */
938 	if (pgio->pg_lseg == NULL)
939 		goto out_mds;
940 
941 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
942 	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
943 		goto out_eagain;
944 
945 	for (i = 0; i < pgio->pg_mirror_count; i++) {
946 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
947 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
948 		if (IS_ERR(ds)) {
949 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
950 				goto out_mds;
951 			pnfs_generic_pg_cleanup(pgio);
952 			/* Sleep for 1 second before retrying */
953 			ssleep(1);
954 			goto retry;
955 		}
956 		pgm = &pgio->pg_mirrors[i];
957 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
958 	}
959 
960 	if (NFS_SERVER(pgio->pg_inode)->flags &
961 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
962 		pgio->pg_maxretrans = io_maxretrans;
963 	return;
964 out_eagain:
965 	pnfs_generic_pg_cleanup(pgio);
966 	pgio->pg_error = -EAGAIN;
967 	return;
968 out_mds:
969 	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
970 			0, NFS4_MAX_UINT64, IOMODE_RW,
971 			NFS_I(pgio->pg_inode)->layout,
972 			pgio->pg_lseg);
973 	pgio->pg_maxretrans = 0;
974 	nfs_pageio_reset_write_mds(pgio);
975 	pgio->pg_error = -EAGAIN;
976 }
977 
978 static unsigned int
979 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
980 				    struct nfs_page *req)
981 {
982 	if (!pgio->pg_lseg) {
983 		pgio->pg_lseg =
984 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
985 					   req_offset(req), req->wb_bytes,
986 					   IOMODE_RW, false, nfs_io_gfp_mask());
987 		if (IS_ERR(pgio->pg_lseg)) {
988 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
989 			pgio->pg_lseg = NULL;
990 			goto out;
991 		}
992 	}
993 	if (pgio->pg_lseg)
994 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
995 
996 	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
997 			0, NFS4_MAX_UINT64, IOMODE_RW,
998 			NFS_I(pgio->pg_inode)->layout,
999 			pgio->pg_lseg);
1000 	/* no lseg means that pnfs is not in use, so no mirroring here */
1001 	nfs_pageio_reset_write_mds(pgio);
1002 out:
1003 	return 1;
1004 }
1005 
1006 static u32
1007 ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1008 {
1009 	u32 old = desc->pg_mirror_idx;
1010 
1011 	desc->pg_mirror_idx = idx;
1012 	return old;
1013 }
1014 
1015 static struct nfs_pgio_mirror *
1016 ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1017 {
1018 	return &desc->pg_mirrors[idx];
1019 }
1020 
1021 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1022 	.pg_init = ff_layout_pg_init_read,
1023 	.pg_test = pnfs_generic_pg_test,
1024 	.pg_doio = pnfs_generic_pg_readpages,
1025 	.pg_cleanup = pnfs_generic_pg_cleanup,
1026 };
1027 
1028 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1029 	.pg_init = ff_layout_pg_init_write,
1030 	.pg_test = pnfs_generic_pg_test,
1031 	.pg_doio = pnfs_generic_pg_writepages,
1032 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1033 	.pg_cleanup = pnfs_generic_pg_cleanup,
1034 	.pg_get_mirror = ff_layout_pg_get_mirror_write,
1035 	.pg_set_mirror = ff_layout_pg_set_mirror_write,
1036 };
1037 
1038 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1039 {
1040 	struct rpc_task *task = &hdr->task;
1041 
1042 	pnfs_layoutcommit_inode(hdr->inode, false);
1043 
1044 	if (retry_pnfs) {
1045 		dprintk("%s Reset task %5u for i/o through pNFS "
1046 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1047 			hdr->task.tk_pid,
1048 			hdr->inode->i_sb->s_id,
1049 			(unsigned long long)NFS_FILEID(hdr->inode),
1050 			hdr->args.count,
1051 			(unsigned long long)hdr->args.offset);
1052 
1053 		hdr->completion_ops->reschedule_io(hdr);
1054 		return;
1055 	}
1056 
1057 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1058 		dprintk("%s Reset task %5u for i/o through MDS "
1059 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1060 			hdr->task.tk_pid,
1061 			hdr->inode->i_sb->s_id,
1062 			(unsigned long long)NFS_FILEID(hdr->inode),
1063 			hdr->args.count,
1064 			(unsigned long long)hdr->args.offset);
1065 
1066 		trace_pnfs_mds_fallback_write_done(hdr->inode,
1067 				hdr->args.offset, hdr->args.count,
1068 				IOMODE_RW, NFS_I(hdr->inode)->layout,
1069 				hdr->lseg);
1070 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1071 	}
1072 }
1073 
1074 static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1075 {
1076 	u32 idx = hdr->pgio_mirror_idx + 1;
1077 	u32 new_idx = 0;
1078 	struct nfs4_pnfs_ds *ds;
1079 
1080 	ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx);
1081 	if (IS_ERR(ds))
1082 		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1083 	else
1084 		ff_layout_send_layouterror(hdr->lseg);
1085 	pnfs_read_resend_pnfs(hdr, new_idx);
1086 }
1087 
1088 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1089 {
1090 	struct rpc_task *task = &hdr->task;
1091 
1092 	pnfs_layoutcommit_inode(hdr->inode, false);
1093 	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1094 
1095 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1096 		dprintk("%s Reset task %5u for i/o through MDS "
1097 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1098 			hdr->task.tk_pid,
1099 			hdr->inode->i_sb->s_id,
1100 			(unsigned long long)NFS_FILEID(hdr->inode),
1101 			hdr->args.count,
1102 			(unsigned long long)hdr->args.offset);
1103 
1104 		trace_pnfs_mds_fallback_read_done(hdr->inode,
1105 				hdr->args.offset, hdr->args.count,
1106 				IOMODE_READ, NFS_I(hdr->inode)->layout,
1107 				hdr->lseg);
1108 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1109 	}
1110 }
1111 
1112 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1113 					   u32 op_status,
1114 					   struct nfs4_state *state,
1115 					   struct nfs_client *clp,
1116 					   struct pnfs_layout_segment *lseg,
1117 					   u32 idx)
1118 {
1119 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1120 	struct inode *inode = lo->plh_inode;
1121 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1122 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1123 
1124 	switch (op_status) {
1125 	case NFS4_OK:
1126 	case NFS4ERR_NXIO:
1127 		break;
1128 	case NFSERR_PERM:
1129 		if (!task->tk_xprt)
1130 			break;
1131 		xprt_force_disconnect(task->tk_xprt);
1132 		goto out_retry;
1133 	case NFS4ERR_BADSESSION:
1134 	case NFS4ERR_BADSLOT:
1135 	case NFS4ERR_BAD_HIGH_SLOT:
1136 	case NFS4ERR_DEADSESSION:
1137 	case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1138 	case NFS4ERR_SEQ_FALSE_RETRY:
1139 	case NFS4ERR_SEQ_MISORDERED:
1140 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1141 			"flags 0x%x\n", __func__, task->tk_status,
1142 			clp->cl_exchange_flags);
1143 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1144 		goto out_retry;
1145 	case NFS4ERR_DELAY:
1146 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1147 		fallthrough;
1148 	case NFS4ERR_GRACE:
1149 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1150 		goto out_retry;
1151 	case NFS4ERR_RETRY_UNCACHED_REP:
1152 		goto out_retry;
1153 	/* Invalidate Layout errors */
1154 	case NFS4ERR_PNFS_NO_LAYOUT:
1155 	case NFS4ERR_STALE:
1156 	case NFS4ERR_BADHANDLE:
1157 	case NFS4ERR_ISDIR:
1158 	case NFS4ERR_FHEXPIRED:
1159 	case NFS4ERR_WRONG_TYPE:
1160 		dprintk("%s Invalid layout error %d\n", __func__,
1161 			task->tk_status);
1162 		/*
1163 		 * Destroy layout so new i/o will get a new layout.
1164 		 * Layout will not be destroyed until all current lseg
1165 		 * references are put. Mark layout as invalid to resend failed
1166 		 * i/o and all i/o waiting on the slot table to the MDS until
1167 		 * layout is destroyed and a new valid layout is obtained.
1168 		 */
1169 		pnfs_destroy_layout(NFS_I(inode));
1170 		rpc_wake_up(&tbl->slot_tbl_waitq);
1171 		goto reset;
1172 	default:
1173 		break;
1174 	}
1175 
1176 	switch (task->tk_status) {
1177 	/* RPC connection errors */
1178 	case -ENETDOWN:
1179 	case -ENETUNREACH:
1180 		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1181 			return -NFS4ERR_FATAL_IOERROR;
1182 		fallthrough;
1183 	case -ECONNREFUSED:
1184 	case -EHOSTDOWN:
1185 	case -EHOSTUNREACH:
1186 	case -EIO:
1187 	case -ETIMEDOUT:
1188 	case -EPIPE:
1189 	case -EPROTO:
1190 	case -ENODEV:
1191 		dprintk("%s DS connection error %d\n", __func__,
1192 			task->tk_status);
1193 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1194 				&devid->deviceid);
1195 		rpc_wake_up(&tbl->slot_tbl_waitq);
1196 		break;
1197 	default:
1198 		break;
1199 	}
1200 
1201 	if (ff_layout_avoid_mds_available_ds(lseg))
1202 		return -NFS4ERR_RESET_TO_PNFS;
1203 reset:
1204 	dprintk("%s Retry through MDS. Error %d\n", __func__,
1205 		task->tk_status);
1206 	return -NFS4ERR_RESET_TO_MDS;
1207 
1208 out_retry:
1209 	task->tk_status = 0;
1210 	return -EAGAIN;
1211 }
1212 
1213 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1214 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1215 					   u32 op_status,
1216 					   struct nfs_client *clp,
1217 					   struct pnfs_layout_segment *lseg,
1218 					   u32 idx)
1219 {
1220 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1221 
1222 	switch (op_status) {
1223 	case NFS_OK:
1224 	case NFSERR_NXIO:
1225 		break;
1226 	case NFSERR_PERM:
1227 		if (!task->tk_xprt)
1228 			break;
1229 		xprt_force_disconnect(task->tk_xprt);
1230 		goto out_retry;
1231 	case NFSERR_ACCES:
1232 	case NFSERR_BADHANDLE:
1233 	case NFSERR_FBIG:
1234 	case NFSERR_IO:
1235 	case NFSERR_NOSPC:
1236 	case NFSERR_ROFS:
1237 	case NFSERR_STALE:
1238 		goto out_reset_to_pnfs;
1239 	case NFSERR_JUKEBOX:
1240 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1241 		goto out_retry;
1242 	default:
1243 		break;
1244 	}
1245 
1246 	switch (task->tk_status) {
1247 	/* File access problems. Don't mark the device as unavailable */
1248 	case -EACCES:
1249 	case -ESTALE:
1250 	case -EISDIR:
1251 	case -EBADHANDLE:
1252 	case -ELOOP:
1253 	case -ENOSPC:
1254 		break;
1255 	case -EJUKEBOX:
1256 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1257 		goto out_retry;
1258 	case -ENETDOWN:
1259 	case -ENETUNREACH:
1260 		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1261 			return -NFS4ERR_FATAL_IOERROR;
1262 		fallthrough;
1263 	default:
1264 		dprintk("%s DS connection error %d\n", __func__,
1265 			task->tk_status);
1266 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1267 				&devid->deviceid);
1268 	}
1269 out_reset_to_pnfs:
1270 	/* FIXME: Need to prevent infinite looping here. */
1271 	return -NFS4ERR_RESET_TO_PNFS;
1272 out_retry:
1273 	task->tk_status = 0;
1274 	rpc_restart_call_prepare(task);
1275 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1276 	return -EAGAIN;
1277 }
1278 
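/*
 * Dispatcher for the v3/v4 handlers above. Return-code contract with the
 * done callbacks below: 0 means "handled, proceed"; -NFS4ERR_RESET_TO_PNFS
 * and -NFS4ERR_RESET_TO_MDS ask the caller to resend through another
 * mirror or through the MDS respectively; -EAGAIN means the task has been
 * (or should be) restarted; -NFS4ERR_FATAL_IOERROR propagates a fatal
 * network error.
 */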
1279 static int ff_layout_async_handle_error(struct rpc_task *task,
1280 					u32 op_status,
1281 					struct nfs4_state *state,
1282 					struct nfs_client *clp,
1283 					struct pnfs_layout_segment *lseg,
1284 					u32 idx)
1285 {
1286 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1287 
1288 	if (task->tk_status >= 0) {
1289 		ff_layout_mark_ds_reachable(lseg, idx);
1290 		return 0;
1291 	}
1292 
1293 	/* Handle the case of an invalid layout segment */
1294 	if (!pnfs_is_valid_lseg(lseg))
1295 		return -NFS4ERR_RESET_TO_PNFS;
1296 
1297 	switch (vers) {
1298 	case 3:
1299 		return ff_layout_async_handle_error_v3(task, op_status, clp,
1300 						       lseg, idx);
1301 	case 4:
1302 		return ff_layout_async_handle_error_v4(task, op_status, state,
1303 						       clp, lseg, idx);
1304 	default:
1305 		/* should never happen */
1306 		WARN_ON_ONCE(1);
1307 		return 0;
1308 	}
1309 }
1310 
1311 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1312 					u32 idx, u64 offset, u64 length,
1313 					u32 *op_status, int opnum, int error)
1314 {
1315 	struct nfs4_ff_layout_mirror *mirror;
1316 	u32 status = *op_status;
1317 	int err;
1318 
1319 	if (status == 0) {
1320 		switch (error) {
1321 		case -ETIMEDOUT:
1322 		case -EPFNOSUPPORT:
1323 		case -EPROTONOSUPPORT:
1324 		case -EOPNOTSUPP:
1325 		case -EINVAL:
1326 		case -ECONNREFUSED:
1327 		case -ECONNRESET:
1328 		case -EHOSTDOWN:
1329 		case -EHOSTUNREACH:
1330 		case -ENETDOWN:
1331 		case -ENETUNREACH:
1332 		case -EADDRINUSE:
1333 		case -ENOBUFS:
1334 		case -EPIPE:
1335 		case -EPERM:
1336 		case -EPROTO:
1337 		case -ENODEV:
1338 			*op_status = status = NFS4ERR_NXIO;
1339 			break;
1340 		case -EACCES:
1341 			*op_status = status = NFS4ERR_ACCESS;
1342 			break;
1343 		default:
1344 			return;
1345 		}
1346 	}
1347 
1348 	mirror = FF_LAYOUT_COMP(lseg, idx);
1349 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1350 				       mirror, offset, length, status, opnum,
1351 				       nfs_io_gfp_mask());
1352 
1353 	switch (status) {
1354 	case NFS4ERR_DELAY:
1355 	case NFS4ERR_GRACE:
1356 	case NFS4ERR_PERM:
1357 		break;
1358 	case NFS4ERR_NXIO:
1359 		ff_layout_mark_ds_unreachable(lseg, idx);
1360 		/*
1361 		 * Don't return the layout if this is a read and we still
1362 		 * have layouts to try
1363 		 */
1364 		if (opnum == OP_READ)
1365 			break;
1366 		fallthrough;
1367 	default:
1368 		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1369 						  lseg);
1370 	}
1371 
1372 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1373 }
1374 
1375 /* NFS_PROTO call done callback routines */
1376 static int ff_layout_read_done_cb(struct rpc_task *task,
1377 				struct nfs_pgio_header *hdr)
1378 {
1379 	int err;
1380 
1381 	if (task->tk_status < 0) {
1382 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1383 					    hdr->args.offset, hdr->args.count,
1384 					    &hdr->res.op_status, OP_READ,
1385 					    task->tk_status);
1386 		trace_ff_layout_read_error(hdr, task->tk_status);
1387 	}
1388 
1389 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1390 					   hdr->args.context->state,
1391 					   hdr->ds_clp, hdr->lseg,
1392 					   hdr->pgio_mirror_idx);
1393 
1394 	trace_nfs4_pnfs_read(hdr, err);
1395 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1396 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1397 	switch (err) {
1398 	case -NFS4ERR_RESET_TO_PNFS:
1399 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1400 		return task->tk_status;
1401 	case -NFS4ERR_RESET_TO_MDS:
1402 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1403 		return task->tk_status;
1404 	case -EAGAIN:
1405 		goto out_eagain;
1406 	case -NFS4ERR_FATAL_IOERROR:
1407 		task->tk_status = -EIO;
1408 		return 0;
1409 	}
1410 
1411 	return 0;
1412 out_eagain:
1413 	rpc_restart_call_prepare(task);
1414 	return -EAGAIN;
1415 }
1416 
1417 static bool
1418 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1419 {
1420 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1421 }
1422 
1423 /*
1424  * We reference the rpc_cred of the first WRITE that triggers the need for
1425  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1426  * rfc5661 is not clear about which credential should be used.
1427  *
1428  * The flexfiles client should treat a FILE_SYNC reply from a DS as DATA_SYNC, so
1429  * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1430  * we always send layoutcommit after DS writes.
1431  */
1432 static void
1433 ff_layout_set_layoutcommit(struct inode *inode,
1434 		struct pnfs_layout_segment *lseg,
1435 		loff_t end_offset)
1436 {
1437 	if (!ff_layout_need_layoutcommit(lseg))
1438 		return;
1439 
1440 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1441 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1442 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1443 }
1444 
1445 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1446 		struct nfs_pgio_header *hdr)
1447 {
1448 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1449 		return;
1450 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1451 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1452 			hdr->args.count,
1453 			task->tk_start);
1454 }
1455 
1456 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1457 		struct nfs_pgio_header *hdr)
1458 {
1459 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1460 		return;
1461 	nfs4_ff_layout_stat_io_end_read(task,
1462 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1463 			hdr->args.count,
1464 			hdr->res.count);
1465 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1466 }
1467 
1468 static int ff_layout_read_prepare_common(struct rpc_task *task,
1469 					 struct nfs_pgio_header *hdr)
1470 {
1471 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1472 		rpc_exit(task, -EIO);
1473 		return -EIO;
1474 	}
1475 
1476 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1477 		rpc_exit(task, -EAGAIN);
1478 		return -EAGAIN;
1479 	}
1480 
1481 	ff_layout_read_record_layoutstats_start(task, hdr);
1482 	return 0;
1483 }
1484 
1485 /*
1486  * Call ops for the async read/write cases
1487  * In the case of dense layouts, the offset needs to be reset to its
1488  * original value.
1489  */
1490 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1491 {
1492 	struct nfs_pgio_header *hdr = data;
1493 
1494 	if (ff_layout_read_prepare_common(task, hdr))
1495 		return;
1496 
1497 	rpc_call_start(task);
1498 }
1499 
1500 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1501 {
1502 	struct nfs_pgio_header *hdr = data;
1503 
1504 	if (nfs4_setup_sequence(hdr->ds_clp,
1505 				&hdr->args.seq_args,
1506 				&hdr->res.seq_res,
1507 				task))
1508 		return;
1509 
1510 	ff_layout_read_prepare_common(task, hdr);
1511 }
1512 
1513 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1514 {
1515 	struct nfs_pgio_header *hdr = data;
1516 
1517 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1518 	    task->tk_status == 0) {
1519 		nfs4_sequence_done(task, &hdr->res.seq_res);
1520 		return;
1521 	}
1522 
1523 	/* Note this may cause RPC to be resent */
1524 	hdr->mds_ops->rpc_call_done(task, hdr);
1525 }
1526 
1527 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1528 {
1529 	struct nfs_pgio_header *hdr = data;
1530 
1531 	ff_layout_read_record_layoutstats_done(task, hdr);
1532 	rpc_count_iostats_metrics(task,
1533 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1534 }
1535 
1536 static void ff_layout_read_release(void *data)
1537 {
1538 	struct nfs_pgio_header *hdr = data;
1539 
1540 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1541 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1542 		ff_layout_resend_pnfs_read(hdr);
1543 	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1544 		ff_layout_reset_read(hdr);
1545 	pnfs_generic_rw_release(data);
1546 }
1547 
1548 
1549 static int ff_layout_write_done_cb(struct rpc_task *task,
1550 				struct nfs_pgio_header *hdr)
1551 {
1552 	loff_t end_offs = 0;
1553 	int err;
1554 
1555 	if (task->tk_status < 0) {
1556 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1557 					    hdr->args.offset, hdr->args.count,
1558 					    &hdr->res.op_status, OP_WRITE,
1559 					    task->tk_status);
1560 		trace_ff_layout_write_error(hdr, task->tk_status);
1561 	}
1562 
1563 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1564 					   hdr->args.context->state,
1565 					   hdr->ds_clp, hdr->lseg,
1566 					   hdr->pgio_mirror_idx);
1567 
1568 	trace_nfs4_pnfs_write(hdr, err);
1569 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1570 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1571 	switch (err) {
1572 	case -NFS4ERR_RESET_TO_PNFS:
1573 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1574 		return task->tk_status;
1575 	case -NFS4ERR_RESET_TO_MDS:
1576 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1577 		return task->tk_status;
1578 	case -EAGAIN:
1579 		return -EAGAIN;
1580 	case -NFS4ERR_FATAL_IOERROR:
1581 		task->tk_status = -EIO;
1582 		return 0;
1583 	}
1584 
1585 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1586 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1587 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1588 
1589 	/* Note: if the write is unstable, don't set end_offs until commit */
1590 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1591 
1592 	/* zero out fattr since we don't care about DS attrs at all */
1593 	hdr->fattr.valid = 0;
1594 	if (task->tk_status >= 0)
1595 		nfs_writeback_update_inode(hdr);
1596 
1597 	return 0;
1598 }
1599 
1600 static int ff_layout_commit_done_cb(struct rpc_task *task,
1601 				     struct nfs_commit_data *data)
1602 {
1603 	int err;
1604 
1605 	if (task->tk_status < 0) {
1606 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1607 					    data->args.offset, data->args.count,
1608 					    &data->res.op_status, OP_COMMIT,
1609 					    task->tk_status);
1610 		trace_ff_layout_commit_error(data, task->tk_status);
1611 	}
1612 
1613 	err = ff_layout_async_handle_error(task, data->res.op_status,
1614 					   NULL, data->ds_clp, data->lseg,
1615 					   data->ds_commit_index);
1616 
1617 	trace_nfs4_pnfs_commit_ds(data, err);
1618 	switch (err) {
1619 	case -NFS4ERR_RESET_TO_PNFS:
1620 		pnfs_generic_prepare_to_resend_writes(data);
1621 		return -EAGAIN;
1622 	case -NFS4ERR_RESET_TO_MDS:
1623 		pnfs_generic_prepare_to_resend_writes(data);
1624 		return -EAGAIN;
1625 	case -EAGAIN:
1626 		rpc_restart_call_prepare(task);
1627 		return -EAGAIN;
1628 	case -NFS4ERR_FATAL_IOERROR:
1629 		task->tk_status = -EIO;
1630 		return 0;
1631 	}
1632 
1633 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1634 
1635 	return 0;
1636 }
1637 
1638 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1639 		struct nfs_pgio_header *hdr)
1640 {
1641 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1642 		return;
1643 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1644 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1645 			hdr->args.count,
1646 			task->tk_start);
1647 }
1648 
1649 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1650 		struct nfs_pgio_header *hdr)
1651 {
1652 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1653 		return;
1654 	nfs4_ff_layout_stat_io_end_write(task,
1655 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1656 			hdr->args.count, hdr->res.count,
1657 			hdr->res.verf->committed);
1658 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1659 }
1660 
1661 static int ff_layout_write_prepare_common(struct rpc_task *task,
1662 					  struct nfs_pgio_header *hdr)
1663 {
1664 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1665 		rpc_exit(task, -EIO);
1666 		return -EIO;
1667 	}
1668 
1669 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1670 		rpc_exit(task, -EAGAIN);
1671 		return -EAGAIN;
1672 	}
1673 
1674 	ff_layout_write_record_layoutstats_start(task, hdr);
1675 	return 0;
1676 }
1677 
1678 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1679 {
1680 	struct nfs_pgio_header *hdr = data;
1681 
1682 	if (ff_layout_write_prepare_common(task, hdr))
1683 		return;
1684 
1685 	rpc_call_start(task);
1686 }
1687 
1688 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1689 {
1690 	struct nfs_pgio_header *hdr = data;
1691 
1692 	if (nfs4_setup_sequence(hdr->ds_clp,
1693 				&hdr->args.seq_args,
1694 				&hdr->res.seq_res,
1695 				task))
1696 		return;
1697 
1698 	ff_layout_write_prepare_common(task, hdr);
1699 }
1700 
1701 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1702 {
1703 	struct nfs_pgio_header *hdr = data;
1704 
1705 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1706 	    task->tk_status == 0) {
1707 		nfs4_sequence_done(task, &hdr->res.seq_res);
1708 		return;
1709 	}
1710 
1711 	/* Note this may cause RPC to be resent */
1712 	hdr->mds_ops->rpc_call_done(task, hdr);
1713 }
1714 
1715 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1716 {
1717 	struct nfs_pgio_header *hdr = data;
1718 
1719 	ff_layout_write_record_layoutstats_done(task, hdr);
1720 	rpc_count_iostats_metrics(task,
1721 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1722 }
1723 
1724 static void ff_layout_write_release(void *data)
1725 {
1726 	struct nfs_pgio_header *hdr = data;
1727 
1728 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1729 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1730 		ff_layout_send_layouterror(hdr->lseg);
1731 		ff_layout_reset_write(hdr, true);
1732 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1733 		ff_layout_reset_write(hdr, false);
1734 	pnfs_generic_rw_release(data);
1735 }
1736 
1737 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1738 		struct nfs_commit_data *cdata)
1739 {
1740 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1741 		return;
1742 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1743 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1744 			0, task->tk_start);
1745 }
1746 
1747 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1748 		struct nfs_commit_data *cdata)
1749 {
1750 	struct nfs_page *req;
1751 	__u64 count = 0;
1752 
1753 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1754 		return;
1755 
1756 	if (task->tk_status == 0) {
1757 		list_for_each_entry(req, &cdata->pages, wb_list)
1758 			count += req->wb_bytes;
1759 	}
1760 	nfs4_ff_layout_stat_io_end_write(task,
1761 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1762 			count, count, NFS_FILE_SYNC);
1763 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1764 }
1765 
1766 static int ff_layout_commit_prepare_common(struct rpc_task *task,
1767 					   struct nfs_commit_data *cdata)
1768 {
1769 	if (!pnfs_is_valid_lseg(cdata->lseg)) {
1770 		rpc_exit(task, -EAGAIN);
1771 		return -EAGAIN;
1772 	}
1773 
1774 	ff_layout_commit_record_layoutstats_start(task, cdata);
1775 	return 0;
1776 }
1777 
1778 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1779 {
1780 	if (ff_layout_commit_prepare_common(task, data))
1781 		return;
1782 
1783 	rpc_call_start(task);
1784 }
1785 
1786 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1787 {
1788 	struct nfs_commit_data *wdata = data;
1789 
1790 	if (nfs4_setup_sequence(wdata->ds_clp,
1791 				&wdata->args.seq_args,
1792 				&wdata->res.seq_res,
1793 				task))
1794 		return;
1795 	ff_layout_commit_prepare_common(task, data);
1796 }
1797 
1798 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1799 {
1800 	pnfs_generic_write_commit_done(task, data);
1801 }
1802 
1803 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1804 {
1805 	struct nfs_commit_data *cdata = data;
1806 
1807 	ff_layout_commit_record_layoutstats_done(task, cdata);
1808 	rpc_count_iostats_metrics(task,
1809 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1810 }
1811 
1812 static void ff_layout_commit_release(void *data)
1813 {
1814 	struct nfs_commit_data *cdata = data;
1815 
1816 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1817 	pnfs_generic_commit_release(data);
1818 }
1819 
1820 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1821 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1822 	.rpc_call_done = ff_layout_read_call_done,
1823 	.rpc_count_stats = ff_layout_read_count_stats,
1824 	.rpc_release = ff_layout_read_release,
1825 };
1826 
1827 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1828 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1829 	.rpc_call_done = ff_layout_read_call_done,
1830 	.rpc_count_stats = ff_layout_read_count_stats,
1831 	.rpc_release = ff_layout_read_release,
1832 };
1833 
1834 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1835 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1836 	.rpc_call_done = ff_layout_write_call_done,
1837 	.rpc_count_stats = ff_layout_write_count_stats,
1838 	.rpc_release = ff_layout_write_release,
1839 };
1840 
1841 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1842 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1843 	.rpc_call_done = ff_layout_write_call_done,
1844 	.rpc_count_stats = ff_layout_write_count_stats,
1845 	.rpc_release = ff_layout_write_release,
1846 };
1847 
1848 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1849 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1850 	.rpc_call_done = ff_layout_commit_done,
1851 	.rpc_count_stats = ff_layout_commit_count_stats,
1852 	.rpc_release = ff_layout_commit_release,
1853 };
1854 
1855 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1856 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1857 	.rpc_call_done = ff_layout_commit_done,
1858 	.rpc_count_stats = ff_layout_commit_count_stats,
1859 	.rpc_release = ff_layout_commit_release,
1860 };
1861 
1862 static enum pnfs_try_status
1863 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1864 {
1865 	struct pnfs_layout_segment *lseg = hdr->lseg;
1866 	struct nfs4_pnfs_ds *ds;
1867 	struct rpc_clnt *ds_clnt;
1868 	struct nfsd_file *localio;
1869 	struct nfs4_ff_layout_mirror *mirror;
1870 	const struct cred *ds_cred;
1871 	loff_t offset = hdr->args.offset;
1872 	u32 idx = hdr->pgio_mirror_idx;
1873 	int vers;
1874 	struct nfs_fh *fh;
1875 	bool ds_fatal_error = false;
1876 
1877 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1878 		__func__, hdr->inode->i_ino,
1879 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1880 
1881 	mirror = FF_LAYOUT_COMP(lseg, idx);
1882 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1883 	if (IS_ERR(ds)) {
1884 		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
1885 		goto out_failed;
1886 	}
1887 
1888 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1889 						   hdr->inode);
1890 	if (IS_ERR(ds_clnt))
1891 		goto out_failed;
1892 
1893 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1894 	if (!ds_cred)
1895 		goto out_failed;
1896 
1897 	vers = nfs4_ff_layout_ds_version(mirror);
1898 
1899 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1900 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1901 
1902 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1903 	refcount_inc(&ds->ds_clp->cl_count);
1904 	hdr->ds_clp = ds->ds_clp;
1905 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1906 	if (fh)
1907 		hdr->args.fh = fh;
1908 
1909 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1910 
1911 	/*
1912 	 * Note that if we ever decide to split across DSes,
1913 	 * then we may need to handle dense-like offsets.
1914 	 */
1915 	hdr->args.offset = offset;
1916 	hdr->mds_offset = offset;
1917 
1918 	/* Start IO accounting for local read */
1919 	localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ);
1920 	if (localio) {
1921 		hdr->task.tk_start = ktime_get();
1922 		ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
1923 	}
1924 
1925 	/* Perform an asynchronous read to ds */
1926 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1927 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1928 				      &ff_layout_read_call_ops_v4,
1929 			  0, RPC_TASK_SOFTCONN, localio);
1930 	put_cred(ds_cred);
1931 	return PNFS_ATTEMPTED;
1932 
1933 out_failed:
1934 	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
1935 		return PNFS_TRY_AGAIN;
1936 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1937 			hdr->args.offset, hdr->args.count,
1938 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1939 	return PNFS_NOT_ATTEMPTED;
1940 }
1941 
1942 /* Set up and fire an asynchronous WRITE to the selected mirror's DS. */
1943 static enum pnfs_try_status
1944 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1945 {
1946 	struct pnfs_layout_segment *lseg = hdr->lseg;
1947 	struct nfs4_pnfs_ds *ds;
1948 	struct rpc_clnt *ds_clnt;
1949 	struct nfsd_file *localio;
1950 	struct nfs4_ff_layout_mirror *mirror;
1951 	const struct cred *ds_cred;
1952 	loff_t offset = hdr->args.offset;
1953 	int vers;
1954 	struct nfs_fh *fh;
1955 	u32 idx = hdr->pgio_mirror_idx;
1956 	bool ds_fatal_error = false;
1957 
1958 	mirror = FF_LAYOUT_COMP(lseg, idx);
1959 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1960 	if (IS_ERR(ds)) {
1961 		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
1962 		goto out_failed;
1963 	}
1964 
1965 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1966 						   hdr->inode);
1967 	if (IS_ERR(ds_clnt))
1968 		goto out_failed;
1969 
1970 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1971 	if (!ds_cred)
1972 		goto out_failed;
1973 
1974 	vers = nfs4_ff_layout_ds_version(mirror);
1975 
1976 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1977 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1978 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1979 		vers);
1980 
1981 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1982 	refcount_inc(&ds->ds_clp->cl_count);
1983 	hdr->ds_clp = ds->ds_clp;
1984 	hdr->ds_commit_idx = idx;
1985 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1986 	if (fh)
1987 		hdr->args.fh = fh;
1988 
1989 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1990 
1991 	/*
1992 	 * Note that if we ever decide to split across DSes,
1993 	 * then we may need to handle dense-like offsets.
1994 	 */
1995 	hdr->args.offset = offset;
1996 
1997 	/* Start IO accounting for local write */
1998 	localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
1999 				   FMODE_READ|FMODE_WRITE);
2000 	if (localio) {
2001 		hdr->task.tk_start = ktime_get();
2002 		ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
2003 	}
2004 
2005 	/* Perform an asynchronous write */
2006 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
2007 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
2008 				      &ff_layout_write_call_ops_v4,
2009 			  sync, RPC_TASK_SOFTCONN, localio);
2010 	put_cred(ds_cred);
2011 	return PNFS_ATTEMPTED;
2012 
2013 out_failed:
2014 	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
2015 		return PNFS_TRY_AGAIN;
2016 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
2017 			hdr->args.offset, hdr->args.count,
2018 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
2019 	return PNFS_NOT_ATTEMPTED;
2020 }
2021 
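/*
 * Flexfiles keeps a 1:1 mapping between commit bucket index and mirror
 * index, so the DS index is simply the bucket index.
 */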
2022 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
2023 {
2024 	return i;
2025 }
2026 
2027 static struct nfs_fh *
2028 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
2029 {
2030 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2031 
2032 	/* FIXME: Assumes that there is only one NFS version available
2033 	 * for the DS.
2034 	 */
2035 	return &flseg->mirror_array[i]->fh_versions[0];
2036 }
2037 
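/*
 * Send a COMMIT to the data server for this commit bucket.  On any
 * setup failure the queued requests are marked for resend through the
 * MDS and -EAGAIN is returned.
 */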
2038 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
2039 {
2040 	struct pnfs_layout_segment *lseg = data->lseg;
2041 	struct nfs4_pnfs_ds *ds;
2042 	struct rpc_clnt *ds_clnt;
2043 	struct nfsd_file *localio;
2044 	struct nfs4_ff_layout_mirror *mirror;
2045 	const struct cred *ds_cred;
2046 	u32 idx;
2047 	int vers, ret;
2048 	struct nfs_fh *fh;
2049 
2050 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
2051 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
2052 		goto out_err;
2053 
2054 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
2055 	mirror = FF_LAYOUT_COMP(lseg, idx);
2056 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
2057 	if (IS_ERR(ds))
2058 		goto out_err;
2059 
2060 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2061 						   data->inode);
2062 	if (IS_ERR(ds_clnt))
2063 		goto out_err;
2064 
2065 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
2066 	if (!ds_cred)
2067 		goto out_err;
2068 
2069 	vers = nfs4_ff_layout_ds_version(mirror);
2070 
2071 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
2072 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
2073 		vers);
2074 	data->commit_done_cb = ff_layout_commit_done_cb;
2075 	data->cred = ds_cred;
2076 	refcount_inc(&ds->ds_clp->cl_count);
2077 	data->ds_clp = ds->ds_clp;
2078 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
2079 	if (fh)
2080 		data->args.fh = fh;
2081 
2082 	/* Start IO accounting for local commit */
2083 	localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
2084 				   FMODE_READ|FMODE_WRITE);
2085 	if (localio) {
2086 		data->task.tk_start = ktime_get();
2087 		ff_layout_commit_record_layoutstats_start(&data->task, data);
2088 	}
2089 
2090 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2091 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
2092 					       &ff_layout_commit_call_ops_v4,
2093 				   how, RPC_TASK_SOFTCONN, localio);
2094 	put_cred(ds_cred);
2095 	return ret;
2096 out_err:
2097 	pnfs_generic_prepare_to_resend_writes(data);
2098 	pnfs_generic_commit_release(data);
2099 	return -EAGAIN;
2100 }
2101 
2102 static int
2103 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2104 			   int how, struct nfs_commit_info *cinfo)
2105 {
2106 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2107 					    ff_layout_initiate_commit);
2108 }
2109 
2110 static bool ff_layout_match_rw(const struct rpc_task *task,
2111 			       const struct nfs_pgio_header *hdr,
2112 			       const struct pnfs_layout_segment *lseg)
2113 {
2114 	return hdr->lseg == lseg;
2115 }
2116 
2117 static bool ff_layout_match_commit(const struct rpc_task *task,
2118 				   const struct nfs_commit_data *cdata,
2119 				   const struct pnfs_layout_segment *lseg)
2120 {
2121 	return cdata->lseg == lseg;
2122 }
2123 
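/*
 * Match an in-flight RPC task against a layout segment by inspecting
 * its call ops: read/write tasks are matched through their pgio
 * header, commit tasks through their commit data.
 */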
2124 static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2125 {
2126 	const struct rpc_call_ops *ops = task->tk_ops;
2127 
2128 	if (ops == &ff_layout_read_call_ops_v3 ||
2129 	    ops == &ff_layout_read_call_ops_v4 ||
2130 	    ops == &ff_layout_write_call_ops_v3 ||
2131 	    ops == &ff_layout_write_call_ops_v4)
2132 		return ff_layout_match_rw(task, task->tk_calldata, data);
2133 	if (ops == &ff_layout_commit_call_ops_v3 ||
2134 	    ops == &ff_layout_commit_call_ops_v4)
2135 		return ff_layout_match_commit(task, task->tk_calldata, data);
2136 	return false;
2137 }
2138 
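/*
 * Cancel every in-flight read, write and commit RPC that references
 * this layout segment, then disconnect the transport so that tasks
 * already on the wire terminate quickly.
 */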
2139 static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2140 {
2141 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2142 	struct nfs4_ff_layout_mirror *mirror;
2143 	struct nfs4_ff_layout_ds *mirror_ds;
2144 	struct nfs4_pnfs_ds *ds;
2145 	struct nfs_client *ds_clp;
2146 	struct rpc_clnt *clnt;
2147 	u32 idx;
2148 
2149 	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2150 		mirror = flseg->mirror_array[idx];
2151 		mirror_ds = mirror->mirror_ds;
2152 		if (IS_ERR_OR_NULL(mirror_ds))
2153 			continue;
2154 		ds = mirror->mirror_ds->ds;
2155 		if (!ds)
2156 			continue;
2157 		ds_clp = ds->ds_clp;
2158 		if (!ds_clp)
2159 			continue;
2160 		clnt = ds_clp->cl_rpcclient;
2161 		if (!clnt)
2162 			continue;
2163 		if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2164 			continue;
2165 		rpc_clnt_disconnect(clnt);
2166 	}
2167 }
2168 
2169 static struct pnfs_ds_commit_info *
2170 ff_layout_get_ds_info(struct inode *inode)
2171 {
2172 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2173 
2174 	if (layout == NULL)
2175 		return NULL;
2176 
2177 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2178 }
2179 
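/*
 * Ensure the layout's commit info covers this segment by installing a
 * commit array with one bucket per mirror; if another thread beat us
 * to it, discard the newly allocated array.
 */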
2180 static void
2181 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2182 		struct pnfs_layout_segment *lseg)
2183 {
2184 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2185 	struct inode *inode = lseg->pls_layout->plh_inode;
2186 	struct pnfs_commit_array *array, *new;
2187 
2188 	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2189 				      nfs_io_gfp_mask());
2190 	if (new) {
2191 		spin_lock(&inode->i_lock);
2192 		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2193 		spin_unlock(&inode->i_lock);
2194 		if (array != new)
2195 			pnfs_free_commit_array(new);
2196 	}
2197 }
2198 
2199 static void
2200 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2201 		struct inode *inode)
2202 {
2203 	spin_lock(&inode->i_lock);
2204 	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2205 	spin_unlock(&inode->i_lock);
2206 }
2207 
2208 static void
2209 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2210 {
2211 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2212 						  id_node));
2213 }
2214 
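/*
 * Encode the ioerr part of the LAYOUTRETURN body: a 32-bit error count
 * followed by the queued DS error entries.
 */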
2215 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2216 				  const struct nfs4_layoutreturn_args *args,
2217 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2218 {
2219 	__be32 *start;
2220 
2221 	start = xdr_reserve_space(xdr, 4);
2222 	if (unlikely(!start))
2223 		return -E2BIG;
2224 
2225 	*start = cpu_to_be32(ff_args->num_errors);
2226 	/* This assume we always return _ALL_ layouts */
2227 	/* This assumes we always return _ALL_ layouts */
2228 }
2229 
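/*
 * Encode the fixed head of an ff_iostats4 entry: the byte range and
 * stateid, the read/write op and byte counters, and the device ID.
 */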
2230 static void
2231 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2232 			    const nfs4_stateid *stateid,
2233 			    const struct nfs42_layoutstat_devinfo *devinfo)
2234 {
2235 	__be32 *p;
2236 
2237 	p = xdr_reserve_space(xdr, 8 + 8);
2238 	p = xdr_encode_hyper(p, devinfo->offset);
2239 	p = xdr_encode_hyper(p, devinfo->length);
2240 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2241 	p = xdr_reserve_space(xdr, 4*8);
2242 	p = xdr_encode_hyper(p, devinfo->read_count);
2243 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2244 	p = xdr_encode_hyper(p, devinfo->write_count);
2245 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2246 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2247 }
2248 
2249 static void
2250 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2251 			    const nfs4_stateid *stateid,
2252 			    const struct nfs42_layoutstat_devinfo *devinfo)
2253 {
2254 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2255 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2256 			devinfo->ld_private.data);
2257 }
2258 
2259 /* Encode the iostats gathered for each device in this LAYOUTRETURN */
2260 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2261 		const struct nfs4_layoutreturn_args *args,
2262 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2263 {
2264 	__be32 *p;
2265 	int i;
2266 
2267 	p = xdr_reserve_space(xdr, 4);
2268 	*p = cpu_to_be32(ff_args->num_dev);
2269 	for (i = 0; i < ff_args->num_dev; i++)
2270 		ff_layout_encode_ff_iostat(xdr,
2271 				&args->layout->plh_stateid,
2272 				&ff_args->devinfo[i]);
2273 }
2274 
2275 static void
2276 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2277 		unsigned int num_entries)
2278 {
2279 	unsigned int i;
2280 
2281 	for (i = 0; i < num_entries; i++) {
2282 		if (!devinfo[i].ld_private.ops)
2283 			continue;
2284 		if (!devinfo[i].ld_private.ops->free)
2285 			continue;
2286 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2287 	}
2288 }
2289 
2290 static struct nfs4_deviceid_node *
2291 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2292 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2293 {
2294 	struct nfs4_ff_layout_ds *dsaddr;
2295 
2296 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2297 	if (!dsaddr)
2298 		return NULL;
2299 	return &dsaddr->id_node;
2300 }
2301 
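/*
 * Encode the flexfiles LAYOUTRETURN body.  The ioerr and iostats
 * arrays are first serialized into a scratch page through a temporary
 * XDR stream, then the page is appended to the real stream as a single
 * length-prefixed opaque blob.
 */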
2302 static void
2303 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2304 		const void *voidargs,
2305 		const struct nfs4_xdr_opaque_data *ff_opaque)
2306 {
2307 	const struct nfs4_layoutreturn_args *args = voidargs;
2308 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2309 	struct xdr_buf tmp_buf = {
2310 		.head = {
2311 			[0] = {
2312 				.iov_base = page_address(ff_args->pages[0]),
2313 			},
2314 		},
2315 		.buflen = PAGE_SIZE,
2316 	};
2317 	struct xdr_stream tmp_xdr;
2318 	__be32 *start;
2319 
2320 	dprintk("%s: Begin\n", __func__);
2321 
2322 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2323 
2324 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2325 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2326 
2327 	start = xdr_reserve_space(xdr, 4);
2328 	*start = cpu_to_be32(tmp_buf.len);
2329 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2330 
2331 	dprintk("%s: Return\n", __func__);
2332 }
2333 
2334 static void
2335 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2336 {
2337 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2338 
2339 	if (!args->data)
2340 		return;
2341 	ff_args = args->data;
2342 	args->data = NULL;
2343 
2344 	ff_layout_free_ds_ioerr(&ff_args->errors);
2345 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2346 
2347 	put_page(ff_args->pages[0]);
2348 	kfree(ff_args);
2349 }
2350 
2351 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2352 	.encode = ff_layout_encode_layoutreturn,
2353 	.free = ff_layout_free_layoutreturn,
2354 };
2355 
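/*
 * Allocate and populate the private LAYOUTRETURN payload: fetch up to
 * FF_LAYOUTRETURN_MAXERR queued DS errors and the per-mirror iostats,
 * then hang the result off args->ld_private for the encoder above.
 */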
2356 static int
2357 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2358 {
2359 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2360 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2361 
2362 	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2363 	if (!ff_args)
2364 		goto out_nomem;
2365 	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2366 	if (!ff_args->pages[0])
2367 		goto out_nomem_free;
2368 
2369 	INIT_LIST_HEAD(&ff_args->errors);
2370 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2371 			&args->range, &ff_args->errors,
2372 			FF_LAYOUTRETURN_MAXERR);
2373 
2374 	spin_lock(&args->inode->i_lock);
2375 	ff_args->num_dev = ff_layout_mirror_prepare_stats(
2376 		&ff_layout->generic_hdr, &ff_args->devinfo[0],
2377 		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2378 	spin_unlock(&args->inode->i_lock);
2379 
2380 	args->ld_private->ops = &layoutreturn_ops;
2381 	args->ld_private->data = ff_args;
2382 	return 0;
2383 out_nomem_free:
2384 	kfree(ff_args);
2385 out_nomem:
2386 	return -ENOMEM;
2387 }
2388 
2389 #ifdef CONFIG_NFS_V4_2
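/*
 * Report any DS I/O errors recorded against this segment back to the
 * server, batching at most NFS42_LAYOUTERROR_MAX errors per
 * LAYOUTERROR call.
 */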
2390 void
2391 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2392 {
2393 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2394 	struct nfs42_layout_error *errors;
2395 	LIST_HEAD(head);
2396 
2397 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2398 		return;
2399 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2400 	if (list_empty(&head))
2401 		return;
2402 
2403 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2404 			       nfs_io_gfp_mask());
2405 	if (errors != NULL) {
2406 		const struct nfs4_ff_layout_ds_err *pos;
2407 		size_t n = 0;
2408 
2409 		list_for_each_entry(pos, &head, list) {
2410 			errors[n].offset = pos->offset;
2411 			errors[n].length = pos->length;
2412 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2413 			errors[n].errors[0].dev_id = pos->deviceid;
2414 			errors[n].errors[0].status = pos->status;
2415 			errors[n].errors[0].opnum = pos->opnum;
2416 			n++;
2417 			if (!list_is_last(&pos->list, &head) &&
2418 			    n < NFS42_LAYOUTERROR_MAX)
2419 				continue;
2420 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2421 				break;
2422 			n = 0;
2423 		}
2424 		kfree(errors);
2425 	}
2426 	ff_layout_free_ds_ioerr(&head);
2427 }
2428 #else
2429 void
2430 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2431 {
2432 }
2433 #endif
2434 
2435 static int
2436 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2437 {
2438 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2439 
2440 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2441 }
2442 
2443 static size_t
2444 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2445 			  const int buflen)
2446 {
2447 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2448 	const struct in6_addr *addr = &sin6->sin6_addr;
2449 
2450 	/*
2451 	 * RFC 4291, Section 2.2.2
2452 	 *
2453 	 * Shorthanded ANY address
2454 	 */
2455 	if (ipv6_addr_any(addr))
2456 		return snprintf(buf, buflen, "::");
2457 
2458 	/*
2459 	 * RFC 4291, Section 2.2.2
2460 	 *
2461 	 * Shorthanded loopback address
2462 	 */
2463 	if (ipv6_addr_loopback(addr))
2464 		return snprintf(buf, buflen, "::1");
2465 
2466 	/*
2467 	 * RFC 4291, Section 2.2.3
2468 	 *
2469 	 * Special presentation address format for mapped v4
2470 	 * addresses.
2471 	 */
2472 	if (ipv6_addr_v4mapped(addr))
2473 		return snprintf(buf, buflen, "::ffff:%pI4",
2474 					&addr->s6_addr32[3]);
2475 
2476 	/*
2477 	 * RFC 4291, Section 2.2.1
2478 	 */
2479 	return snprintf(buf, buflen, "%pI6c", addr);
2480 }
2481 
2482 /* Derived from rpc_sockaddr2uaddr */
2483 static void
2484 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2485 {
2486 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2487 	char portbuf[RPCBIND_MAXUADDRPLEN];
2488 	char addrbuf[RPCBIND_MAXUADDRLEN];
2489 	unsigned short port;
2490 	int len, netid_len;
2491 	__be32 *p;
2492 
2493 	switch (sap->sa_family) {
2494 	case AF_INET:
2495 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2496 			return;
2497 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2498 		break;
2499 	case AF_INET6:
2500 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2501 			return;
2502 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2503 		break;
2504 	default:
2505 		WARN_ON_ONCE(1);
2506 		return;
2507 	}
2508 
2509 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2510 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2511 
2512 	netid_len = strlen(da->da_netid);
2513 	p = xdr_reserve_space(xdr, 4 + netid_len);
2514 	xdr_encode_opaque(p, da->da_netid, netid_len);
2515 
2516 	p = xdr_reserve_space(xdr, 4 + len);
2517 	xdr_encode_opaque(p, addrbuf, len);
2518 }
2519 
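/*
 * Encode a ktime_t as an nfstime4: 64-bit seconds followed by 32-bit
 * nanoseconds.
 */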
2520 static void
2521 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2522 			 ktime_t t)
2523 {
2524 	struct timespec64 ts;
2525 	__be32 *p;
2526 
2527 	p = xdr_reserve_space(xdr, 12);
2528 	ts = ktime_to_timespec64(t);
2529 	p = xdr_encode_hyper(p, ts.tv_sec);
2530 	*p++ = cpu_to_be32(ts.tv_nsec);
2531 }
2532 
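/*
 * Encode one ff_io_latency4: five 64-bit op/byte counters followed by
 * the busy time and aggregate completion time as nfstime4 values.
 */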
2533 static void
2534 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2535 			    struct nfs4_ff_io_stat *stat)
2536 {
2537 	__be32 *p;
2538 
2539 	p = xdr_reserve_space(xdr, 5 * 8);
2540 	p = xdr_encode_hyper(p, stat->ops_requested);
2541 	p = xdr_encode_hyper(p, stat->bytes_requested);
2542 	p = xdr_encode_hyper(p, stat->ops_completed);
2543 	p = xdr_encode_hyper(p, stat->bytes_completed);
2544 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2545 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2546 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2547 }
2548 
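/*
 * Encode a single ff_layoutupdate4 for @mirror: the first DS address
 * and filehandle, the read and write latency stats (sampled under the
 * mirror lock), the time this mirror has been active, and a trailing
 * boolean that is always false.
 */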
2549 static void
2550 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2551 			      const struct nfs42_layoutstat_devinfo *devinfo,
2552 			      struct nfs4_ff_layout_mirror *mirror)
2553 {
2554 	struct nfs4_pnfs_ds_addr *da;
2555 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2556 	struct nfs_fh *fh = &mirror->fh_versions[0];
2557 	__be32 *p;
2558 
2559 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2560 	dprintk("%s: DS %s: encoding address %s\n",
2561 		__func__, ds->ds_remotestr, da->da_remotestr);
2562 	/* netaddr4 */
2563 	ff_layout_encode_netaddr(xdr, da);
2564 	/* nfs_fh4 */
2565 	p = xdr_reserve_space(xdr, 4 + fh->size);
2566 	xdr_encode_opaque(p, fh->data, fh->size);
2567 	/* ff_io_latency4 read */
2568 	spin_lock(&mirror->lock);
2569 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2570 	/* ff_io_latency4 write */
2571 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2572 	spin_unlock(&mirror->lock);
2573 	/* nfstime4 */
2574 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2575 	/* bool */
2576 	p = xdr_reserve_space(xdr, 4);
2577 	*p = cpu_to_be32(false);
2578 }
2579 
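/*
 * Encode the layoutupdate4 body for one device, then backfill the
 * 32-bit length prefix once the encoded size is known.
 */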
2580 static void
2581 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2582 			     const struct nfs4_xdr_opaque_data *opaque)
2583 {
2584 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2585 			struct nfs42_layoutstat_devinfo, ld_private);
2586 	__be32 *start;
2587 
2588 	/* layoutupdate length */
2589 	start = xdr_reserve_space(xdr, 4);
2590 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2591 
2592 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2593 }
2594 
2595 static void
2596 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2597 {
2598 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2599 
2600 	ff_layout_put_mirror(mirror);
2601 }
2602 
2603 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2604 	.encode = ff_layout_encode_layoutstats,
2605 	.free	= ff_layout_free_layoutstats,
2606 };
2607 
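/*
 * Fill in up to @dev_limit layoutstats entries from the layout's
 * mirror list.  LAYOUTSTATS only reports mirrors whose STAT_AVAIL flag
 * was set; LAYOUTRETURN reports every mirror with a valid deviceid.
 * Each entry pins a mirror reference that is dropped by the ld_private
 * free op.
 */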
2608 static int
2609 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2610 			       struct nfs42_layoutstat_devinfo *devinfo,
2611 			       int dev_limit, enum nfs4_ff_op_type type)
2612 {
2613 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2614 	struct nfs4_ff_layout_mirror *mirror;
2615 	struct nfs4_deviceid_node *dev;
2616 	int i = 0;
2617 
2618 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2619 		if (i >= dev_limit)
2620 			break;
2621 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2622 			continue;
2623 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2624 					&mirror->flags) &&
2625 		    type != NFS4_FF_OP_LAYOUTRETURN)
2626 			continue;
2627 		/* mirror refcount put in cleanup_layoutstats */
2628 		if (!refcount_inc_not_zero(&mirror->ref))
2629 			continue;
2630 		dev = &mirror->mirror_ds->id_node;
2631 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2632 		devinfo->offset = 0;
2633 		devinfo->length = NFS4_MAX_UINT64;
2634 		spin_lock(&mirror->lock);
2635 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2636 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2637 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2638 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2639 		spin_unlock(&mirror->lock);
2640 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2641 		devinfo->ld_private.ops = &layoutstat_ops;
2642 		devinfo->ld_private.data = mirror;
2643 
2644 		devinfo++;
2645 		i++;
2646 	}
2647 	return i;
2648 }
2649 
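/*
 * Build the devinfo array for a LAYOUTSTATS call.  Returns -ENOENT,
 * after freeing the array, if the layout is no longer valid or no
 * mirror has fresh statistics to report.
 */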
2650 static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2651 {
2652 	struct pnfs_layout_hdr *lo;
2653 	struct nfs4_flexfile_layout *ff_layout;
2654 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2655 
2656 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2657 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2658 				      nfs_io_gfp_mask());
2659 	if (!args->devinfo)
2660 		return -ENOMEM;
2661 
2662 	spin_lock(&args->inode->i_lock);
2663 	lo = NFS_I(args->inode)->layout;
2664 	if (lo && pnfs_layout_is_valid(lo)) {
2665 		ff_layout = FF_LAYOUT_FROM_HDR(lo);
2666 		args->num_dev = ff_layout_mirror_prepare_stats(
2667 			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2668 			NFS4_FF_OP_LAYOUTSTATS);
2669 	} else
2670 		args->num_dev = 0;
2671 	spin_unlock(&args->inode->i_lock);
2672 	if (!args->num_dev) {
2673 		kfree(args->devinfo);
2674 		args->devinfo = NULL;
2675 		return -ENOENT;
2676 	}
2677 
2678 	return 0;
2679 }
2680 
2681 static int
2682 ff_layout_set_layoutdriver(struct nfs_server *server,
2683 		const struct nfs_fh *dummy)
2684 {
2685 #if IS_ENABLED(CONFIG_NFS_V4_2)
2686 	server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
2687 #endif
2688 	return 0;
2689 }
2690 
2691 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2692 	.setup_ds_info		= ff_layout_setup_ds_info,
2693 	.release_ds_info	= ff_layout_release_ds_info,
2694 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2695 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2696 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2697 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2698 	.commit_pagelist	= ff_layout_commit_pagelist,
2699 };
2700 
2701 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2702 	.id			= LAYOUT_FLEX_FILES,
2703 	.name			= "LAYOUT_FLEX_FILES",
2704 	.owner			= THIS_MODULE,
2705 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2706 	.max_layoutget_response	= 4096, /* 1 page or so... */
2707 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2708 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2709 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2710 	.alloc_lseg		= ff_layout_alloc_lseg,
2711 	.free_lseg		= ff_layout_free_lseg,
2712 	.add_lseg		= ff_layout_add_lseg,
2713 	.pg_read_ops		= &ff_layout_pg_read_ops,
2714 	.pg_write_ops		= &ff_layout_pg_write_ops,
2715 	.get_ds_info		= ff_layout_get_ds_info,
2716 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2717 	.read_pagelist		= ff_layout_read_pagelist,
2718 	.write_pagelist		= ff_layout_write_pagelist,
2719 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2720 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2721 	.sync			= pnfs_nfs_generic_sync,
2722 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2723 	.cancel_io		= ff_layout_cancel_io,
2724 };
2725 
2726 static int __init nfs4flexfilelayout_init(void)
2727 {
2728 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2729 	       __func__);
2730 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2731 }
2732 
2733 static void __exit nfs4flexfilelayout_exit(void)
2734 {
2735 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2736 	       __func__);
2737 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2738 }
2739 
2740 MODULE_ALIAS("nfs-layouttype4-4");
2741 
2742 MODULE_LICENSE("GPL");
2743 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2744 
2745 module_init(nfs4flexfilelayout_init);
2746 module_exit(nfs4flexfilelayout_exit);
2747 
2748 module_param(io_maxretrans, ushort, 0644);
2749 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2750 			"retries an I/O request before returning an error.");
2751