xref: /linux/fs/nfs/flexfilelayout/flexfilelayout.c (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Module for pnfs flexfile layout driver.
4  *
5  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6  *
7  * Tao Peng <bergwolf@primarydata.com>
8  */
9 
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/sched/mm.h>
15 
16 #include <linux/sunrpc/metrics.h>
17 
18 #include "flexfilelayout.h"
19 #include "../nfs4session.h"
20 #include "../nfs4idmap.h"
21 #include "../internal.h"
22 #include "../delegation.h"
23 #include "../nfs4trace.h"
24 #include "../iostat.h"
25 #include "../nfs.h"
26 #include "../nfs42.h"
27 
28 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
29 
30 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
31 #define FF_LAYOUTRETURN_MAXERR 20
32 
33 enum nfs4_ff_op_type {
34 	NFS4_FF_OP_LAYOUTSTATS,
35 	NFS4_FF_OP_LAYOUTRETURN,
36 };
37 
38 static unsigned short io_maxretrans;
39 
40 static const struct pnfs_commit_ops ff_layout_commit_ops;
41 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
42 		struct nfs_pgio_header *hdr);
43 static int
44 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
45 			       struct nfs42_layoutstat_devinfo *devinfo,
46 			       int dev_limit, enum nfs4_ff_op_type type);
47 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
48 			      const struct nfs42_layoutstat_devinfo *devinfo,
49 			      struct nfs4_ff_layout_mirror *mirror);
50 
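/*
 * Allocate a flexfiles layout header and initialise its commit info,
 * DS error list, mirror list and layoutstats report timestamp.
 */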
51 static struct pnfs_layout_hdr *
52 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
53 {
54 	struct nfs4_flexfile_layout *ffl;
55 
56 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
57 	if (ffl) {
58 		pnfs_init_ds_commit_info(&ffl->commit_info);
59 		INIT_LIST_HEAD(&ffl->error_list);
60 		INIT_LIST_HEAD(&ffl->mirrors);
61 		ffl->last_report_time = ktime_get();
62 		ffl->commit_info.ops = &ff_layout_commit_ops;
63 		return &ffl->generic_hdr;
64 	} else
65 		return NULL;
66 }
67 
68 static void
69 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
70 {
71 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
72 	struct nfs4_ff_layout_ds_err *err, *n;
73 
74 	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
75 		list_del(&err->list);
76 		kfree(err);
77 	}
78 	kfree_rcu(ffl, generic_hdr.plh_rcu);
79 }
80 
81 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
82 {
83 	__be32 *p;
84 
85 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
86 	if (unlikely(p == NULL))
87 		return -ENOBUFS;
88 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
89 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
90 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
91 		p[0], p[1], p[2], p[3]);
92 	return 0;
93 }
94 
95 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
96 {
97 	__be32 *p;
98 
99 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
100 	if (unlikely(!p))
101 		return -ENOBUFS;
102 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
103 	nfs4_print_deviceid(devid);
104 	return 0;
105 }
106 
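/*
 * Decode an opaque NFS filehandle (length followed by data), rejecting
 * anything larger than NFS_MAXFHSIZE.
 */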
107 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
108 {
109 	__be32 *p;
110 
111 	p = xdr_inline_decode(xdr, 4);
112 	if (unlikely(!p))
113 		return -ENOBUFS;
114 	fh->size = be32_to_cpup(p++);
115 	if (fh->size > NFS_MAXFHSIZE) {
116 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
117 		       fh->size);
118 		return -EOVERFLOW;
119 	}
120 	/* fh.data */
121 	p = xdr_inline_decode(xdr, fh->size);
122 	if (unlikely(!p))
123 		return -ENOBUFS;
124 	memcpy(&fh->data, p, fh->size);
125 	dprintk("%s: fh len %d\n", __func__, fh->size);
126 
127 	return 0;
128 }
129 
130 /*
131  * Currently only stringified uids and gids are accepted.
132  * I.e., Kerberos is not supported for the DSes, so no principals.
133  *
134  * That means that one common function will suffice, but when
135  * principals are added, this should be split to accommodate
136  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
137  */
138 static int
139 decode_name(struct xdr_stream *xdr, u32 *id)
140 {
141 	__be32 *p;
142 	int len;
143 
144 	/* opaque_length(4)*/
145 	p = xdr_inline_decode(xdr, 4);
146 	if (unlikely(!p))
147 		return -ENOBUFS;
148 	len = be32_to_cpup(p++);
149 	if (len < 0)
150 		return -EINVAL;
151 
152 	dprintk("%s: len %u\n", __func__, len);
153 
154 	/* opaque body */
155 	p = xdr_inline_decode(xdr, len);
156 	if (unlikely(!p))
157 		return -ENOBUFS;
158 
159 	if (!nfs_map_string_to_numeric((char *)p, len, id))
160 		return -EINVAL;
161 
162 	return 0;
163 }
164 
165 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
166 		const struct nfs4_ff_layout_mirror *m2)
167 {
168 	int i, j;
169 
170 	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
171 		return false;
172 	for (i = 0; i < m1->fh_versions_cnt; i++) {
173 		bool found_fh = false;
174 		for (j = 0; j < m2->fh_versions_cnt; j++) {
175 			if (nfs_compare_fh(&m1->fh_versions[i],
176 					&m2->fh_versions[j]) == 0) {
177 				found_fh = true;
178 				break;
179 			}
180 		}
181 		if (!found_fh)
182 			return false;
183 	}
184 	return true;
185 }
186 
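/*
 * Look for an existing mirror with the same deviceid and filehandle
 * versions in the layout.  If one is found and a reference can be taken,
 * reuse it; otherwise add the new mirror to the layout's mirror list.
 */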
187 static struct nfs4_ff_layout_mirror *
188 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
189 		struct nfs4_ff_layout_mirror *mirror)
190 {
191 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
192 	struct nfs4_ff_layout_mirror *pos;
193 	struct inode *inode = lo->plh_inode;
194 
195 	spin_lock(&inode->i_lock);
196 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
197 		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
198 			continue;
199 		if (!ff_mirror_match_fh(mirror, pos))
200 			continue;
201 		if (refcount_inc_not_zero(&pos->ref)) {
202 			spin_unlock(&inode->i_lock);
203 			return pos;
204 		}
205 	}
206 	list_add(&mirror->mirrors, &ff_layout->mirrors);
207 	mirror->layout = lo;
208 	spin_unlock(&inode->i_lock);
209 	return mirror;
210 }
211 
212 static void
213 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
214 {
215 	struct inode *inode;
216 	if (mirror->layout == NULL)
217 		return;
218 	inode = mirror->layout->plh_inode;
219 	spin_lock(&inode->i_lock);
220 	list_del(&mirror->mirrors);
221 	spin_unlock(&inode->i_lock);
222 	mirror->layout = NULL;
223 }
224 
225 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
226 {
227 	struct nfs4_ff_layout_mirror *mirror;
228 
229 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
230 	if (mirror != NULL) {
231 		spin_lock_init(&mirror->lock);
232 		refcount_set(&mirror->ref, 1);
233 		INIT_LIST_HEAD(&mirror->mirrors);
234 	}
235 	return mirror;
236 }
237 
238 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
239 {
240 	const struct cred	*cred;
241 
242 	ff_layout_remove_mirror(mirror);
243 	kfree(mirror->fh_versions);
244 	cred = rcu_access_pointer(mirror->ro_cred);
245 	put_cred(cred);
246 	cred = rcu_access_pointer(mirror->rw_cred);
247 	put_cred(cred);
248 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
249 	kfree(mirror);
250 }
251 
252 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
253 {
254 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
255 		ff_layout_free_mirror(mirror);
256 }
257 
258 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
259 {
260 	u32 i;
261 
262 	for (i = 0; i < fls->mirror_array_cnt; i++)
263 		ff_layout_put_mirror(fls->mirror_array[i]);
264 }
265 
266 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
267 {
268 	if (fls) {
269 		ff_layout_free_mirror_array(fls);
270 		kfree(fls);
271 	}
272 }
273 
274 static bool
275 ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
276 		struct pnfs_layout_segment *l2)
277 {
278 	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
279 	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
280 	u32 i;
281 
282 	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
283 		return false;
284 	for (i = 0; i < fl1->mirror_array_cnt; i++) {
285 		if (fl1->mirror_array[i] != fl2->mirror_array[i])
286 			return false;
287 	}
288 	return true;
289 }
290 
291 static bool
292 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
293 		const struct pnfs_layout_range *l2)
294 {
295 	u64 end1, end2;
296 
297 	if (l1->iomode != l2->iomode)
298 		return l1->iomode != IOMODE_READ;
299 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
300 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
301 	if (end1 < l2->offset)
302 		return false;
303 	if (end2 < l1->offset)
304 		return true;
305 	return l2->offset <= l1->offset;
306 }
307 
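/*
 * A new layout segment can absorb an old one when the old segment is not
 * being returned, both have the same iomode, their byte ranges overlap
 * and they reference the same mirrors.  On success 'new' is extended to
 * cover both ranges and inherits the ROC flag.
 */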
308 static bool
309 ff_lseg_merge(struct pnfs_layout_segment *new,
310 		struct pnfs_layout_segment *old)
311 {
312 	u64 new_end, old_end;
313 
314 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
315 		return false;
316 	if (new->pls_range.iomode != old->pls_range.iomode)
317 		return false;
318 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
319 			old->pls_range.length);
320 	if (old_end < new->pls_range.offset)
321 		return false;
322 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
323 			new->pls_range.length);
324 	if (new_end < old->pls_range.offset)
325 		return false;
326 	if (!ff_lseg_match_mirrors(new, old))
327 		return false;
328 
329 	/* Mergeable: copy info from 'old' to 'new' */
330 	if (new_end < old_end)
331 		new_end = old_end;
332 	if (new->pls_range.offset < old->pls_range.offset)
333 		new->pls_range.offset = old->pls_range.offset;
334 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
335 			new_end);
336 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
337 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
338 	return true;
339 }
340 
341 static void
342 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
343 		struct pnfs_layout_segment *lseg,
344 		struct list_head *free_me)
345 {
346 	pnfs_generic_layout_insert_lseg(lo, lseg,
347 			ff_lseg_range_is_after,
348 			ff_lseg_merge,
349 			free_me);
350 }
351 
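/* Order the mirror array by descending efficiency (simple selection sort). */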
352 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
353 {
354 	int i, j;
355 
356 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
357 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
358 			if (fls->mirror_array[i]->efficiency <
359 			    fls->mirror_array[j]->efficiency)
360 				swap(fls->mirror_array[i],
361 				     fls->mirror_array[j]);
362 	}
363 }
364 
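/*
 * Decode a flexfiles layout from the LAYOUTGET reply: the stripe unit and
 * mirror count, then per mirror the deviceid, efficiency, stateid,
 * filehandle array and synthetic uid/gid credential, followed by the
 * optional flags and layoutstats report interval.
 */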
365 static struct pnfs_layout_segment *
366 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
367 		     struct nfs4_layoutget_res *lgr,
368 		     gfp_t gfp_flags)
369 {
370 	struct pnfs_layout_segment *ret;
371 	struct nfs4_ff_layout_segment *fls = NULL;
372 	struct xdr_stream stream;
373 	struct xdr_buf buf;
374 	struct page *scratch;
375 	u64 stripe_unit;
376 	u32 mirror_array_cnt;
377 	__be32 *p;
378 	int i, rc;
379 
380 	dprintk("--> %s\n", __func__);
381 	scratch = alloc_page(gfp_flags);
382 	if (!scratch)
383 		return ERR_PTR(-ENOMEM);
384 
385 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
386 			      lgr->layoutp->len);
387 	xdr_set_scratch_page(&stream, scratch);
388 
389 	/* stripe unit and mirror_array_cnt */
390 	rc = -EIO;
391 	p = xdr_inline_decode(&stream, 8 + 4);
392 	if (!p)
393 		goto out_err_free;
394 
395 	p = xdr_decode_hyper(p, &stripe_unit);
396 	mirror_array_cnt = be32_to_cpup(p++);
397 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
398 		stripe_unit, mirror_array_cnt);
399 
400 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
401 	    mirror_array_cnt == 0)
402 		goto out_err_free;
403 
404 	rc = -ENOMEM;
405 	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
406 			gfp_flags);
407 	if (!fls)
408 		goto out_err_free;
409 
410 	fls->mirror_array_cnt = mirror_array_cnt;
411 	fls->stripe_unit = stripe_unit;
412 
413 	for (i = 0; i < fls->mirror_array_cnt; i++) {
414 		struct nfs4_ff_layout_mirror *mirror;
415 		struct cred *kcred;
416 		const struct cred __rcu *cred;
417 		kuid_t uid;
418 		kgid_t gid;
419 		u32 ds_count, fh_count, id;
420 		int j;
421 
422 		rc = -EIO;
423 		p = xdr_inline_decode(&stream, 4);
424 		if (!p)
425 			goto out_err_free;
426 		ds_count = be32_to_cpup(p);
427 
428 		/* FIXME: allow for striping? */
429 		if (ds_count != 1)
430 			goto out_err_free;
431 
432 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
433 		if (fls->mirror_array[i] == NULL) {
434 			rc = -ENOMEM;
435 			goto out_err_free;
436 		}
437 
438 		fls->mirror_array[i]->ds_count = ds_count;
439 
440 		/* deviceid */
441 		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
442 		if (rc)
443 			goto out_err_free;
444 
445 		/* efficiency */
446 		rc = -EIO;
447 		p = xdr_inline_decode(&stream, 4);
448 		if (!p)
449 			goto out_err_free;
450 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
451 
452 		/* stateid */
453 		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
454 		if (rc)
455 			goto out_err_free;
456 
457 		/* fh */
458 		rc = -EIO;
459 		p = xdr_inline_decode(&stream, 4);
460 		if (!p)
461 			goto out_err_free;
462 		fh_count = be32_to_cpup(p);
463 
464 		fls->mirror_array[i]->fh_versions =
465 			kcalloc(fh_count, sizeof(struct nfs_fh),
466 				gfp_flags);
467 		if (fls->mirror_array[i]->fh_versions == NULL) {
468 			rc = -ENOMEM;
469 			goto out_err_free;
470 		}
471 
472 		for (j = 0; j < fh_count; j++) {
473 			rc = decode_nfs_fh(&stream,
474 					   &fls->mirror_array[i]->fh_versions[j]);
475 			if (rc)
476 				goto out_err_free;
477 		}
478 
479 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
480 
481 		/* user */
482 		rc = decode_name(&stream, &id);
483 		if (rc)
484 			goto out_err_free;
485 
486 		uid = make_kuid(&init_user_ns, id);
487 
488 		/* group */
489 		rc = decode_name(&stream, &id);
490 		if (rc)
491 			goto out_err_free;
492 
493 		gid = make_kgid(&init_user_ns, id);
494 
495 		if (gfp_flags & __GFP_FS)
496 			kcred = prepare_kernel_cred(&init_task);
497 		else {
498 			unsigned int nofs_flags = memalloc_nofs_save();
499 			kcred = prepare_kernel_cred(&init_task);
500 			memalloc_nofs_restore(nofs_flags);
501 		}
502 		rc = -ENOMEM;
503 		if (!kcred)
504 			goto out_err_free;
505 		kcred->fsuid = uid;
506 		kcred->fsgid = gid;
507 		cred = RCU_INITIALIZER(kcred);
508 
509 		if (lgr->range.iomode == IOMODE_READ)
510 			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
511 		else
512 			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
513 
514 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
515 		if (mirror != fls->mirror_array[i]) {
516 			/* swap cred ptrs so free_mirror will clean up old */
517 			if (lgr->range.iomode == IOMODE_READ) {
518 				cred = xchg(&mirror->ro_cred, cred);
519 				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
520 			} else {
521 				cred = xchg(&mirror->rw_cred, cred);
522 				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
523 			}
524 			ff_layout_free_mirror(fls->mirror_array[i]);
525 			fls->mirror_array[i] = mirror;
526 		}
527 
528 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
529 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
530 			from_kuid(&init_user_ns, uid),
531 			from_kgid(&init_user_ns, gid));
532 	}
533 
534 	p = xdr_inline_decode(&stream, 4);
535 	if (!p)
536 		goto out_sort_mirrors;
537 	fls->flags = be32_to_cpup(p);
538 
539 	p = xdr_inline_decode(&stream, 4);
540 	if (!p)
541 		goto out_sort_mirrors;
542 	for (i = 0; i < fls->mirror_array_cnt; i++)
543 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
544 
545 out_sort_mirrors:
546 	ff_layout_sort_mirrors(fls);
547 	ret = &fls->generic_hdr;
548 	dprintk("<-- %s (success)\n", __func__);
549 out_free_page:
550 	__free_page(scratch);
551 	return ret;
552 out_err_free:
553 	_ff_layout_free_lseg(fls);
554 	ret = ERR_PTR(rc);
555 	dprintk("<-- %s (%d)\n", __func__, rc);
556 	goto out_free_page;
557 }
558 
559 static void
560 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
561 {
562 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
563 
564 	dprintk("--> %s\n", __func__);
565 
566 	if (lseg->pls_range.iomode == IOMODE_RW) {
567 		struct nfs4_flexfile_layout *ffl;
568 		struct inode *inode;
569 
570 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
571 		inode = ffl->generic_hdr.plh_inode;
572 		spin_lock(&inode->i_lock);
573 		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
574 		spin_unlock(&inode->i_lock);
575 	}
576 	_ff_layout_free_lseg(fls);
577 }
578 
579 static void
580 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
581 {
582 	/* first IO request? */
583 	if (atomic_inc_return(&timer->n_ops) == 1) {
584 		timer->start_time = now;
585 	}
586 }
587 
588 static ktime_t
589 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
590 {
591 	ktime_t start;
592 
593 	if (atomic_dec_return(&timer->n_ops) < 0)
594 		WARN_ON_ONCE(1);
595 
596 	start = timer->start_time;
597 	timer->start_time = now;
598 	return ktime_sub(now, start);
599 }
600 
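/*
 * Record the start of an I/O for layoutstats purposes.  Returns true when
 * the report interval has expired and a LAYOUTSTATS should be sent.
 */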
601 static bool
602 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
603 			    struct nfs4_ff_layoutstat *layoutstat,
604 			    ktime_t now)
605 {
606 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
607 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
608 
609 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
610 	if (!mirror->start_time)
611 		mirror->start_time = now;
612 	if (mirror->report_interval != 0)
613 		report_interval = (s64)mirror->report_interval * 1000LL;
614 	else if (layoutstats_timer != 0)
615 		report_interval = (s64)layoutstats_timer * 1000LL;
616 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
617 			report_interval) {
618 		ffl->last_report_time = now;
619 		return true;
620 	}
621 
622 	return false;
623 }
624 
625 static void
626 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
627 		__u64 requested)
628 {
629 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
630 
631 	iostat->ops_requested++;
632 	iostat->bytes_requested += requested;
633 }
634 
635 static void
636 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
637 		__u64 requested,
638 		__u64 completed,
639 		ktime_t time_completed,
640 		ktime_t time_started)
641 {
642 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
643 	ktime_t completion_time = ktime_sub(time_completed, time_started);
644 	ktime_t timer;
645 
646 	iostat->ops_completed++;
647 	iostat->bytes_completed += completed;
648 	iostat->bytes_not_delivered += requested - completed;
649 
650 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
651 	iostat->total_busy_time =
652 			ktime_add(iostat->total_busy_time, timer);
653 	iostat->aggregate_completion_time =
654 			ktime_add(iostat->aggregate_completion_time,
655 					completion_time);
656 }
657 
658 static void
659 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
660 		struct nfs4_ff_layout_mirror *mirror,
661 		__u64 requested, ktime_t now)
662 {
663 	bool report;
664 
665 	spin_lock(&mirror->lock);
666 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
667 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
668 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
669 	spin_unlock(&mirror->lock);
670 
671 	if (report)
672 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
673 }
674 
675 static void
676 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
677 		struct nfs4_ff_layout_mirror *mirror,
678 		__u64 requested,
679 		__u64 completed)
680 {
681 	spin_lock(&mirror->lock);
682 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
683 			requested, completed,
684 			ktime_get(), task->tk_start);
685 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
686 	spin_unlock(&mirror->lock);
687 }
688 
689 static void
690 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
691 		struct nfs4_ff_layout_mirror *mirror,
692 		__u64 requested, ktime_t now)
693 {
694 	bool report;
695 
696 	spin_lock(&mirror->lock);
697 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
698 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
699 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
700 	spin_unlock(&mirror->lock);
701 
702 	if (report)
703 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
704 }
705 
706 static void
707 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
708 		struct nfs4_ff_layout_mirror *mirror,
709 		__u64 requested,
710 		__u64 completed,
711 		enum nfs3_stable_how committed)
712 {
713 	if (committed == NFS_UNSTABLE)
714 		requested = completed = 0;
715 
716 	spin_lock(&mirror->lock);
717 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
718 			requested, completed, ktime_get(), task->tk_start);
719 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
720 	spin_unlock(&mirror->lock);
721 }
722 
723 static void
724 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
725 {
726 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
727 
728 	if (devid)
729 		nfs4_mark_deviceid_unavailable(devid);
730 }
731 
732 static void
733 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
734 {
735 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
736 
737 	if (devid)
738 		nfs4_mark_deviceid_available(devid);
739 }
740 
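/*
 * Starting at start_idx, pick the first mirror (the array is sorted by
 * efficiency) for which a DS connection can be established, optionally
 * skipping devices that have been marked unavailable.
 */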
741 static struct nfs4_pnfs_ds *
742 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
743 			     u32 start_idx, u32 *best_idx,
744 			     bool check_device)
745 {
746 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
747 	struct nfs4_ff_layout_mirror *mirror;
748 	struct nfs4_pnfs_ds *ds;
749 	u32 idx;
750 
751 	/* mirrors are initially sorted by efficiency */
752 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
753 		mirror = FF_LAYOUT_COMP(lseg, idx);
754 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
755 		if (!ds)
756 			continue;
757 
758 		if (check_device &&
759 		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
760 			continue;
761 
762 		*best_idx = idx;
763 		return ds;
764 	}
765 
766 	return NULL;
767 }
768 
769 static struct nfs4_pnfs_ds *
770 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
771 				 u32 start_idx, u32 *best_idx)
772 {
773 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
774 }
775 
776 static struct nfs4_pnfs_ds *
777 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
778 				   u32 start_idx, u32 *best_idx)
779 {
780 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
781 }
782 
783 static struct nfs4_pnfs_ds *
784 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
785 				  u32 start_idx, u32 *best_idx)
786 {
787 	struct nfs4_pnfs_ds *ds;
788 
789 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
790 	if (ds)
791 		return ds;
792 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
793 }
794 
795 static struct nfs4_pnfs_ds *
796 ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
797 			  u32 *best_idx)
798 {
799 	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
800 	struct nfs4_pnfs_ds *ds;
801 
802 	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
803 					       best_idx);
804 	if (ds || !pgio->pg_mirror_idx)
805 		return ds;
806 	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
807 }
808 
809 static void
810 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
811 		      struct nfs_page *req,
812 		      bool strict_iomode)
813 {
814 	pnfs_put_lseg(pgio->pg_lseg);
815 	pgio->pg_lseg =
816 		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
817 				   req_offset(req), req->wb_bytes, IOMODE_READ,
818 				   strict_iomode, nfs_io_gfp_mask());
819 	if (IS_ERR(pgio->pg_lseg)) {
820 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
821 		pgio->pg_lseg = NULL;
822 	}
823 }
824 
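/*
 * Select the layout segment and data server to use for a read request,
 * falling back to the MDS, or retrying after a short sleep, when no DS
 * is usable.
 */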
825 static void
826 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
827 			struct nfs_page *req)
828 {
829 	struct nfs_pgio_mirror *pgm;
830 	struct nfs4_ff_layout_mirror *mirror;
831 	struct nfs4_pnfs_ds *ds;
832 	u32 ds_idx;
833 
834 retry:
835 	pnfs_generic_pg_check_layout(pgio, req);
836 	/* Use full layout for now */
837 	if (!pgio->pg_lseg) {
838 		ff_layout_pg_get_read(pgio, req, false);
839 		if (!pgio->pg_lseg)
840 			goto out_nolseg;
841 	}
842 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
843 		ff_layout_pg_get_read(pgio, req, true);
844 		if (!pgio->pg_lseg)
845 			goto out_nolseg;
846 	}
847 
848 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
849 	if (!ds) {
850 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
851 			goto out_mds;
852 		pnfs_generic_pg_cleanup(pgio);
853 		/* Sleep for 1 second before retrying */
854 		ssleep(1);
855 		goto retry;
856 	}
857 
858 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
859 	pgm = &pgio->pg_mirrors[0];
860 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
861 
862 	pgio->pg_mirror_idx = ds_idx;
863 
864 	if (NFS_SERVER(pgio->pg_inode)->flags &
865 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
866 		pgio->pg_maxretrans = io_maxretrans;
867 	return;
868 out_nolseg:
869 	if (pgio->pg_error < 0)
870 		return;
871 out_mds:
872 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
873 			0, NFS4_MAX_UINT64, IOMODE_READ,
874 			NFS_I(pgio->pg_inode)->layout,
875 			pgio->pg_lseg);
876 	pgio->pg_maxretrans = 0;
877 	nfs_pageio_reset_read_mds(pgio);
878 }
879 
880 static void
881 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
882 			struct nfs_page *req)
883 {
884 	struct nfs4_ff_layout_mirror *mirror;
885 	struct nfs_pgio_mirror *pgm;
886 	struct nfs4_pnfs_ds *ds;
887 	u32 i;
888 
889 retry:
890 	pnfs_generic_pg_check_layout(pgio, req);
891 	if (!pgio->pg_lseg) {
892 		pgio->pg_lseg =
893 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
894 					   req_offset(req), req->wb_bytes,
895 					   IOMODE_RW, false, nfs_io_gfp_mask());
896 		if (IS_ERR(pgio->pg_lseg)) {
897 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
898 			pgio->pg_lseg = NULL;
899 			return;
900 		}
901 	}
902 	/* If no lseg, fall back to write through mds */
903 	if (pgio->pg_lseg == NULL)
904 		goto out_mds;
905 
906 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
907 	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
908 		goto out_eagain;
909 
910 	for (i = 0; i < pgio->pg_mirror_count; i++) {
911 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
912 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
913 		if (!ds) {
914 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
915 				goto out_mds;
916 			pnfs_generic_pg_cleanup(pgio);
917 			/* Sleep for 1 second before retrying */
918 			ssleep(1);
919 			goto retry;
920 		}
921 		pgm = &pgio->pg_mirrors[i];
922 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
923 	}
924 
925 	if (NFS_SERVER(pgio->pg_inode)->flags &
926 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
927 		pgio->pg_maxretrans = io_maxretrans;
928 	return;
929 out_eagain:
930 	pnfs_generic_pg_cleanup(pgio);
931 	pgio->pg_error = -EAGAIN;
932 	return;
933 out_mds:
934 	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
935 			0, NFS4_MAX_UINT64, IOMODE_RW,
936 			NFS_I(pgio->pg_inode)->layout,
937 			pgio->pg_lseg);
938 	pgio->pg_maxretrans = 0;
939 	nfs_pageio_reset_write_mds(pgio);
940 	pgio->pg_error = -EAGAIN;
941 }
942 
943 static unsigned int
944 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
945 				    struct nfs_page *req)
946 {
947 	if (!pgio->pg_lseg) {
948 		pgio->pg_lseg =
949 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
950 					   req_offset(req), req->wb_bytes,
951 					   IOMODE_RW, false, nfs_io_gfp_mask());
952 		if (IS_ERR(pgio->pg_lseg)) {
953 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
954 			pgio->pg_lseg = NULL;
955 			goto out;
956 		}
957 	}
958 	if (pgio->pg_lseg)
959 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
960 
961 	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
962 			0, NFS4_MAX_UINT64, IOMODE_RW,
963 			NFS_I(pgio->pg_inode)->layout,
964 			pgio->pg_lseg);
965 	/* no lseg means that pnfs is not in use, so no mirroring here */
966 	nfs_pageio_reset_write_mds(pgio);
967 out:
968 	return 1;
969 }
970 
971 static u32
972 ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
973 {
974 	u32 old = desc->pg_mirror_idx;
975 
976 	desc->pg_mirror_idx = idx;
977 	return old;
978 }
979 
980 static struct nfs_pgio_mirror *
981 ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
982 {
983 	return &desc->pg_mirrors[idx];
984 }
985 
986 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
987 	.pg_init = ff_layout_pg_init_read,
988 	.pg_test = pnfs_generic_pg_test,
989 	.pg_doio = pnfs_generic_pg_readpages,
990 	.pg_cleanup = pnfs_generic_pg_cleanup,
991 };
992 
993 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
994 	.pg_init = ff_layout_pg_init_write,
995 	.pg_test = pnfs_generic_pg_test,
996 	.pg_doio = pnfs_generic_pg_writepages,
997 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
998 	.pg_cleanup = pnfs_generic_pg_cleanup,
999 	.pg_get_mirror = ff_layout_pg_get_mirror_write,
1000 	.pg_set_mirror = ff_layout_pg_set_mirror_write,
1001 };
1002 
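/*
 * A DS write failed: either reschedule the I/O through pNFS (retry_pnfs)
 * or mark the request to be resent through the MDS.
 */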
1003 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1004 {
1005 	struct rpc_task *task = &hdr->task;
1006 
1007 	pnfs_layoutcommit_inode(hdr->inode, false);
1008 
1009 	if (retry_pnfs) {
1010 		dprintk("%s Reset task %5u for i/o through pNFS "
1011 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1012 			hdr->task.tk_pid,
1013 			hdr->inode->i_sb->s_id,
1014 			(unsigned long long)NFS_FILEID(hdr->inode),
1015 			hdr->args.count,
1016 			(unsigned long long)hdr->args.offset);
1017 
1018 		hdr->completion_ops->reschedule_io(hdr);
1019 		return;
1020 	}
1021 
1022 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1023 		dprintk("%s Reset task %5u for i/o through MDS "
1024 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1025 			hdr->task.tk_pid,
1026 			hdr->inode->i_sb->s_id,
1027 			(unsigned long long)NFS_FILEID(hdr->inode),
1028 			hdr->args.count,
1029 			(unsigned long long)hdr->args.offset);
1030 
1031 		trace_pnfs_mds_fallback_write_done(hdr->inode,
1032 				hdr->args.offset, hdr->args.count,
1033 				IOMODE_RW, NFS_I(hdr->inode)->layout,
1034 				hdr->lseg);
1035 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1036 	}
1037 }
1038 
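/*
 * Resend a failed pNFS read on the next available mirror.  Send a
 * LAYOUTERROR if another mirror can be used, otherwise mark the layout
 * for return.
 */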
1039 static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1040 {
1041 	u32 idx = hdr->pgio_mirror_idx + 1;
1042 	u32 new_idx = 0;
1043 
1044 	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1045 		ff_layout_send_layouterror(hdr->lseg);
1046 	else
1047 		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1048 	pnfs_read_resend_pnfs(hdr, new_idx);
1049 }
1050 
1051 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1052 {
1053 	struct rpc_task *task = &hdr->task;
1054 
1055 	pnfs_layoutcommit_inode(hdr->inode, false);
1056 	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1057 
1058 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1059 		dprintk("%s Reset task %5u for i/o through MDS "
1060 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1061 			hdr->task.tk_pid,
1062 			hdr->inode->i_sb->s_id,
1063 			(unsigned long long)NFS_FILEID(hdr->inode),
1064 			hdr->args.count,
1065 			(unsigned long long)hdr->args.offset);
1066 
1067 		trace_pnfs_mds_fallback_read_done(hdr->inode,
1068 				hdr->args.offset, hdr->args.count,
1069 				IOMODE_READ, NFS_I(hdr->inode)->layout,
1070 				hdr->lseg);
1071 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1072 	}
1073 }
1074 
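/*
 * Map an NFSv4 error from the DS to a recovery action: schedule session
 * recovery, delay and retry, destroy the layout, delete the deviceid, or
 * tell the caller to reset the I/O to pNFS or to the MDS.
 */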
1075 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1076 					   struct nfs4_state *state,
1077 					   struct nfs_client *clp,
1078 					   struct pnfs_layout_segment *lseg,
1079 					   u32 idx)
1080 {
1081 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1082 	struct inode *inode = lo->plh_inode;
1083 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1084 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1085 
1086 	switch (task->tk_status) {
1087 	case -NFS4ERR_BADSESSION:
1088 	case -NFS4ERR_BADSLOT:
1089 	case -NFS4ERR_BAD_HIGH_SLOT:
1090 	case -NFS4ERR_DEADSESSION:
1091 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1092 	case -NFS4ERR_SEQ_FALSE_RETRY:
1093 	case -NFS4ERR_SEQ_MISORDERED:
1094 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1095 			"flags 0x%x\n", __func__, task->tk_status,
1096 			clp->cl_exchange_flags);
1097 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1098 		break;
1099 	case -NFS4ERR_DELAY:
1100 	case -NFS4ERR_GRACE:
1101 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1102 		break;
1103 	case -NFS4ERR_RETRY_UNCACHED_REP:
1104 		break;
1105 	/* Invalidate Layout errors */
1106 	case -NFS4ERR_PNFS_NO_LAYOUT:
1107 	case -ESTALE:           /* mapped NFS4ERR_STALE */
1108 	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1109 	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1110 	case -NFS4ERR_FHEXPIRED:
1111 	case -NFS4ERR_WRONG_TYPE:
1112 		dprintk("%s Invalid layout error %d\n", __func__,
1113 			task->tk_status);
1114 		/*
1115 		 * Destroy layout so new i/o will get a new layout.
1116 		 * Layout will not be destroyed until all current lseg
1117 		 * references are put. Mark layout as invalid to resend failed
1118 		 * i/o and all i/o waiting on the slot table to the MDS until
1119 		 * layout is destroyed and a new valid layout is obtained.
1120 		 */
1121 		pnfs_destroy_layout(NFS_I(inode));
1122 		rpc_wake_up(&tbl->slot_tbl_waitq);
1123 		goto reset;
1124 	/* RPC connection errors */
1125 	case -ECONNREFUSED:
1126 	case -EHOSTDOWN:
1127 	case -EHOSTUNREACH:
1128 	case -ENETUNREACH:
1129 	case -EIO:
1130 	case -ETIMEDOUT:
1131 	case -EPIPE:
1132 	case -EPROTO:
1133 	case -ENODEV:
1134 		dprintk("%s DS connection error %d\n", __func__,
1135 			task->tk_status);
1136 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1137 				&devid->deviceid);
1138 		rpc_wake_up(&tbl->slot_tbl_waitq);
1139 		fallthrough;
1140 	default:
1141 		if (ff_layout_avoid_mds_available_ds(lseg))
1142 			return -NFS4ERR_RESET_TO_PNFS;
1143 reset:
1144 		dprintk("%s Retry through MDS. Error %d\n", __func__,
1145 			task->tk_status);
1146 		return -NFS4ERR_RESET_TO_MDS;
1147 	}
1148 	task->tk_status = 0;
1149 	return -EAGAIN;
1150 }
1151 
1152 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1153 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1154 					   struct pnfs_layout_segment *lseg,
1155 					   u32 idx)
1156 {
1157 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1158 
1159 	switch (task->tk_status) {
1160 	/* File access problems. Don't mark the device as unavailable */
1161 	case -EACCES:
1162 	case -ESTALE:
1163 	case -EISDIR:
1164 	case -EBADHANDLE:
1165 	case -ELOOP:
1166 	case -ENOSPC:
1167 		break;
1168 	case -EJUKEBOX:
1169 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1170 		goto out_retry;
1171 	default:
1172 		dprintk("%s DS connection error %d\n", __func__,
1173 			task->tk_status);
1174 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1175 				&devid->deviceid);
1176 	}
1177 	/* FIXME: Need to prevent infinite looping here. */
1178 	return -NFS4ERR_RESET_TO_PNFS;
1179 out_retry:
1180 	task->tk_status = 0;
1181 	rpc_restart_call_prepare(task);
1182 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1183 	return -EAGAIN;
1184 }
1185 
1186 static int ff_layout_async_handle_error(struct rpc_task *task,
1187 					struct nfs4_state *state,
1188 					struct nfs_client *clp,
1189 					struct pnfs_layout_segment *lseg,
1190 					u32 idx)
1191 {
1192 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1193 
1194 	if (task->tk_status >= 0) {
1195 		ff_layout_mark_ds_reachable(lseg, idx);
1196 		return 0;
1197 	}
1198 
1199 	/* Handle the case of an invalid layout segment */
1200 	if (!pnfs_is_valid_lseg(lseg))
1201 		return -NFS4ERR_RESET_TO_PNFS;
1202 
1203 	switch (vers) {
1204 	case 3:
1205 		return ff_layout_async_handle_error_v3(task, lseg, idx);
1206 	case 4:
1207 		return ff_layout_async_handle_error_v4(task, state, clp,
1208 						       lseg, idx);
1209 	default:
1210 		/* should never happen */
1211 		WARN_ON_ONCE(1);
1212 		return 0;
1213 	}
1214 }
1215 
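/*
 * Record a DS I/O error in the layout's error list so that it can be
 * reported on the next LAYOUTRETURN.  Local errnos are first mapped to
 * NFS4ERR_NXIO or NFS4ERR_ACCESS; NXIO additionally marks the device as
 * unreachable, and most errors also mark the layout for return.
 */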
1216 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1217 					u32 idx, u64 offset, u64 length,
1218 					u32 *op_status, int opnum, int error)
1219 {
1220 	struct nfs4_ff_layout_mirror *mirror;
1221 	u32 status = *op_status;
1222 	int err;
1223 
1224 	if (status == 0) {
1225 		switch (error) {
1226 		case -ETIMEDOUT:
1227 		case -EPFNOSUPPORT:
1228 		case -EPROTONOSUPPORT:
1229 		case -EOPNOTSUPP:
1230 		case -EINVAL:
1231 		case -ECONNREFUSED:
1232 		case -ECONNRESET:
1233 		case -EHOSTDOWN:
1234 		case -EHOSTUNREACH:
1235 		case -ENETUNREACH:
1236 		case -EADDRINUSE:
1237 		case -ENOBUFS:
1238 		case -EPIPE:
1239 		case -EPERM:
1240 		case -EPROTO:
1241 		case -ENODEV:
1242 			*op_status = status = NFS4ERR_NXIO;
1243 			break;
1244 		case -EACCES:
1245 			*op_status = status = NFS4ERR_ACCESS;
1246 			break;
1247 		default:
1248 			return;
1249 		}
1250 	}
1251 
1252 	mirror = FF_LAYOUT_COMP(lseg, idx);
1253 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1254 				       mirror, offset, length, status, opnum,
1255 				       nfs_io_gfp_mask());
1256 
1257 	switch (status) {
1258 	case NFS4ERR_DELAY:
1259 	case NFS4ERR_GRACE:
1260 		break;
1261 	case NFS4ERR_NXIO:
1262 		ff_layout_mark_ds_unreachable(lseg, idx);
1263 		/*
1264 		 * Don't return the layout if this is a read and we still
1265 		 * have layouts to try
1266 		 */
1267 		if (opnum == OP_READ)
1268 			break;
1269 		fallthrough;
1270 	default:
1271 		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1272 						  lseg);
1273 	}
1274 
1275 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1276 }
1277 
1278 /* NFS_PROTO call done callback routines */
1279 static int ff_layout_read_done_cb(struct rpc_task *task,
1280 				struct nfs_pgio_header *hdr)
1281 {
1282 	int err;
1283 
1284 	if (task->tk_status < 0) {
1285 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1286 					    hdr->args.offset, hdr->args.count,
1287 					    &hdr->res.op_status, OP_READ,
1288 					    task->tk_status);
1289 		trace_ff_layout_read_error(hdr);
1290 	}
1291 
1292 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1293 					   hdr->ds_clp, hdr->lseg,
1294 					   hdr->pgio_mirror_idx);
1295 
1296 	trace_nfs4_pnfs_read(hdr, err);
1297 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1298 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1299 	switch (err) {
1300 	case -NFS4ERR_RESET_TO_PNFS:
1301 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1302 		return task->tk_status;
1303 	case -NFS4ERR_RESET_TO_MDS:
1304 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1305 		return task->tk_status;
1306 	case -EAGAIN:
1307 		goto out_eagain;
1308 	}
1309 
1310 	return 0;
1311 out_eagain:
1312 	rpc_restart_call_prepare(task);
1313 	return -EAGAIN;
1314 }
1315 
1316 static bool
1317 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1318 {
1319 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1320 }
1321 
1322 /*
1323  * We reference the rpc_cred of the first WRITE that triggers the need for
1324  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1325  * RFC 5661 is not clear about which credential should be used.
1326  *
1327  * The flexfiles client should treat a FILE_SYNC reply from the DS as DATA_SYNC.
1328  * Following http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751,
1329  * we always send a layoutcommit after DS writes.
1330  */
1331 static void
1332 ff_layout_set_layoutcommit(struct inode *inode,
1333 		struct pnfs_layout_segment *lseg,
1334 		loff_t end_offset)
1335 {
1336 	if (!ff_layout_need_layoutcommit(lseg))
1337 		return;
1338 
1339 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1340 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1341 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1342 }
1343 
1344 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1345 		struct nfs_pgio_header *hdr)
1346 {
1347 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1348 		return;
1349 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1350 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1351 			hdr->args.count,
1352 			task->tk_start);
1353 }
1354 
1355 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1356 		struct nfs_pgio_header *hdr)
1357 {
1358 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1359 		return;
1360 	nfs4_ff_layout_stat_io_end_read(task,
1361 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1362 			hdr->args.count,
1363 			hdr->res.count);
1364 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1365 }
1366 
1367 static int ff_layout_read_prepare_common(struct rpc_task *task,
1368 					 struct nfs_pgio_header *hdr)
1369 {
1370 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1371 		rpc_exit(task, -EIO);
1372 		return -EIO;
1373 	}
1374 
1375 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1376 		rpc_exit(task, -EAGAIN);
1377 		return -EAGAIN;
1378 	}
1379 
1380 	ff_layout_read_record_layoutstats_start(task, hdr);
1381 	return 0;
1382 }
1383 
1384 /*
1385  * Call ops for the async read/write cases
1386  * In the case of dense layouts, the offset needs to be reset to its
1387  * original value.
1388  */
1389 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1390 {
1391 	struct nfs_pgio_header *hdr = data;
1392 
1393 	if (ff_layout_read_prepare_common(task, hdr))
1394 		return;
1395 
1396 	rpc_call_start(task);
1397 }
1398 
1399 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1400 {
1401 	struct nfs_pgio_header *hdr = data;
1402 
1403 	if (nfs4_setup_sequence(hdr->ds_clp,
1404 				&hdr->args.seq_args,
1405 				&hdr->res.seq_res,
1406 				task))
1407 		return;
1408 
1409 	ff_layout_read_prepare_common(task, hdr);
1410 }
1411 
1412 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1413 {
1414 	struct nfs_pgio_header *hdr = data;
1415 
1416 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1417 	    task->tk_status == 0) {
1418 		nfs4_sequence_done(task, &hdr->res.seq_res);
1419 		return;
1420 	}
1421 
1422 	/* Note this may cause RPC to be resent */
1423 	hdr->mds_ops->rpc_call_done(task, hdr);
1424 }
1425 
1426 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1427 {
1428 	struct nfs_pgio_header *hdr = data;
1429 
1430 	ff_layout_read_record_layoutstats_done(task, hdr);
1431 	rpc_count_iostats_metrics(task,
1432 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1433 }
1434 
1435 static void ff_layout_read_release(void *data)
1436 {
1437 	struct nfs_pgio_header *hdr = data;
1438 
1439 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1440 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1441 		ff_layout_resend_pnfs_read(hdr);
1442 	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1443 		ff_layout_reset_read(hdr);
1444 	pnfs_generic_rw_release(data);
1445 }
1446 
1447 
1448 static int ff_layout_write_done_cb(struct rpc_task *task,
1449 				struct nfs_pgio_header *hdr)
1450 {
1451 	loff_t end_offs = 0;
1452 	int err;
1453 
1454 	if (task->tk_status < 0) {
1455 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1456 					    hdr->args.offset, hdr->args.count,
1457 					    &hdr->res.op_status, OP_WRITE,
1458 					    task->tk_status);
1459 		trace_ff_layout_write_error(hdr);
1460 	}
1461 
1462 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1463 					   hdr->ds_clp, hdr->lseg,
1464 					   hdr->pgio_mirror_idx);
1465 
1466 	trace_nfs4_pnfs_write(hdr, err);
1467 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1468 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1469 	switch (err) {
1470 	case -NFS4ERR_RESET_TO_PNFS:
1471 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1472 		return task->tk_status;
1473 	case -NFS4ERR_RESET_TO_MDS:
1474 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1475 		return task->tk_status;
1476 	case -EAGAIN:
1477 		return -EAGAIN;
1478 	}
1479 
1480 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1481 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1482 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1483 
1484 	/* Note: if the write is unstable, don't set end_offs until commit */
1485 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1486 
1487 	/* zero out fattr since we don't care DS attr at all */
1488 	hdr->fattr.valid = 0;
1489 	if (task->tk_status >= 0)
1490 		nfs_writeback_update_inode(hdr);
1491 
1492 	return 0;
1493 }
1494 
1495 static int ff_layout_commit_done_cb(struct rpc_task *task,
1496 				     struct nfs_commit_data *data)
1497 {
1498 	int err;
1499 
1500 	if (task->tk_status < 0) {
1501 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1502 					    data->args.offset, data->args.count,
1503 					    &data->res.op_status, OP_COMMIT,
1504 					    task->tk_status);
1505 		trace_ff_layout_commit_error(data);
1506 	}
1507 
1508 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1509 					   data->lseg, data->ds_commit_index);
1510 
1511 	trace_nfs4_pnfs_commit_ds(data, err);
1512 	switch (err) {
1513 	case -NFS4ERR_RESET_TO_PNFS:
1514 		pnfs_generic_prepare_to_resend_writes(data);
1515 		return -EAGAIN;
1516 	case -NFS4ERR_RESET_TO_MDS:
1517 		pnfs_generic_prepare_to_resend_writes(data);
1518 		return -EAGAIN;
1519 	case -EAGAIN:
1520 		rpc_restart_call_prepare(task);
1521 		return -EAGAIN;
1522 	}
1523 
1524 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1525 
1526 	return 0;
1527 }
1528 
1529 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1530 		struct nfs_pgio_header *hdr)
1531 {
1532 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1533 		return;
1534 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1535 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1536 			hdr->args.count,
1537 			task->tk_start);
1538 }
1539 
1540 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1541 		struct nfs_pgio_header *hdr)
1542 {
1543 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1544 		return;
1545 	nfs4_ff_layout_stat_io_end_write(task,
1546 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1547 			hdr->args.count, hdr->res.count,
1548 			hdr->res.verf->committed);
1549 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1550 }
1551 
1552 static int ff_layout_write_prepare_common(struct rpc_task *task,
1553 					  struct nfs_pgio_header *hdr)
1554 {
1555 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1556 		rpc_exit(task, -EIO);
1557 		return -EIO;
1558 	}
1559 
1560 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1561 		rpc_exit(task, -EAGAIN);
1562 		return -EAGAIN;
1563 	}
1564 
1565 	ff_layout_write_record_layoutstats_start(task, hdr);
1566 	return 0;
1567 }
1568 
1569 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1570 {
1571 	struct nfs_pgio_header *hdr = data;
1572 
1573 	if (ff_layout_write_prepare_common(task, hdr))
1574 		return;
1575 
1576 	rpc_call_start(task);
1577 }
1578 
1579 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1580 {
1581 	struct nfs_pgio_header *hdr = data;
1582 
1583 	if (nfs4_setup_sequence(hdr->ds_clp,
1584 				&hdr->args.seq_args,
1585 				&hdr->res.seq_res,
1586 				task))
1587 		return;
1588 
1589 	ff_layout_write_prepare_common(task, hdr);
1590 }
1591 
1592 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1593 {
1594 	struct nfs_pgio_header *hdr = data;
1595 
1596 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1597 	    task->tk_status == 0) {
1598 		nfs4_sequence_done(task, &hdr->res.seq_res);
1599 		return;
1600 	}
1601 
1602 	/* Note this may cause RPC to be resent */
1603 	hdr->mds_ops->rpc_call_done(task, hdr);
1604 }
1605 
1606 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1607 {
1608 	struct nfs_pgio_header *hdr = data;
1609 
1610 	ff_layout_write_record_layoutstats_done(task, hdr);
1611 	rpc_count_iostats_metrics(task,
1612 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1613 }
1614 
1615 static void ff_layout_write_release(void *data)
1616 {
1617 	struct nfs_pgio_header *hdr = data;
1618 
1619 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1620 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1621 		ff_layout_send_layouterror(hdr->lseg);
1622 		ff_layout_reset_write(hdr, true);
1623 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1624 		ff_layout_reset_write(hdr, false);
1625 	pnfs_generic_rw_release(data);
1626 }
1627 
1628 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1629 		struct nfs_commit_data *cdata)
1630 {
1631 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1632 		return;
1633 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1634 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1635 			0, task->tk_start);
1636 }
1637 
1638 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1639 		struct nfs_commit_data *cdata)
1640 {
1641 	struct nfs_page *req;
1642 	__u64 count = 0;
1643 
1644 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1645 		return;
1646 
1647 	if (task->tk_status == 0) {
1648 		list_for_each_entry(req, &cdata->pages, wb_list)
1649 			count += req->wb_bytes;
1650 	}
1651 	nfs4_ff_layout_stat_io_end_write(task,
1652 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1653 			count, count, NFS_FILE_SYNC);
1654 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1655 }
1656 
1657 static int ff_layout_commit_prepare_common(struct rpc_task *task,
1658 					   struct nfs_commit_data *cdata)
1659 {
1660 	if (!pnfs_is_valid_lseg(cdata->lseg)) {
1661 		rpc_exit(task, -EAGAIN);
1662 		return -EAGAIN;
1663 	}
1664 
1665 	ff_layout_commit_record_layoutstats_start(task, cdata);
1666 	return 0;
1667 }
1668 
1669 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1670 {
1671 	if (ff_layout_commit_prepare_common(task, data))
1672 		return;
1673 
1674 	rpc_call_start(task);
1675 }
1676 
1677 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1678 {
1679 	struct nfs_commit_data *wdata = data;
1680 
1681 	if (nfs4_setup_sequence(wdata->ds_clp,
1682 				&wdata->args.seq_args,
1683 				&wdata->res.seq_res,
1684 				task))
1685 		return;
1686 	ff_layout_commit_prepare_common(task, data);
1687 }
1688 
1689 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1690 {
1691 	pnfs_generic_write_commit_done(task, data);
1692 }
1693 
1694 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1695 {
1696 	struct nfs_commit_data *cdata = data;
1697 
1698 	ff_layout_commit_record_layoutstats_done(task, cdata);
1699 	rpc_count_iostats_metrics(task,
1700 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1701 }
1702 
1703 static void ff_layout_commit_release(void *data)
1704 {
1705 	struct nfs_commit_data *cdata = data;
1706 
1707 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1708 	pnfs_generic_commit_release(data);
1709 }
1710 
1711 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1712 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1713 	.rpc_call_done = ff_layout_read_call_done,
1714 	.rpc_count_stats = ff_layout_read_count_stats,
1715 	.rpc_release = ff_layout_read_release,
1716 };
1717 
1718 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1719 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1720 	.rpc_call_done = ff_layout_read_call_done,
1721 	.rpc_count_stats = ff_layout_read_count_stats,
1722 	.rpc_release = ff_layout_read_release,
1723 };
1724 
1725 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1726 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1727 	.rpc_call_done = ff_layout_write_call_done,
1728 	.rpc_count_stats = ff_layout_write_count_stats,
1729 	.rpc_release = ff_layout_write_release,
1730 };
1731 
1732 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1733 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1734 	.rpc_call_done = ff_layout_write_call_done,
1735 	.rpc_count_stats = ff_layout_write_count_stats,
1736 	.rpc_release = ff_layout_write_release,
1737 };
1738 
1739 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1740 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1741 	.rpc_call_done = ff_layout_commit_done,
1742 	.rpc_count_stats = ff_layout_commit_count_stats,
1743 	.rpc_release = ff_layout_commit_release,
1744 };
1745 
1746 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1747 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1748 	.rpc_call_done = ff_layout_commit_done,
1749 	.rpc_count_stats = ff_layout_commit_count_stats,
1750 	.rpc_release = ff_layout_commit_release,
1751 };
1752 
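/*
 * Set up and send an asynchronous READ to the data server selected by
 * hdr->pgio_mirror_idx.  If no DS connection, client or credential can
 * be obtained, fall back to the MDS or ask the caller to retry.
 */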
1753 static enum pnfs_try_status
1754 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1755 {
1756 	struct pnfs_layout_segment *lseg = hdr->lseg;
1757 	struct nfs4_pnfs_ds *ds;
1758 	struct rpc_clnt *ds_clnt;
1759 	struct nfs4_ff_layout_mirror *mirror;
1760 	const struct cred *ds_cred;
1761 	loff_t offset = hdr->args.offset;
1762 	u32 idx = hdr->pgio_mirror_idx;
1763 	int vers;
1764 	struct nfs_fh *fh;
1765 
1766 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1767 		__func__, hdr->inode->i_ino,
1768 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1769 
1770 	mirror = FF_LAYOUT_COMP(lseg, idx);
1771 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1772 	if (!ds)
1773 		goto out_failed;
1774 
1775 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1776 						   hdr->inode);
1777 	if (IS_ERR(ds_clnt))
1778 		goto out_failed;
1779 
1780 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1781 	if (!ds_cred)
1782 		goto out_failed;
1783 
1784 	vers = nfs4_ff_layout_ds_version(mirror);
1785 
1786 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1787 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1788 
1789 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1790 	refcount_inc(&ds->ds_clp->cl_count);
1791 	hdr->ds_clp = ds->ds_clp;
1792 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1793 	if (fh)
1794 		hdr->args.fh = fh;
1795 
1796 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1797 
1798 	/*
1799 	 * Note that if we ever decide to split across DSes,
1800 	 * then we may need to handle dense-like offsets.
1801 	 */
1802 	hdr->args.offset = offset;
1803 	hdr->mds_offset = offset;
1804 
1805 	/* Perform an asynchronous read to ds */
1806 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1807 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1808 				      &ff_layout_read_call_ops_v4,
1809 			  0, RPC_TASK_SOFTCONN);
1810 	put_cred(ds_cred);
1811 	return PNFS_ATTEMPTED;
1812 
1813 out_failed:
1814 	if (ff_layout_avoid_mds_available_ds(lseg))
1815 		return PNFS_TRY_AGAIN;
1816 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1817 			hdr->args.offset, hdr->args.count,
1818 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1819 	return PNFS_NOT_ATTEMPTED;
1820 }
1821 
1822 /* Perform async writes. */
1823 static enum pnfs_try_status
1824 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1825 {
1826 	struct pnfs_layout_segment *lseg = hdr->lseg;
1827 	struct nfs4_pnfs_ds *ds;
1828 	struct rpc_clnt *ds_clnt;
1829 	struct nfs4_ff_layout_mirror *mirror;
1830 	const struct cred *ds_cred;
1831 	loff_t offset = hdr->args.offset;
1832 	int vers;
1833 	struct nfs_fh *fh;
1834 	u32 idx = hdr->pgio_mirror_idx;
1835 
1836 	mirror = FF_LAYOUT_COMP(lseg, idx);
1837 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1838 	if (!ds)
1839 		goto out_failed;
1840 
1841 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1842 						   hdr->inode);
1843 	if (IS_ERR(ds_clnt))
1844 		goto out_failed;
1845 
1846 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1847 	if (!ds_cred)
1848 		goto out_failed;
1849 
1850 	vers = nfs4_ff_layout_ds_version(mirror);
1851 
1852 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1853 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1854 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1855 		vers);
1856 
1857 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1858 	refcount_inc(&ds->ds_clp->cl_count);
1859 	hdr->ds_clp = ds->ds_clp;
1860 	hdr->ds_commit_idx = idx;
1861 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1862 	if (fh)
1863 		hdr->args.fh = fh;
1864 
1865 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1866 
1867 	/*
1868 	 * Note that if we ever decide to split across DSes,
1869 	 * then we may need to handle dense-like offsets.
1870 	 */
1871 	hdr->args.offset = offset;
1872 
1873 	/* Perform an asynchronous write */
1874 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1875 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1876 				      &ff_layout_write_call_ops_v4,
1877 			  sync, RPC_TASK_SOFTCONN);
1878 	put_cred(ds_cred);
1879 	return PNFS_ATTEMPTED;
1880 
1881 out_failed:
1882 	if (ff_layout_avoid_mds_available_ds(lseg))
1883 		return PNFS_TRY_AGAIN;
1884 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1885 			hdr->args.offset, hdr->args.count,
1886 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1887 	return PNFS_NOT_ATTEMPTED;
1888 }
1889 
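/* Commit bucket indices map directly onto mirror indices. */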
1890 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1891 {
1892 	return i;
1893 }
1894 
1895 static struct nfs_fh *
1896 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1897 {
1898 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1899 
1900 	/* FIXME: Assume that there is only one NFS version available
1901 	 * for the DS.
1902 	 */
1903 	return &flseg->mirror_array[i]->fh_versions[0];
1904 }
1905 
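/*
 * Send a COMMIT to the data server selected by the commit bucket index.
 * On any setup failure the requests are queued for resending through
 * the MDS and -EAGAIN is returned.
 */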
1906 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1907 {
1908 	struct pnfs_layout_segment *lseg = data->lseg;
1909 	struct nfs4_pnfs_ds *ds;
1910 	struct rpc_clnt *ds_clnt;
1911 	struct nfs4_ff_layout_mirror *mirror;
1912 	const struct cred *ds_cred;
1913 	u32 idx;
1914 	int vers, ret;
1915 	struct nfs_fh *fh;
1916 
1917 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1918 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1919 		goto out_err;
1920 
1921 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1922 	mirror = FF_LAYOUT_COMP(lseg, idx);
1923 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1924 	if (!ds)
1925 		goto out_err;
1926 
1927 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1928 						   data->inode);
1929 	if (IS_ERR(ds_clnt))
1930 		goto out_err;
1931 
1932 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1933 	if (!ds_cred)
1934 		goto out_err;
1935 
1936 	vers = nfs4_ff_layout_ds_version(mirror);
1937 
1938 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1939 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1940 		vers);
1941 	data->commit_done_cb = ff_layout_commit_done_cb;
1942 	data->cred = ds_cred;
1943 	refcount_inc(&ds->ds_clp->cl_count);
1944 	data->ds_clp = ds->ds_clp;
1945 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1946 	if (fh)
1947 		data->args.fh = fh;
1948 
1949 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1950 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1951 					       &ff_layout_commit_call_ops_v4,
1952 				   how, RPC_TASK_SOFTCONN);
1953 	put_cred(ds_cred);
1954 	return ret;
1955 out_err:
1956 	pnfs_generic_prepare_to_resend_writes(data);
1957 	pnfs_generic_commit_release(data);
1958 	return -EAGAIN;
1959 }
1960 
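/*
 * Commit through the generic pNFS helper, which issues the per-DS
 * commits via ff_layout_initiate_commit().
 */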
1961 static int
1962 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1963 			   int how, struct nfs_commit_info *cinfo)
1964 {
1965 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1966 					    ff_layout_initiate_commit);
1967 }
1968 
1969 static bool ff_layout_match_rw(const struct rpc_task *task,
1970 			       const struct nfs_pgio_header *hdr,
1971 			       const struct pnfs_layout_segment *lseg)
1972 {
1973 	return hdr->lseg == lseg;
1974 }
1975 
1976 static bool ff_layout_match_commit(const struct rpc_task *task,
1977 				   const struct nfs_commit_data *cdata,
1978 				   const struct pnfs_layout_segment *lseg)
1979 {
1980 	return cdata->lseg == lseg;
1981 }
1982 
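/*
 * Match an outstanding RPC task against a layout segment by looking at
 * its call ops: read/write tasks are compared via hdr->lseg, commit
 * tasks via cdata->lseg.
 */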
1983 static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
1984 {
1985 	const struct rpc_call_ops *ops = task->tk_ops;
1986 
1987 	if (ops == &ff_layout_read_call_ops_v3 ||
1988 	    ops == &ff_layout_read_call_ops_v4 ||
1989 	    ops == &ff_layout_write_call_ops_v3 ||
1990 	    ops == &ff_layout_write_call_ops_v4)
1991 		return ff_layout_match_rw(task, task->tk_calldata, data);
1992 	if (ops == &ff_layout_commit_call_ops_v3 ||
1993 	    ops == &ff_layout_commit_call_ops_v4)
1994 		return ff_layout_match_commit(task, task->tk_calldata, data);
1995 	return false;
1996 }
1997 
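/*
 * Cancel all in-flight RPCs addressed to the data servers of this layout
 * segment, then disconnect each transport on which tasks were cancelled.
 */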
1998 static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
1999 {
2000 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2001 	struct nfs4_ff_layout_mirror *mirror;
2002 	struct nfs4_ff_layout_ds *mirror_ds;
2003 	struct nfs4_pnfs_ds *ds;
2004 	struct nfs_client *ds_clp;
2005 	struct rpc_clnt *clnt;
2006 	u32 idx;
2007 
2008 	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2009 		mirror = flseg->mirror_array[idx];
2010 		mirror_ds = mirror->mirror_ds;
2011 		if (IS_ERR_OR_NULL(mirror_ds))
2012 			continue;
2013 		ds = mirror->mirror_ds->ds;
2014 		if (!ds)
2015 			continue;
2016 		ds_clp = ds->ds_clp;
2017 		if (!ds_clp)
2018 			continue;
2019 		clnt = ds_clp->cl_rpcclient;
2020 		if (!clnt)
2021 			continue;
2022 		if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2023 			continue;
2024 		rpc_clnt_disconnect(clnt);
2025 	}
2026 }
2027 
2028 static struct pnfs_ds_commit_info *
2029 ff_layout_get_ds_info(struct inode *inode)
2030 {
2031 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2032 
2033 	if (layout == NULL)
2034 		return NULL;
2035 
2036 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2037 }
2038 
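/*
 * Make sure the inode's commit info contains a bucket array covering this
 * layout segment; if another thread raced us, the freshly allocated array
 * is discarded.
 */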
2039 static void
2040 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2041 		struct pnfs_layout_segment *lseg)
2042 {
2043 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2044 	struct inode *inode = lseg->pls_layout->plh_inode;
2045 	struct pnfs_commit_array *array, *new;
2046 
2047 	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2048 				      nfs_io_gfp_mask());
2049 	if (new) {
2050 		spin_lock(&inode->i_lock);
2051 		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2052 		spin_unlock(&inode->i_lock);
2053 		if (array != new)
2054 			pnfs_free_commit_array(new);
2055 	}
2056 }
2057 
2058 static void
2059 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2060 		struct inode *inode)
2061 {
2062 	spin_lock(&inode->i_lock);
2063 	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2064 	spin_unlock(&inode->i_lock);
2065 }
2066 
2067 static void
2068 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2069 {
2070 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2071 						  id_node));
2072 }
2073 
2074 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2075 				  const struct nfs4_layoutreturn_args *args,
2076 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2077 {
2078 	__be32 *start;
2079 
2080 	start = xdr_reserve_space(xdr, 4);
2081 	if (unlikely(!start))
2082 		return -E2BIG;
2083 
2084 	*start = cpu_to_be32(ff_args->num_errors);
2085 	/* This assumes we always return _ALL_ layouts */
2086 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2087 }
2088 
2089 static void
2090 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2091 {
2092 	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2093 }
2094 
2095 static void
2096 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2097 			    const nfs4_stateid *stateid,
2098 			    const struct nfs42_layoutstat_devinfo *devinfo)
2099 {
2100 	__be32 *p;
2101 
2102 	p = xdr_reserve_space(xdr, 8 + 8);
2103 	p = xdr_encode_hyper(p, devinfo->offset);
2104 	p = xdr_encode_hyper(p, devinfo->length);
2105 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2106 	p = xdr_reserve_space(xdr, 4*8);
2107 	p = xdr_encode_hyper(p, devinfo->read_count);
2108 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2109 	p = xdr_encode_hyper(p, devinfo->write_count);
2110 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2111 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2112 }
2113 
2114 static void
2115 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2116 			    const nfs4_stateid *stateid,
2117 			    const struct nfs42_layoutstat_devinfo *devinfo)
2118 {
2119 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2120 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2121 			devinfo->ld_private.data);
2122 }
2123 
2124 /* Encode the array of per-device iostats gathered for this layoutreturn */
2125 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2126 		const struct nfs4_layoutreturn_args *args,
2127 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2128 {
2129 	__be32 *p;
2130 	int i;
2131 
2132 	p = xdr_reserve_space(xdr, 4);
2133 	*p = cpu_to_be32(ff_args->num_dev);
2134 	for (i = 0; i < ff_args->num_dev; i++)
2135 		ff_layout_encode_ff_iostat(xdr,
2136 				&args->layout->plh_stateid,
2137 				&ff_args->devinfo[i]);
2138 }
2139 
2140 static void
2141 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2142 		unsigned int num_entries)
2143 {
2144 	unsigned int i;
2145 
2146 	for (i = 0; i < num_entries; i++) {
2147 		if (!devinfo[i].ld_private.ops)
2148 			continue;
2149 		if (!devinfo[i].ld_private.ops->free)
2150 			continue;
2151 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2152 	}
2153 }
2154 
2155 static struct nfs4_deviceid_node *
2156 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2157 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2158 {
2159 	struct nfs4_ff_layout_ds *dsaddr;
2160 
2161 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2162 	if (!dsaddr)
2163 		return NULL;
2164 	return &dsaddr->id_node;
2165 }
2166 
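/*
 * Encode the flexfiles LAYOUTRETURN body: the I/O error report and the
 * per-mirror iostats are first encoded into a scratch page, then written
 * into the XDR stream as one length-prefixed opaque blob.
 */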
2167 static void
2168 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2169 		const void *voidargs,
2170 		const struct nfs4_xdr_opaque_data *ff_opaque)
2171 {
2172 	const struct nfs4_layoutreturn_args *args = voidargs;
2173 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2174 	struct xdr_buf tmp_buf = {
2175 		.head = {
2176 			[0] = {
2177 				.iov_base = page_address(ff_args->pages[0]),
2178 			},
2179 		},
2180 		.buflen = PAGE_SIZE,
2181 	};
2182 	struct xdr_stream tmp_xdr;
2183 	__be32 *start;
2184 
2185 	dprintk("%s: Begin\n", __func__);
2186 
2187 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2188 
2189 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2190 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2191 
2192 	start = xdr_reserve_space(xdr, 4);
2193 	*start = cpu_to_be32(tmp_buf.len);
2194 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2195 
2196 	dprintk("%s: Return\n", __func__);
2197 }
2198 
2199 static void
2200 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2201 {
2202 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2203 
2204 	if (!args->data)
2205 		return;
2206 	ff_args = args->data;
2207 	args->data = NULL;
2208 
2209 	ff_layout_free_ds_ioerr(&ff_args->errors);
2210 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2211 
2212 	put_page(ff_args->pages[0]);
2213 	kfree(ff_args);
2214 }
2215 
2216 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2217 	.encode = ff_layout_encode_layoutreturn,
2218 	.free = ff_layout_free_layoutreturn,
2219 };
2220 
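/*
 * Set up the private LAYOUTRETURN arguments: gather the recorded DS I/O
 * errors (capped at FF_LAYOUTRETURN_MAXERR) and a final snapshot of the
 * per-mirror statistics, to be encoded by layoutreturn_ops.
 */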
2221 static int
2222 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2223 {
2224 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2225 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2226 
2227 	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2228 	if (!ff_args)
2229 		goto out_nomem;
2230 	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2231 	if (!ff_args->pages[0])
2232 		goto out_nomem_free;
2233 
2234 	INIT_LIST_HEAD(&ff_args->errors);
2235 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2236 			&args->range, &ff_args->errors,
2237 			FF_LAYOUTRETURN_MAXERR);
2238 
2239 	spin_lock(&args->inode->i_lock);
2240 	ff_args->num_dev = ff_layout_mirror_prepare_stats(
2241 		&ff_layout->generic_hdr, &ff_args->devinfo[0],
2242 		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2243 	spin_unlock(&args->inode->i_lock);
2244 
2245 	args->ld_private->ops = &layoutreturn_ops;
2246 	args->ld_private->data = ff_args;
2247 	return 0;
2248 out_nomem_free:
2249 	kfree(ff_args);
2250 out_nomem:
2251 	return -ENOMEM;
2252 }
2253 
2254 #ifdef CONFIG_NFS_V4_2
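/*
 * Report recorded DS I/O errors via LAYOUTERROR, in batches of at most
 * NFS42_LAYOUTERROR_MAX entries, provided the server advertises
 * NFS_CAP_LAYOUTERROR.
 */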
2255 void
2256 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2257 {
2258 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2259 	struct nfs42_layout_error *errors;
2260 	LIST_HEAD(head);
2261 
2262 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2263 		return;
2264 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2265 	if (list_empty(&head))
2266 		return;
2267 
2268 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2269 			       nfs_io_gfp_mask());
2270 	if (errors != NULL) {
2271 		const struct nfs4_ff_layout_ds_err *pos;
2272 		size_t n = 0;
2273 
2274 		list_for_each_entry(pos, &head, list) {
2275 			errors[n].offset = pos->offset;
2276 			errors[n].length = pos->length;
2277 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2278 			errors[n].errors[0].dev_id = pos->deviceid;
2279 			errors[n].errors[0].status = pos->status;
2280 			errors[n].errors[0].opnum = pos->opnum;
2281 			n++;
2282 			if (!list_is_last(&pos->list, &head) &&
2283 			    n < NFS42_LAYOUTERROR_MAX)
2284 				continue;
2285 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2286 				break;
2287 			n = 0;
2288 		}
2289 		kfree(errors);
2290 	}
2291 	ff_layout_free_ds_ioerr(&head);
2292 }
2293 #else
2294 void
2295 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2296 {
2297 }
2298 #endif
2299 
2300 static int
2301 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2302 {
2303 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2304 
2305 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2306 }
2307 
2308 static size_t
2309 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2310 			  const int buflen)
2311 {
2312 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2313 	const struct in6_addr *addr = &sin6->sin6_addr;
2314 
2315 	/*
2316 	 * RFC 4291, Section 2.2.2
2317 	 *
2318 	 * Shorthanded ANY address
2319 	 */
2320 	if (ipv6_addr_any(addr))
2321 		return snprintf(buf, buflen, "::");
2322 
2323 	/*
2324 	 * RFC 4291, Section 2.2.2
2325 	 *
2326 	 * Shorthanded loopback address
2327 	 */
2328 	if (ipv6_addr_loopback(addr))
2329 		return snprintf(buf, buflen, "::1");
2330 
2331 	/*
2332 	 * RFC 4291, Section 2.2.3
2333 	 *
2334 	 * Special presentation address format for mapped v4
2335 	 * addresses.
2336 	 */
2337 	if (ipv6_addr_v4mapped(addr))
2338 		return snprintf(buf, buflen, "::ffff:%pI4",
2339 					&addr->s6_addr32[3]);
2340 
2341 	/*
2342 	 * RFC 4291, Section 2.2.1
2343 	 */
2344 	return snprintf(buf, buflen, "%pI6c", addr);
2345 }
2346 
2347 /* Derived from rpc_sockaddr2uaddr */
2348 static void
2349 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2350 {
2351 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2352 	char portbuf[RPCBIND_MAXUADDRPLEN];
2353 	char addrbuf[RPCBIND_MAXUADDRLEN];
2354 	unsigned short port;
2355 	int len, netid_len;
2356 	__be32 *p;
2357 
2358 	switch (sap->sa_family) {
2359 	case AF_INET:
2360 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2361 			return;
2362 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2363 		break;
2364 	case AF_INET6:
2365 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2366 			return;
2367 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2368 		break;
2369 	default:
2370 		WARN_ON_ONCE(1);
2371 		return;
2372 	}
2373 
2374 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2375 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2376 
2377 	netid_len = strlen(da->da_netid);
2378 	p = xdr_reserve_space(xdr, 4 + netid_len);
2379 	xdr_encode_opaque(p, da->da_netid, netid_len);
2380 
2381 	p = xdr_reserve_space(xdr, 4 + len);
2382 	xdr_encode_opaque(p, addrbuf, len);
2383 }
2384 
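/* Encode a ktime_t as an nfstime4 (64-bit seconds + 32-bit nanoseconds). */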
2385 static void
2386 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2387 			 ktime_t t)
2388 {
2389 	struct timespec64 ts;
2390 	__be32 *p;
2391 
2392 	p = xdr_reserve_space(xdr, 12);
2393 	ts = ktime_to_timespec64(t);
2394 	p = xdr_encode_hyper(p, ts.tv_sec);
2395 	*p++ = cpu_to_be32(ts.tv_nsec);
2396 }
2397 
2398 static void
2399 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2400 			    struct nfs4_ff_io_stat *stat)
2401 {
2402 	__be32 *p;
2403 
2404 	p = xdr_reserve_space(xdr, 5 * 8);
2405 	p = xdr_encode_hyper(p, stat->ops_requested);
2406 	p = xdr_encode_hyper(p, stat->bytes_requested);
2407 	p = xdr_encode_hyper(p, stat->ops_completed);
2408 	p = xdr_encode_hyper(p, stat->bytes_completed);
2409 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2410 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2411 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2412 }
2413 
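/*
 * Encode one ff_layoutupdate4 entry: the DS netaddr and filehandle, the
 * read and write ff_io_latency4 blocks, the time elapsed since the mirror
 * started collecting stats, and a final boolean that is always false.
 */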
2414 static void
2415 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2416 			      const struct nfs42_layoutstat_devinfo *devinfo,
2417 			      struct nfs4_ff_layout_mirror *mirror)
2418 {
2419 	struct nfs4_pnfs_ds_addr *da;
2420 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2421 	struct nfs_fh *fh = &mirror->fh_versions[0];
2422 	__be32 *p;
2423 
2424 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2425 	dprintk("%s: DS %s: encoding address %s\n",
2426 		__func__, ds->ds_remotestr, da->da_remotestr);
2427 	/* netaddr4 */
2428 	ff_layout_encode_netaddr(xdr, da);
2429 	/* nfs_fh4 */
2430 	p = xdr_reserve_space(xdr, 4 + fh->size);
2431 	xdr_encode_opaque(p, fh->data, fh->size);
2432 	/* ff_io_latency4 read */
2433 	spin_lock(&mirror->lock);
2434 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2435 	/* ff_io_latency4 write */
2436 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2437 	spin_unlock(&mirror->lock);
2438 	/* nfstime4 */
2439 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2440 	/* bool */
2441 	p = xdr_reserve_space(xdr, 4);
2442 	*p = cpu_to_be32(false);
2443 }
2444 
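/*
 * Encode the opaque layoutupdate body for a single layoutstats device
 * entry, back-filling its length once the contents are known.
 */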
2445 static void
2446 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2447 			     const struct nfs4_xdr_opaque_data *opaque)
2448 {
2449 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2450 			struct nfs42_layoutstat_devinfo, ld_private);
2451 	__be32 *start;
2452 
2453 	/* layoutupdate length */
2454 	start = xdr_reserve_space(xdr, 4);
2455 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2456 
2457 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2458 }
2459 
2460 static void
2461 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2462 {
2463 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2464 
2465 	ff_layout_put_mirror(mirror);
2466 }
2467 
2468 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2469 	.encode = ff_layout_encode_layoutstats,
2470 	.free	= ff_layout_free_layoutstats,
2471 };
2472 
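/*
 * Fill in up to @dev_limit layoutstats device entries from the layout's
 * mirror list.  For LAYOUTSTATS only mirrors with fresh data
 * (NFS4_FF_MIRROR_STAT_AVAIL) are reported; a LAYOUTRETURN reports every
 * mirror with a valid deviceid.  Each reported mirror holds an extra
 * reference until its ld_private data is freed.
 */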
2473 static int
2474 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2475 			       struct nfs42_layoutstat_devinfo *devinfo,
2476 			       int dev_limit, enum nfs4_ff_op_type type)
2477 {
2478 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2479 	struct nfs4_ff_layout_mirror *mirror;
2480 	struct nfs4_deviceid_node *dev;
2481 	int i = 0;
2482 
2483 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2484 		if (i >= dev_limit)
2485 			break;
2486 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2487 			continue;
2488 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2489 					&mirror->flags) &&
2490 		    type != NFS4_FF_OP_LAYOUTRETURN)
2491 			continue;
2492 		/* mirror refcount put in ff_layout_free_layoutstats */
2493 		if (!refcount_inc_not_zero(&mirror->ref))
2494 			continue;
2495 		dev = &mirror->mirror_ds->id_node;
2496 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2497 		devinfo->offset = 0;
2498 		devinfo->length = NFS4_MAX_UINT64;
2499 		spin_lock(&mirror->lock);
2500 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2501 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2502 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2503 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2504 		spin_unlock(&mirror->lock);
2505 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2506 		devinfo->ld_private.ops = &layoutstat_ops;
2507 		devinfo->ld_private.data = mirror;
2508 
2509 		devinfo++;
2510 		i++;
2511 	}
2512 	return i;
2513 }
2514 
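/*
 * Build the devinfo array for a LAYOUTSTATS call.  Returns -ENOENT if the
 * layout is no longer valid or no mirror has new statistics to report.
 */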
2515 static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2516 {
2517 	struct pnfs_layout_hdr *lo;
2518 	struct nfs4_flexfile_layout *ff_layout;
2519 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2520 
2521 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2522 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2523 				      nfs_io_gfp_mask());
2524 	if (!args->devinfo)
2525 		return -ENOMEM;
2526 
2527 	spin_lock(&args->inode->i_lock);
2528 	lo = NFS_I(args->inode)->layout;
2529 	if (lo && pnfs_layout_is_valid(lo)) {
2530 		ff_layout = FF_LAYOUT_FROM_HDR(lo);
2531 		args->num_dev = ff_layout_mirror_prepare_stats(
2532 			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2533 			NFS4_FF_OP_LAYOUTSTATS);
2534 	} else
2535 		args->num_dev = 0;
2536 	spin_unlock(&args->inode->i_lock);
2537 	if (!args->num_dev) {
2538 		kfree(args->devinfo);
2539 		args->devinfo = NULL;
2540 		return -ENOENT;
2541 	}
2542 
2543 	return 0;
2544 }
2545 
2546 static int
2547 ff_layout_set_layoutdriver(struct nfs_server *server,
2548 		const struct nfs_fh *dummy)
2549 {
2550 #if IS_ENABLED(CONFIG_NFS_V4_2)
2551 	server->caps |= NFS_CAP_LAYOUTSTATS;
2552 #endif
2553 	return 0;
2554 }
2555 
2556 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2557 	.setup_ds_info		= ff_layout_setup_ds_info,
2558 	.release_ds_info	= ff_layout_release_ds_info,
2559 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2560 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2561 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2562 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2563 	.commit_pagelist	= ff_layout_commit_pagelist,
2564 };
2565 
2566 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2567 	.id			= LAYOUT_FLEX_FILES,
2568 	.name			= "LAYOUT_FLEX_FILES",
2569 	.owner			= THIS_MODULE,
2570 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2571 	.max_layoutget_response	= 4096, /* 1 page or so... */
2572 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2573 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2574 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2575 	.alloc_lseg		= ff_layout_alloc_lseg,
2576 	.free_lseg		= ff_layout_free_lseg,
2577 	.add_lseg		= ff_layout_add_lseg,
2578 	.pg_read_ops		= &ff_layout_pg_read_ops,
2579 	.pg_write_ops		= &ff_layout_pg_write_ops,
2580 	.get_ds_info		= ff_layout_get_ds_info,
2581 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2582 	.read_pagelist		= ff_layout_read_pagelist,
2583 	.write_pagelist		= ff_layout_write_pagelist,
2584 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2585 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2586 	.sync			= pnfs_nfs_generic_sync,
2587 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2588 	.cancel_io		= ff_layout_cancel_io,
2589 };
2590 
2591 static int __init nfs4flexfilelayout_init(void)
2592 {
2593 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2594 	       __func__);
2595 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2596 }
2597 
2598 static void __exit nfs4flexfilelayout_exit(void)
2599 {
2600 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2601 	       __func__);
2602 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2603 }
2604 
2605 MODULE_ALIAS("nfs-layouttype4-4");
2606 
2607 MODULE_LICENSE("GPL");
2608 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2609 
2610 module_init(nfs4flexfilelayout_init);
2611 module_exit(nfs4flexfilelayout_exit);
2612 
2613 module_param(io_maxretrans, ushort, 0644);
2614 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2615 			"retries an I/O request before returning an error.");
2616