xref: /linux/fs/nfs/flexfilelayout/flexfilelayout.c (revision 070a542f08acb7e8cf197287f5c44658c715d2d1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Module for pnfs flexfile layout driver.
4  *
5  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6  *
7  * Tao Peng <bergwolf@primarydata.com>
8  */
9 
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/file.h>
15 #include <linux/sched/mm.h>
16 
17 #include <linux/sunrpc/metrics.h>
18 
19 #include "flexfilelayout.h"
20 #include "../nfs4session.h"
21 #include "../nfs4idmap.h"
22 #include "../internal.h"
23 #include "../delegation.h"
24 #include "../nfs4trace.h"
25 #include "../iostat.h"
26 #include "../nfs.h"
27 #include "../nfs42.h"
28 
29 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
30 
31 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
32 #define FF_LAYOUTRETURN_MAXERR 20
33 
34 enum nfs4_ff_op_type {
35 	NFS4_FF_OP_LAYOUTSTATS,
36 	NFS4_FF_OP_LAYOUTRETURN,
37 };
38 
39 static unsigned short io_maxretrans;
40 
41 static const struct pnfs_commit_ops ff_layout_commit_ops;
42 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
43 		struct nfs_pgio_header *hdr);
44 static int
45 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
46 			       struct nfs42_layoutstat_devinfo *devinfo,
47 			       int dev_limit, enum nfs4_ff_op_type type);
48 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
49 			      const struct nfs42_layoutstat_devinfo *devinfo,
50 			      struct nfs4_ff_layout_ds_stripe *dss_info);
51 
52 static struct pnfs_layout_hdr *
53 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
54 {
55 	struct nfs4_flexfile_layout *ffl;
56 
57 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
58 	if (ffl) {
59 		pnfs_init_ds_commit_info(&ffl->commit_info);
60 		INIT_LIST_HEAD(&ffl->error_list);
61 		INIT_LIST_HEAD(&ffl->mirrors);
62 		ffl->last_report_time = ktime_get();
63 		ffl->commit_info.ops = &ff_layout_commit_ops;
64 		return &ffl->generic_hdr;
65 	} else
66 		return NULL;
67 }
68 
69 static void
70 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
71 {
72 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
73 	struct nfs4_ff_layout_ds_err *err, *n;
74 
75 	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
76 		list_del(&err->list);
77 		kfree(err);
78 	}
79 	kfree_rcu(ffl, generic_hdr.plh_rcu);
80 }
81 
82 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
83 {
84 	__be32 *p;
85 
86 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
87 	if (unlikely(p == NULL))
88 		return -ENOBUFS;
89 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
90 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
91 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
92 		p[0], p[1], p[2], p[3]);
93 	return 0;
94 }
95 
96 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
97 {
98 	__be32 *p;
99 
100 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
101 	if (unlikely(!p))
102 		return -ENOBUFS;
103 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
104 	nfs4_print_deviceid(devid);
105 	return 0;
106 }
107 
108 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
109 {
110 	__be32 *p;
111 
112 	p = xdr_inline_decode(xdr, 4);
113 	if (unlikely(!p))
114 		return -ENOBUFS;
115 	fh->size = be32_to_cpup(p++);
116 	if (fh->size > NFS_MAXFHSIZE) {
117 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
118 		       fh->size);
119 		return -EOVERFLOW;
120 	}
121 	/* fh.data */
122 	p = xdr_inline_decode(xdr, fh->size);
123 	if (unlikely(!p))
124 		return -ENOBUFS;
125 	memcpy(&fh->data, p, fh->size);
126 	dprintk("%s: fh len %d\n", __func__, fh->size);
127 
128 	return 0;
129 }
130 
131 /*
132  * Currently only stringified uids and gids are accepted.
133  * I.e., Kerberos is not supported to the DSes, so no principals.
134  *
135  * That means that one common function will suffice, but when
136  * principals are added, this should be split to accommodate
137  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
138  */
139 static int
140 decode_name(struct xdr_stream *xdr, u32 *id)
141 {
142 	__be32 *p;
143 	int len;
144 
145 	/* opaque_length(4)*/
146 	p = xdr_inline_decode(xdr, 4);
147 	if (unlikely(!p))
148 		return -ENOBUFS;
149 	len = be32_to_cpup(p++);
150 	if (len < 0)
151 		return -EINVAL;
152 
153 	dprintk("%s: len %u\n", __func__, len);
154 
155 	/* opaque body */
156 	p = xdr_inline_decode(xdr, len);
157 	if (unlikely(!p))
158 		return -ENOBUFS;
159 
160 	if (!nfs_map_string_to_numeric((char *)p, len, id))
161 		return -EINVAL;
162 
163 	return 0;
164 }
165 
166 static struct nfsd_file *
167 ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx, u32 dss_id,
168 		 struct nfs_client *clp, const struct cred *cred,
169 		 struct nfs_fh *fh, fmode_t mode)
170 {
171 #if IS_ENABLED(CONFIG_NFS_LOCALIO)
172 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
173 
174 	return nfs_local_open_fh(clp, cred, fh, &mirror->dss[dss_id].nfl, mode);
175 #else
176 	return NULL;
177 #endif
178 }
179 
180 static bool ff_dss_match_fh(const struct nfs4_ff_layout_ds_stripe *dss1,
181 		const struct nfs4_ff_layout_ds_stripe *dss2)
182 {
183 	int i, j;
184 
185 	if (dss1->fh_versions_cnt != dss2->fh_versions_cnt)
186 		return false;
187 
188 	for (i = 0; i < dss1->fh_versions_cnt; i++) {
189 		bool found_fh = false;
190 		for (j = 0; j < dss2->fh_versions_cnt; j++) {
191 			if (nfs_compare_fh(&dss1->fh_versions[i],
192 					&dss2->fh_versions[j]) == 0) {
193 				found_fh = true;
194 				break;
195 			}
196 		}
197 		if (!found_fh)
198 			return false;
199 	}
200 	return true;
201 }
202 
203 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
204 		const struct nfs4_ff_layout_mirror *m2)
205 {
206 	u32 dss_id;
207 
208 	if (m1->dss_count != m2->dss_count)
209 		return false;
210 
211 	for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
212 		if (!ff_dss_match_fh(&m1->dss[dss_id], &m2->dss[dss_id]))
213 			return false;
214 
215 	return true;
216 }
217 
218 static bool ff_mirror_match_devid(const struct nfs4_ff_layout_mirror *m1,
219 		const struct nfs4_ff_layout_mirror *m2)
220 {
221 	u32 dss_id;
222 
223 	if (m1->dss_count != m2->dss_count)
224 		return false;
225 
226 	for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
227 		if (memcmp(&m1->dss[dss_id].devid,
228 			   &m2->dss[dss_id].devid,
229 			   sizeof(m1->dss[dss_id].devid)) != 0)
230 			return false;
231 
232 	return true;
233 }
234 
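/*
 * Mirrors are shared between layout segments: look for an existing
 * mirror with matching device IDs and filehandles on the layout's
 * mirror list and take a reference on it, otherwise add the new one.
 */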
235 static struct nfs4_ff_layout_mirror *
236 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
237 		struct nfs4_ff_layout_mirror *mirror)
238 {
239 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
240 	struct nfs4_ff_layout_mirror *pos;
241 	struct inode *inode = lo->plh_inode;
242 
243 	spin_lock(&inode->i_lock);
244 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
245 		if (!ff_mirror_match_devid(mirror, pos))
246 			continue;
247 		if (!ff_mirror_match_fh(mirror, pos))
248 			continue;
249 		if (refcount_inc_not_zero(&pos->ref)) {
250 			spin_unlock(&inode->i_lock);
251 			return pos;
252 		}
253 	}
254 	list_add(&mirror->mirrors, &ff_layout->mirrors);
255 	mirror->layout = lo;
256 	spin_unlock(&inode->i_lock);
257 	return mirror;
258 }
259 
260 static void
261 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
262 {
263 	struct inode *inode;
264 	if (mirror->layout == NULL)
265 		return;
266 	inode = mirror->layout->plh_inode;
267 	spin_lock(&inode->i_lock);
268 	list_del(&mirror->mirrors);
269 	spin_unlock(&inode->i_lock);
270 	mirror->layout = NULL;
271 }
272 
273 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
274 {
275 	struct nfs4_ff_layout_mirror *mirror;
276 	u32 dss_id;
277 
278 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
279 	if (mirror != NULL) {
280 		spin_lock_init(&mirror->lock);
281 		refcount_set(&mirror->ref, 1);
282 		INIT_LIST_HEAD(&mirror->mirrors);
283 		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++)
284 			nfs_localio_file_init(&mirror->dss[dss_id].nfl);
285 	}
286 	return mirror;
287 }
288 
289 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
290 {
291 	const struct cred	*cred;
292 	u32 dss_id;
293 
294 	ff_layout_remove_mirror(mirror);
295 
296 	for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
297 		kfree(mirror->dss[dss_id].fh_versions);
298 		cred = rcu_access_pointer(mirror->dss[dss_id].ro_cred);
299 		put_cred(cred);
300 		cred = rcu_access_pointer(mirror->dss[dss_id].rw_cred);
301 		put_cred(cred);
302 		nfs_close_local_fh(&mirror->dss[dss_id].nfl);
303 		nfs4_ff_layout_put_deviceid(mirror->dss[dss_id].mirror_ds);
304 	}
305 
306 	kfree(mirror->dss);
307 	kfree(mirror);
308 }
309 
310 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
311 {
312 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
313 		ff_layout_free_mirror(mirror);
314 }
315 
316 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
317 {
318 	u32 i;
319 
320 	for (i = 0; i < fls->mirror_array_cnt; i++)
321 		ff_layout_put_mirror(fls->mirror_array[i]);
322 }
323 
324 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
325 {
326 	if (fls) {
327 		ff_layout_free_mirror_array(fls);
328 		kfree(fls);
329 	}
330 }
331 
332 static bool
333 ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
334 		struct pnfs_layout_segment *l2)
335 {
336 	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
337 	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
338 	u32 i;
339 
340 	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
341 		return false;
342 	for (i = 0; i < fl1->mirror_array_cnt; i++) {
343 		if (fl1->mirror_array[i] != fl2->mirror_array[i])
344 			return false;
345 	}
346 	return true;
347 }
348 
349 static bool
350 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
351 		const struct pnfs_layout_range *l2)
352 {
353 	u64 end1, end2;
354 
355 	if (l1->iomode != l2->iomode)
356 		return l1->iomode != IOMODE_READ;
357 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
358 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
359 	if (end1 < l2->offset)
360 		return false;
361 	if (end2 < l1->offset)
362 		return true;
363 	return l2->offset <= l1->offset;
364 }
365 
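/*
 * Try to fold 'old' into 'new'. The segments must have the same iomode
 * and mirrors, their ranges must touch or overlap, and 'old' must not
 * be marked for layoutreturn; on success 'new' is expanded to cover
 * the union of the two ranges.
 */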
366 static bool
367 ff_lseg_merge(struct pnfs_layout_segment *new,
368 		struct pnfs_layout_segment *old)
369 {
370 	u64 new_end, old_end;
371 
372 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
373 		return false;
374 	if (new->pls_range.iomode != old->pls_range.iomode)
375 		return false;
376 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
377 			old->pls_range.length);
378 	if (old_end < new->pls_range.offset)
379 		return false;
380 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
381 			new->pls_range.length);
382 	if (new_end < old->pls_range.offset)
383 		return false;
384 	if (!ff_lseg_match_mirrors(new, old))
385 		return false;
386 
387 	/* Mergeable: copy info from 'old' to 'new' */
388 	if (new_end < old_end)
389 		new_end = old_end;
390 	if (new->pls_range.offset < old->pls_range.offset)
391 		new->pls_range.offset = old->pls_range.offset;
392 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
393 			new_end);
394 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
395 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
396 	return true;
397 }
398 
399 static void
400 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
401 		struct pnfs_layout_segment *lseg,
402 		struct list_head *free_me)
403 {
404 	pnfs_generic_layout_insert_lseg(lo, lseg,
405 			ff_lseg_range_is_after,
406 			ff_lseg_merge,
407 			free_me);
408 }
409 
410 static u32 ff_mirror_efficiency_sum(const struct nfs4_ff_layout_mirror *mirror)
411 {
412 	u32 dss_id, sum = 0;
413 
414 	for (dss_id = 0; dss_id < mirror->dss_count; dss_id++)
415 		sum += mirror->dss[dss_id].efficiency;
416 
417 	return sum;
418 }
419 
420 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
421 {
422 	int i, j;
423 
424 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
425 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
426 			if (ff_mirror_efficiency_sum(fls->mirror_array[i]) <
427 			    ff_mirror_efficiency_sum(fls->mirror_array[j]))
428 				swap(fls->mirror_array[i],
429 				     fls->mirror_array[j]);
430 	}
431 }
432 
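/*
 * Decode the flexfile layout body returned by LAYOUTGET: stripe unit
 * and mirror count, then for each mirror its data-server stripes
 * (deviceid, efficiency, stateid, filehandles, user and group), and
 * finally the optional flags and layoutstats report interval.
 */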
433 static struct pnfs_layout_segment *
434 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
435 		     struct nfs4_layoutget_res *lgr,
436 		     gfp_t gfp_flags)
437 {
438 	struct pnfs_layout_segment *ret;
439 	struct nfs4_ff_layout_segment *fls = NULL;
440 	struct xdr_stream stream;
441 	struct xdr_buf buf;
442 	struct folio *scratch;
443 	u64 stripe_unit;
444 	u32 mirror_array_cnt;
445 	__be32 *p;
446 	int i, rc;
447 	struct nfs4_ff_layout_ds_stripe *dss_info;
448 
449 	dprintk("--> %s\n", __func__);
450 	scratch = folio_alloc(gfp_flags, 0);
451 	if (!scratch)
452 		return ERR_PTR(-ENOMEM);
453 
454 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
455 			      lgr->layoutp->len);
456 	xdr_set_scratch_folio(&stream, scratch);
457 
458 	/* stripe unit and mirror_array_cnt */
459 	rc = -EIO;
460 	p = xdr_inline_decode(&stream, 8 + 4);
461 	if (!p)
462 		goto out_err_free;
463 
464 	p = xdr_decode_hyper(p, &stripe_unit);
465 	mirror_array_cnt = be32_to_cpup(p++);
466 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
467 		stripe_unit, mirror_array_cnt);
468 
469 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
470 	    mirror_array_cnt == 0)
471 		goto out_err_free;
472 
473 	rc = -ENOMEM;
474 	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
475 			gfp_flags);
476 	if (!fls)
477 		goto out_err_free;
478 
479 	fls->mirror_array_cnt = mirror_array_cnt;
480 	fls->stripe_unit = stripe_unit;
481 
482 	u32 dss_count = 0;
483 	for (i = 0; i < fls->mirror_array_cnt; i++) {
484 		struct nfs4_ff_layout_mirror *mirror;
485 		struct cred *kcred;
486 		const struct cred __rcu *cred;
487 		kuid_t uid;
488 		kgid_t gid;
489 		u32 fh_count, id;
490 		int j, dss_id;
491 
492 		rc = -EIO;
493 		p = xdr_inline_decode(&stream, 4);
494 		if (!p)
495 			goto out_err_free;
496 
497 		// Ensure all mirrors have same stripe count.
498 		if (dss_count == 0)
499 			dss_count = be32_to_cpup(p);
500 		else if (dss_count != be32_to_cpup(p))
501 			goto out_err_free;
502 
503 		if (dss_count > NFS4_FLEXFILE_LAYOUT_MAX_STRIPE_CNT ||
504 		    dss_count == 0)
505 			goto out_err_free;
506 
507 		if (dss_count > 1 && stripe_unit == 0)
508 			goto out_err_free;
509 
510 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
511 		if (fls->mirror_array[i] == NULL) {
512 			rc = -ENOMEM;
513 			goto out_err_free;
514 		}
515 
516 		fls->mirror_array[i]->dss_count = dss_count;
517 		fls->mirror_array[i]->dss =
518 		    kcalloc(dss_count, sizeof(struct nfs4_ff_layout_ds_stripe),
519 			    gfp_flags);
520 
521 		for (dss_id = 0; dss_id < dss_count; dss_id++) {
522 			dss_info = &fls->mirror_array[i]->dss[dss_id];
523 			dss_info->mirror = fls->mirror_array[i];
524 
525 			/* deviceid */
526 			rc = decode_deviceid(&stream, &dss_info->devid);
527 			if (rc)
528 				goto out_err_free;
529 
530 			/* efficiency */
531 			rc = -EIO;
532 			p = xdr_inline_decode(&stream, 4);
533 			if (!p)
534 				goto out_err_free;
535 			dss_info->efficiency = be32_to_cpup(p);
536 
537 			/* stateid */
538 			rc = decode_pnfs_stateid(&stream, &dss_info->stateid);
539 			if (rc)
540 				goto out_err_free;
541 
542 			/* fh */
543 			rc = -EIO;
544 			p = xdr_inline_decode(&stream, 4);
545 			if (!p)
546 				goto out_err_free;
547 			fh_count = be32_to_cpup(p);
548 
549 			dss_info->fh_versions =
550 			    kcalloc(fh_count, sizeof(struct nfs_fh),
551 				    gfp_flags);
552 			if (dss_info->fh_versions == NULL) {
553 				rc = -ENOMEM;
554 				goto out_err_free;
555 			}
556 
557 			for (j = 0; j < fh_count; j++) {
558 				rc = decode_nfs_fh(&stream,
559 						   &dss_info->fh_versions[j]);
560 				if (rc)
561 					goto out_err_free;
562 			}
563 
564 			dss_info->fh_versions_cnt = fh_count;
565 
566 			/* user */
567 			rc = decode_name(&stream, &id);
568 			if (rc)
569 				goto out_err_free;
570 
571 			uid = make_kuid(&init_user_ns, id);
572 
573 			/* group */
574 			rc = decode_name(&stream, &id);
575 			if (rc)
576 				goto out_err_free;
577 
578 			gid = make_kgid(&init_user_ns, id);
579 
580 			if (gfp_flags & __GFP_FS)
581 				kcred = prepare_kernel_cred(&init_task);
582 			else {
583 				unsigned int nofs_flags = memalloc_nofs_save();
584 
585 				kcred = prepare_kernel_cred(&init_task);
586 				memalloc_nofs_restore(nofs_flags);
587 			}
588 			rc = -ENOMEM;
589 			if (!kcred)
590 				goto out_err_free;
591 			kcred->fsuid = uid;
592 			kcred->fsgid = gid;
593 			cred = RCU_INITIALIZER(kcred);
594 
595 			if (lgr->range.iomode == IOMODE_READ)
596 				rcu_assign_pointer(dss_info->ro_cred, cred);
597 			else
598 				rcu_assign_pointer(dss_info->rw_cred, cred);
599 		}
600 
601 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
602 		if (mirror != fls->mirror_array[i]) {
603 			for (dss_id = 0; dss_id < dss_count; dss_id++) {
604 				dss_info = &fls->mirror_array[i]->dss[dss_id];
605 				/* swap cred ptrs so free_mirror will clean up old */
606 				if (lgr->range.iomode == IOMODE_READ) {
607 					cred = xchg(&mirror->dss[dss_id].ro_cred,
608 						    dss_info->ro_cred);
609 					rcu_assign_pointer(dss_info->ro_cred, cred);
610 				} else {
611 					cred = xchg(&mirror->dss[dss_id].rw_cred,
612 						    dss_info->rw_cred);
613 					rcu_assign_pointer(dss_info->rw_cred, cred);
614 				}
615 			}
616 			ff_layout_free_mirror(fls->mirror_array[i]);
617 			fls->mirror_array[i] = mirror;
618 		}
619 
620 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
621 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
622 			from_kuid(&init_user_ns, uid),
623 			from_kgid(&init_user_ns, gid));
624 	}
625 
626 	p = xdr_inline_decode(&stream, 4);
627 	if (!p)
628 		goto out_sort_mirrors;
629 	fls->flags = be32_to_cpup(p);
630 
631 	p = xdr_inline_decode(&stream, 4);
632 	if (!p)
633 		goto out_sort_mirrors;
634 	for (i=0; i < fls->mirror_array_cnt; i++)
635 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
636 
637 out_sort_mirrors:
638 	ff_layout_sort_mirrors(fls);
639 	ret = &fls->generic_hdr;
640 	dprintk("<-- %s (success)\n", __func__);
641 out_free_page:
642 	folio_put(scratch);
643 	return ret;
644 out_err_free:
645 	_ff_layout_free_lseg(fls);
646 	ret = ERR_PTR(rc);
647 	dprintk("<-- %s (%d)\n", __func__, rc);
648 	goto out_free_page;
649 }
650 
651 static void
652 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
653 {
654 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
655 
656 	dprintk("--> %s\n", __func__);
657 
658 	if (lseg->pls_range.iomode == IOMODE_RW) {
659 		struct nfs4_flexfile_layout *ffl;
660 		struct inode *inode;
661 
662 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
663 		inode = ffl->generic_hdr.plh_inode;
664 		spin_lock(&inode->i_lock);
665 		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
666 		spin_unlock(&inode->i_lock);
667 	}
668 	_ff_layout_free_lseg(fls);
669 }
670 
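/*
 * Map a (mirror index, stripe index) pair to a flat commit bucket
 * index. The two helpers below invert this mapping.
 */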
671 static u32 calc_commit_idx(struct pnfs_layout_segment *lseg,
672 			   u32 mirror_idx, u32 dss_id)
673 {
674 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
675 
676 	return (mirror_idx * flseg->mirror_array[0]->dss_count) + dss_id;
677 }
678 
679 static u32 calc_mirror_idx_from_commit(struct pnfs_layout_segment *lseg,
680 				       u32 commit_index)
681 {
682 	return commit_index / FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
683 }
684 
685 static u32 calc_dss_id_from_commit(struct pnfs_layout_segment *lseg,
686 				   u32 commit_index)
687 {
688 	return commit_index % FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
689 }
690 
691 static void
692 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
693 {
694 	/* first IO request? */
695 	if (atomic_inc_return(&timer->n_ops) == 1) {
696 		timer->start_time = now;
697 	}
698 }
699 
700 static ktime_t
701 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
702 {
703 	ktime_t start;
704 
705 	if (atomic_dec_return(&timer->n_ops) < 0)
706 		WARN_ON_ONCE(1);
707 
708 	start = timer->start_time;
709 	timer->start_time = now;
710 	return ktime_sub(now, start);
711 }
712 
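/*
 * Record the start of an I/O against this stripe and report whether it
 * is time to send a LAYOUTSTATS update, based on the mirror's report
 * interval or the layoutstats_timer module parameter.
 */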
713 static bool
714 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
715 			    u32 dss_id,
716 			    struct nfs4_ff_layoutstat *layoutstat,
717 			    ktime_t now)
718 {
719 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
720 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
721 
722 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
723 	if (!mirror->dss[dss_id].start_time)
724 		mirror->dss[dss_id].start_time = now;
725 	if (mirror->report_interval != 0)
726 		report_interval = (s64)mirror->report_interval * 1000LL;
727 	else if (layoutstats_timer != 0)
728 		report_interval = (s64)layoutstats_timer * 1000LL;
729 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
730 			report_interval) {
731 		ffl->last_report_time = now;
732 		return true;
733 	}
734 
735 	return false;
736 }
737 
738 static void
739 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
740 		__u64 requested)
741 {
742 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
743 
744 	iostat->ops_requested++;
745 	iostat->bytes_requested += requested;
746 }
747 
748 static void
749 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
750 		__u64 requested,
751 		__u64 completed,
752 		ktime_t time_completed,
753 		ktime_t time_started)
754 {
755 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
756 	ktime_t completion_time = ktime_sub(time_completed, time_started);
757 	ktime_t timer;
758 
759 	iostat->ops_completed++;
760 	iostat->bytes_completed += completed;
761 	iostat->bytes_not_delivered += requested - completed;
762 
763 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
764 	iostat->total_busy_time =
765 			ktime_add(iostat->total_busy_time, timer);
766 	iostat->aggregate_completion_time =
767 			ktime_add(iostat->aggregate_completion_time,
768 					completion_time);
769 }
770 
771 static void
772 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
773 		struct nfs4_ff_layout_mirror *mirror,
774 		u32 dss_id,
775 		__u64 requested, ktime_t now)
776 {
777 	bool report;
778 
779 	spin_lock(&mirror->lock);
780 	report = nfs4_ff_layoutstat_start_io(
781 		mirror, dss_id, &mirror->dss[dss_id].read_stat, now);
782 	nfs4_ff_layout_stat_io_update_requested(
783 		&mirror->dss[dss_id].read_stat, requested);
784 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
785 	spin_unlock(&mirror->lock);
786 
787 	if (report)
788 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
789 }
790 
791 static void
792 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
793 		struct nfs4_ff_layout_mirror *mirror,
794 		u32 dss_id,
795 		__u64 requested,
796 		__u64 completed)
797 {
798 	spin_lock(&mirror->lock);
799 	nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].read_stat,
800 			requested, completed,
801 			ktime_get(), task->tk_start);
802 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
803 	spin_unlock(&mirror->lock);
804 }
805 
806 static void
807 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
808 		struct nfs4_ff_layout_mirror *mirror,
809 		u32 dss_id,
810 		__u64 requested, ktime_t now)
811 {
812 	bool report;
813 
814 	spin_lock(&mirror->lock);
815 	report = nfs4_ff_layoutstat_start_io(
816 		mirror,
817 		dss_id,
818 		&mirror->dss[dss_id].write_stat,
819 		now);
820 	nfs4_ff_layout_stat_io_update_requested(
821 		&mirror->dss[dss_id].write_stat,
822 		requested);
823 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
824 	spin_unlock(&mirror->lock);
825 
826 	if (report)
827 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
828 }
829 
830 static void
831 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
832 		struct nfs4_ff_layout_mirror *mirror,
833 		u32 dss_id,
834 		__u64 requested,
835 		__u64 completed,
836 		enum nfs3_stable_how committed)
837 {
838 	if (committed == NFS_UNSTABLE)
839 		requested = completed = 0;
840 
841 	spin_lock(&mirror->lock);
842 	nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].write_stat,
843 			requested, completed, ktime_get(), task->tk_start);
844 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
845 	spin_unlock(&mirror->lock);
846 }
847 
848 static void
849 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
850 {
851 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
852 
853 	if (devid)
854 		nfs4_mark_deviceid_unavailable(devid);
855 }
856 
857 static void
858 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
859 {
860 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
861 
862 	if (devid)
863 		nfs4_mark_deviceid_available(devid);
864 }
865 
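/*
 * Scan the mirrors from 'start_idx' (they are sorted by efficiency)
 * and return the first data server that can be set up for this offset,
 * optionally skipping devices already marked unavailable.
 */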
866 static struct nfs4_pnfs_ds *
867 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
868 			     u32 start_idx, u32 *best_idx,
869 			     u32 offset, u32 *dss_id,
870 			     bool check_device)
871 {
872 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
873 	struct nfs4_ff_layout_mirror *mirror;
874 	struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
875 	u32 idx;
876 
877 	/* mirrors are initially sorted by efficiency */
878 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
879 		mirror = FF_LAYOUT_COMP(lseg, idx);
880 		*dss_id = nfs4_ff_layout_calc_dss_id(
881 			fls->stripe_unit,
882 			fls->mirror_array[idx]->dss_count,
883 			offset);
884 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, *dss_id, false);
885 		if (IS_ERR(ds))
886 			continue;
887 
888 		if (check_device &&
889 		    nfs4_test_deviceid_unavailable(&mirror->dss[*dss_id].mirror_ds->id_node)) {
890 			// reinitialize the error state in case this is the last iteration
891 			ds = ERR_PTR(-EINVAL);
892 			continue;
893 		}
894 
895 		*best_idx = idx;
896 		break;
897 	}
898 
899 	return ds;
900 }
901 
902 static struct nfs4_pnfs_ds *
903 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
904 				 u32 start_idx, u32 *best_idx,
905 				 u32 offset, u32 *dss_id)
906 {
907 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
908 					    offset, dss_id, false);
909 }
910 
911 static struct nfs4_pnfs_ds *
912 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
913 				   u32 start_idx, u32 *best_idx,
914 				   u32 offset, u32 *dss_id)
915 {
916 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
917 					    offset, dss_id, true);
918 }
919 
920 static struct nfs4_pnfs_ds *
921 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
922 				  u32 start_idx, u32 *best_idx,
923 				  u32 offset, u32 *dss_id)
924 {
925 	struct nfs4_pnfs_ds *ds;
926 
927 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx,
928 						offset, dss_id);
929 	if (!IS_ERR(ds))
930 		return ds;
931 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx,
932 						offset, dss_id);
933 }
934 
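/*
 * Pick a data server for a read, preferring the descriptor's current
 * mirror and falling back to a scan from the first mirror.
 */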
935 static struct nfs4_pnfs_ds *
936 ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
937 			  u32 *best_idx,
938 			  u32 offset,
939 			  u32 *dss_id)
940 {
941 	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
942 	struct nfs4_pnfs_ds *ds;
943 
944 	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
945 					       best_idx, offset, dss_id);
946 	if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
947 		return ds;
948 	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx,
949 						 offset, dss_id);
950 }
951 
952 static void
953 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
954 		      struct nfs_page *req,
955 		      bool strict_iomode)
956 {
957 	pnfs_put_lseg(pgio->pg_lseg);
958 	pgio->pg_lseg =
959 		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
960 				   req_offset(req), req->wb_bytes, IOMODE_READ,
961 				   strict_iomode, nfs_io_gfp_mask());
962 	if (IS_ERR(pgio->pg_lseg)) {
963 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
964 		pgio->pg_lseg = NULL;
965 	}
966 }
967 
968 static bool
969 ff_layout_lseg_is_striped(const struct nfs4_ff_layout_segment *fls)
970 {
971 	return fls->mirror_array[0]->dss_count > 1;
972 }
973 
974 /*
975  * ff_layout_pg_test(). Called by nfs_can_coalesce_requests()
976  *
977  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
978  * of bytes (maximum @req->wb_bytes) that can be coalesced.
979  */
980 static size_t
981 ff_layout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
982 		  struct nfs_page *req)
983 {
984 	unsigned int size;
985 	u64 p_stripe, r_stripe;
986 	u32 stripe_offset;
987 	u64 segment_offset = pgio->pg_lseg->pls_range.offset;
988 	u32 stripe_unit = FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
989 
990 	/* calls nfs_generic_pg_test */
991 	size = pnfs_generic_pg_test(pgio, prev, req);
992 	if (!size)
993 		return 0;
994 	else if (!ff_layout_lseg_is_striped(FF_LAYOUT_LSEG(pgio->pg_lseg)))
995 		return size;
996 
997 	/* see if req and prev are in the same stripe */
998 	if (prev) {
999 		p_stripe = (u64)req_offset(prev) - segment_offset;
1000 		r_stripe = (u64)req_offset(req) - segment_offset;
1001 		do_div(p_stripe, stripe_unit);
1002 		do_div(r_stripe, stripe_unit);
1003 
1004 		if (p_stripe != r_stripe)
1005 			return 0;
1006 	}
1007 
1008 	/* calculate remaining bytes in the current stripe */
1009 	div_u64_rem((u64)req_offset(req) - segment_offset,
1010 			stripe_unit,
1011 			&stripe_offset);
1012 	WARN_ON_ONCE(stripe_offset > stripe_unit);
1013 	if (stripe_offset >= stripe_unit)
1014 		return 0;
1015 	return min(stripe_unit - (unsigned int)stripe_offset, size);
1016 }
1017 
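/*
 * Prepare the pageio descriptor for a read: obtain a layout segment,
 * pick a usable data server for the request offset, and fall back to
 * the MDS (or retry) when no data server is available.
 */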
1018 static void
1019 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
1020 			struct nfs_page *req)
1021 {
1022 	struct nfs_pgio_mirror *pgm;
1023 	struct nfs4_ff_layout_mirror *mirror;
1024 	struct nfs4_pnfs_ds *ds;
1025 	u32 ds_idx, dss_id;
1026 
1027 	if (NFS_SERVER(pgio->pg_inode)->flags &
1028 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1029 		pgio->pg_maxretrans = io_maxretrans;
1030 retry:
1031 	pnfs_generic_pg_check_layout(pgio, req);
1032 	/* Use full layout for now */
1033 	if (!pgio->pg_lseg) {
1034 		ff_layout_pg_get_read(pgio, req, false);
1035 		if (!pgio->pg_lseg)
1036 			goto out_nolseg;
1037 	}
1038 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
1039 		ff_layout_pg_get_read(pgio, req, true);
1040 		if (!pgio->pg_lseg)
1041 			goto out_nolseg;
1042 	}
1043 	/* Reset wb_nio, since getting layout segment was successful */
1044 	req->wb_nio = 0;
1045 
1046 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx,
1047 				       req_offset(req), &dss_id);
1048 	if (IS_ERR(ds)) {
1049 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
1050 			goto out_mds;
1051 		pnfs_generic_pg_cleanup(pgio);
1052 		/* Sleep for 1 second before retrying */
1053 		ssleep(1);
1054 		goto retry;
1055 	}
1056 
1057 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
1058 	pgm = &pgio->pg_mirrors[0];
1059 	pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize;
1060 
1061 	pgio->pg_mirror_idx = ds_idx;
1062 	return;
1063 out_nolseg:
1064 	if (pgio->pg_error < 0) {
1065 		if (pgio->pg_error != -EAGAIN)
1066 			return;
1067 		/* Retry getting layout segment if lower layer returned -EAGAIN */
1068 		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
1069 			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
1070 				pgio->pg_error = -ETIMEDOUT;
1071 			else
1072 				pgio->pg_error = -EIO;
1073 			return;
1074 		}
1075 		pgio->pg_error = 0;
1076 		/* Sleep for 1 second before retrying */
1077 		ssleep(1);
1078 		goto retry;
1079 	}
1080 out_mds:
1081 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
1082 			0, NFS4_MAX_UINT64, IOMODE_READ,
1083 			NFS_I(pgio->pg_inode)->layout,
1084 			pgio->pg_lseg);
1085 	pgio->pg_maxretrans = 0;
1086 	nfs_pageio_reset_read_mds(pgio);
1087 }
1088 
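/*
 * Prepare the pageio descriptor for a write: obtain an RW layout
 * segment and set up a data server for every mirror, falling back to
 * the MDS (or retrying) when a mirror cannot be prepared.
 */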
1089 static void
1090 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
1091 			struct nfs_page *req)
1092 {
1093 	struct nfs4_ff_layout_mirror *mirror;
1094 	struct nfs_pgio_mirror *pgm;
1095 	struct nfs4_pnfs_ds *ds;
1096 	u32 i, dss_id;
1097 
1098 retry:
1099 	pnfs_generic_pg_check_layout(pgio, req);
1100 	if (!pgio->pg_lseg) {
1101 		pgio->pg_lseg =
1102 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
1103 					   req_offset(req), req->wb_bytes,
1104 					   IOMODE_RW, false, nfs_io_gfp_mask());
1105 		if (IS_ERR(pgio->pg_lseg)) {
1106 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1107 			pgio->pg_lseg = NULL;
1108 			return;
1109 		}
1110 	}
1111 	/* If no lseg, fall back to write through mds */
1112 	if (pgio->pg_lseg == NULL)
1113 		goto out_mds;
1114 
1115 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
1116 	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
1117 		goto out_eagain;
1118 
1119 	for (i = 0; i < pgio->pg_mirror_count; i++) {
1120 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
1121 		dss_id = nfs4_ff_layout_calc_dss_id(
1122 			FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit,
1123 			mirror->dss_count,
1124 			req_offset(req));
1125 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror,
1126 					       dss_id, true);
1127 		if (IS_ERR(ds)) {
1128 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
1129 				goto out_mds;
1130 			pnfs_generic_pg_cleanup(pgio);
1131 			/* Sleep for 1 second before retrying */
1132 			ssleep(1);
1133 			goto retry;
1134 		}
1135 		pgm = &pgio->pg_mirrors[i];
1136 		pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize;
1137 	}
1138 
1139 	if (NFS_SERVER(pgio->pg_inode)->flags &
1140 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1141 		pgio->pg_maxretrans = io_maxretrans;
1142 	return;
1143 out_eagain:
1144 	pnfs_generic_pg_cleanup(pgio);
1145 	pgio->pg_error = -EAGAIN;
1146 	return;
1147 out_mds:
1148 	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
1149 			0, NFS4_MAX_UINT64, IOMODE_RW,
1150 			NFS_I(pgio->pg_inode)->layout,
1151 			pgio->pg_lseg);
1152 	pgio->pg_maxretrans = 0;
1153 	nfs_pageio_reset_write_mds(pgio);
1154 	pgio->pg_error = -EAGAIN;
1155 }
1156 
1157 static unsigned int
1158 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
1159 				    struct nfs_page *req)
1160 {
1161 	if (!pgio->pg_lseg) {
1162 		pgio->pg_lseg =
1163 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
1164 					   req_offset(req), req->wb_bytes,
1165 					   IOMODE_RW, false, nfs_io_gfp_mask());
1166 		if (IS_ERR(pgio->pg_lseg)) {
1167 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1168 			pgio->pg_lseg = NULL;
1169 			goto out;
1170 		}
1171 	}
1172 	if (pgio->pg_lseg)
1173 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
1174 
1175 	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
1176 			0, NFS4_MAX_UINT64, IOMODE_RW,
1177 			NFS_I(pgio->pg_inode)->layout,
1178 			pgio->pg_lseg);
1179 	/* no lseg means that pnfs is not in use, so no mirroring here */
1180 	nfs_pageio_reset_write_mds(pgio);
1181 out:
1182 	return 1;
1183 }
1184 
1185 static u32
1186 ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1187 {
1188 	u32 old = desc->pg_mirror_idx;
1189 
1190 	desc->pg_mirror_idx = idx;
1191 	return old;
1192 }
1193 
1194 static struct nfs_pgio_mirror *
1195 ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1196 {
1197 	return &desc->pg_mirrors[idx];
1198 }
1199 
1200 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1201 	.pg_init = ff_layout_pg_init_read,
1202 	.pg_test = ff_layout_pg_test,
1203 	.pg_doio = pnfs_generic_pg_readpages,
1204 	.pg_cleanup = pnfs_generic_pg_cleanup,
1205 };
1206 
1207 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1208 	.pg_init = ff_layout_pg_init_write,
1209 	.pg_test = ff_layout_pg_test,
1210 	.pg_doio = pnfs_generic_pg_writepages,
1211 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1212 	.pg_cleanup = pnfs_generic_pg_cleanup,
1213 	.pg_get_mirror = ff_layout_pg_get_mirror_write,
1214 	.pg_set_mirror = ff_layout_pg_set_mirror_write,
1215 };
1216 
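/*
 * A DS write failed: issue any pending layoutcommit, then either
 * reschedule the I/O through pNFS or resend it through the MDS.
 */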
1217 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1218 {
1219 	struct rpc_task *task = &hdr->task;
1220 
1221 	pnfs_layoutcommit_inode(hdr->inode, false);
1222 
1223 	if (retry_pnfs) {
1224 		dprintk("%s Reset task %5u for i/o through pNFS "
1225 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1226 			hdr->task.tk_pid,
1227 			hdr->inode->i_sb->s_id,
1228 			(unsigned long long)NFS_FILEID(hdr->inode),
1229 			hdr->args.count,
1230 			(unsigned long long)hdr->args.offset);
1231 
1232 		hdr->completion_ops->reschedule_io(hdr);
1233 		return;
1234 	}
1235 
1236 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1237 		dprintk("%s Reset task %5u for i/o through MDS "
1238 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1239 			hdr->task.tk_pid,
1240 			hdr->inode->i_sb->s_id,
1241 			(unsigned long long)NFS_FILEID(hdr->inode),
1242 			hdr->args.count,
1243 			(unsigned long long)hdr->args.offset);
1244 
1245 		trace_pnfs_mds_fallback_write_done(hdr->inode,
1246 				hdr->args.offset, hdr->args.count,
1247 				IOMODE_RW, NFS_I(hdr->inode)->layout,
1248 				hdr->lseg);
1249 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1250 	}
1251 }
1252 
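/*
 * Resend a failed read through pNFS on the next available mirror; if
 * no other data server can be found, mark the layout for return first.
 */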
1253 static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1254 {
1255 	u32 idx = hdr->pgio_mirror_idx + 1;
1256 	u32 new_idx = 0;
1257 	u32 dss_id = 0;
1258 	struct nfs4_pnfs_ds *ds;
1259 
1260 	ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx,
1261 					      hdr->args.offset, &dss_id);
1262 	if (IS_ERR(ds))
1263 		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1264 	else
1265 		ff_layout_send_layouterror(hdr->lseg);
1266 	pnfs_read_resend_pnfs(hdr, new_idx);
1267 }
1268 
1269 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1270 {
1271 	struct rpc_task *task = &hdr->task;
1272 
1273 	pnfs_layoutcommit_inode(hdr->inode, false);
1274 	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1275 
1276 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1277 		dprintk("%s Reset task %5u for i/o through MDS "
1278 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1279 			hdr->task.tk_pid,
1280 			hdr->inode->i_sb->s_id,
1281 			(unsigned long long)NFS_FILEID(hdr->inode),
1282 			hdr->args.count,
1283 			(unsigned long long)hdr->args.offset);
1284 
1285 		trace_pnfs_mds_fallback_read_done(hdr->inode,
1286 				hdr->args.offset, hdr->args.count,
1287 				IOMODE_READ, NFS_I(hdr->inode)->layout,
1288 				hdr->lseg);
1289 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1290 	}
1291 }
1292 
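/*
 * Handle an NFSv4 error from a data server: trigger session recovery,
 * delay, or layout invalidation as required, and tell the caller
 * whether to retry the RPC, reset the I/O to pNFS, or reset it to
 * the MDS.
 */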
1293 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1294 					   u32 op_status,
1295 					   struct nfs4_state *state,
1296 					   struct nfs_client *clp,
1297 					   struct pnfs_layout_segment *lseg,
1298 					   u32 idx, u32 dss_id)
1299 {
1300 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1301 	struct inode *inode = lo->plh_inode;
1302 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
1303 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1304 
1305 	switch (op_status) {
1306 	case NFS4_OK:
1307 	case NFS4ERR_NXIO:
1308 		break;
1309 	case NFSERR_PERM:
1310 		if (!task->tk_xprt)
1311 			break;
1312 		xprt_force_disconnect(task->tk_xprt);
1313 		goto out_retry;
1314 	case NFS4ERR_BADSESSION:
1315 	case NFS4ERR_BADSLOT:
1316 	case NFS4ERR_BAD_HIGH_SLOT:
1317 	case NFS4ERR_DEADSESSION:
1318 	case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1319 	case NFS4ERR_SEQ_FALSE_RETRY:
1320 	case NFS4ERR_SEQ_MISORDERED:
1321 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1322 			"flags 0x%x\n", __func__, task->tk_status,
1323 			clp->cl_exchange_flags);
1324 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1325 		goto out_retry;
1326 	case NFS4ERR_DELAY:
1327 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1328 		fallthrough;
1329 	case NFS4ERR_GRACE:
1330 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1331 		goto out_retry;
1332 	case NFS4ERR_RETRY_UNCACHED_REP:
1333 		goto out_retry;
1334 	/* Invalidate Layout errors */
1335 	case NFS4ERR_PNFS_NO_LAYOUT:
1336 	case NFS4ERR_STALE:
1337 	case NFS4ERR_BADHANDLE:
1338 	case NFS4ERR_ISDIR:
1339 	case NFS4ERR_FHEXPIRED:
1340 	case NFS4ERR_WRONG_TYPE:
1341 		dprintk("%s Invalid layout error %d\n", __func__,
1342 			task->tk_status);
1343 		/*
1344 		 * Destroy layout so new i/o will get a new layout.
1345 		 * Layout will not be destroyed until all current lseg
1346 		 * references are put. Mark layout as invalid to resend failed
1347 		 * i/o and all i/o waiting on the slot table to the MDS until
1348 		 * layout is destroyed and a new valid layout is obtained.
1349 		 */
1350 		pnfs_destroy_layout(NFS_I(inode));
1351 		rpc_wake_up(&tbl->slot_tbl_waitq);
1352 		goto reset;
1353 	default:
1354 		break;
1355 	}
1356 
1357 	switch (task->tk_status) {
1358 	/* RPC connection errors */
1359 	case -ENETDOWN:
1360 	case -ENETUNREACH:
1361 		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1362 			return -NFS4ERR_FATAL_IOERROR;
1363 		fallthrough;
1364 	case -ECONNREFUSED:
1365 	case -EHOSTDOWN:
1366 	case -EHOSTUNREACH:
1367 	case -EIO:
1368 	case -ETIMEDOUT:
1369 	case -EPIPE:
1370 	case -EPROTO:
1371 	case -ENODEV:
1372 		dprintk("%s DS connection error %d\n", __func__,
1373 			task->tk_status);
1374 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1375 				&devid->deviceid);
1376 		rpc_wake_up(&tbl->slot_tbl_waitq);
1377 		break;
1378 	default:
1379 		break;
1380 	}
1381 
1382 	if (ff_layout_avoid_mds_available_ds(lseg))
1383 		return -NFS4ERR_RESET_TO_PNFS;
1384 reset:
1385 	dprintk("%s Retry through MDS. Error %d\n", __func__,
1386 		task->tk_status);
1387 	return -NFS4ERR_RESET_TO_MDS;
1388 
1389 out_retry:
1390 	task->tk_status = 0;
1391 	return -EAGAIN;
1392 }
1393 
1394 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1395 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1396 					   u32 op_status,
1397 					   struct nfs_client *clp,
1398 					   struct pnfs_layout_segment *lseg,
1399 					   u32 idx, u32 dss_id)
1400 {
1401 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
1402 
1403 	switch (op_status) {
1404 	case NFS_OK:
1405 	case NFSERR_NXIO:
1406 		break;
1407 	case NFSERR_PERM:
1408 		if (!task->tk_xprt)
1409 			break;
1410 		xprt_force_disconnect(task->tk_xprt);
1411 		goto out_retry;
1412 	case NFSERR_ACCES:
1413 	case NFSERR_BADHANDLE:
1414 	case NFSERR_FBIG:
1415 	case NFSERR_IO:
1416 	case NFSERR_NOSPC:
1417 	case NFSERR_ROFS:
1418 	case NFSERR_STALE:
1419 		goto out_reset_to_pnfs;
1420 	case NFSERR_JUKEBOX:
1421 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1422 		goto out_retry;
1423 	default:
1424 		break;
1425 	}
1426 
1427 	switch (task->tk_status) {
1428 	/* File access problems. Don't mark the device as unavailable */
1429 	case -EACCES:
1430 	case -ESTALE:
1431 	case -EISDIR:
1432 	case -EBADHANDLE:
1433 	case -ELOOP:
1434 	case -ENOSPC:
1435 		break;
1436 	case -EJUKEBOX:
1437 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1438 		goto out_retry;
1439 	case -ENETDOWN:
1440 	case -ENETUNREACH:
1441 		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1442 			return -NFS4ERR_FATAL_IOERROR;
1443 		fallthrough;
1444 	default:
1445 		dprintk("%s DS connection error %d\n", __func__,
1446 			task->tk_status);
1447 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1448 				&devid->deviceid);
1449 	}
1450 out_reset_to_pnfs:
1451 	/* FIXME: Need to prevent infinite looping here. */
1452 	return -NFS4ERR_RESET_TO_PNFS;
1453 out_retry:
1454 	task->tk_status = 0;
1455 	rpc_restart_call_prepare(task);
1456 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1457 	return -EAGAIN;
1458 }
1459 
1460 static int ff_layout_async_handle_error(struct rpc_task *task,
1461 					u32 op_status,
1462 					struct nfs4_state *state,
1463 					struct nfs_client *clp,
1464 					struct pnfs_layout_segment *lseg,
1465 					u32 idx, u32 dss_id)
1466 {
1467 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1468 
1469 	if (task->tk_status >= 0) {
1470 		ff_layout_mark_ds_reachable(lseg, idx, dss_id);
1471 		return 0;
1472 	}
1473 
1474 	/* Handle the case of an invalid layout segment */
1475 	if (!pnfs_is_valid_lseg(lseg))
1476 		return -NFS4ERR_RESET_TO_PNFS;
1477 
1478 	switch (vers) {
1479 	case 3:
1480 		return ff_layout_async_handle_error_v3(task, op_status, clp,
1481 						       lseg, idx, dss_id);
1482 	case 4:
1483 		return ff_layout_async_handle_error_v4(task, op_status, state,
1484 						       clp, lseg, idx, dss_id);
1485 	default:
1486 		/* should never happen */
1487 		WARN_ON_ONCE(1);
1488 		return 0;
1489 	}
1490 }
1491 
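/*
 * Translate a local transport error into an NFS error if needed,
 * record it against the mirror for a later LAYOUTERROR/LAYOUTRETURN,
 * and mark the device unreachable or the layout for return as
 * appropriate.
 */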
1492 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1493 					u32 idx, u32 dss_id, u64 offset, u64 length,
1494 					u32 *op_status, int opnum, int error)
1495 {
1496 	struct nfs4_ff_layout_mirror *mirror;
1497 	u32 status = *op_status;
1498 	int err;
1499 
1500 	if (status == 0) {
1501 		switch (error) {
1502 		case -ETIMEDOUT:
1503 		case -EPFNOSUPPORT:
1504 		case -EPROTONOSUPPORT:
1505 		case -EOPNOTSUPP:
1506 		case -EINVAL:
1507 		case -ECONNREFUSED:
1508 		case -ECONNRESET:
1509 		case -EHOSTDOWN:
1510 		case -EHOSTUNREACH:
1511 		case -ENETDOWN:
1512 		case -ENETUNREACH:
1513 		case -EADDRINUSE:
1514 		case -ENOBUFS:
1515 		case -EPIPE:
1516 		case -EPERM:
1517 		case -EPROTO:
1518 		case -ENODEV:
1519 			*op_status = status = NFS4ERR_NXIO;
1520 			break;
1521 		case -EACCES:
1522 			*op_status = status = NFS4ERR_ACCESS;
1523 			break;
1524 		default:
1525 			return;
1526 		}
1527 	}
1528 
1529 	mirror = FF_LAYOUT_COMP(lseg, idx);
1530 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1531 				       mirror, dss_id, offset, length, status, opnum,
1532 				       nfs_io_gfp_mask());
1533 
1534 	switch (status) {
1535 	case NFS4ERR_DELAY:
1536 	case NFS4ERR_GRACE:
1537 	case NFS4ERR_PERM:
1538 		break;
1539 	case NFS4ERR_NXIO:
1540 		ff_layout_mark_ds_unreachable(lseg, idx, dss_id);
1541 		/*
1542 		 * Don't return the layout if this is a read and we still
1543 		 * have layouts to try
1544 		 */
1545 		if (opnum == OP_READ)
1546 			break;
1547 		fallthrough;
1548 	default:
1549 		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1550 						  lseg);
1551 	}
1552 
1553 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1554 }
1555 
1556 /* NFS_PROTO call done callback routines */
1557 static int ff_layout_read_done_cb(struct rpc_task *task,
1558 				struct nfs_pgio_header *hdr)
1559 {
1560 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
1561 	u32 dss_id = nfs4_ff_layout_calc_dss_id(
1562 		flseg->stripe_unit,
1563 		flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
1564 		hdr->args.offset);
1565 	int err;
1566 
1567 	if (task->tk_status < 0) {
1568 		ff_layout_io_track_ds_error(hdr->lseg,
1569 					    hdr->pgio_mirror_idx, dss_id,
1570 					    hdr->args.offset, hdr->args.count,
1571 					    &hdr->res.op_status, OP_READ,
1572 					    task->tk_status);
1573 		trace_ff_layout_read_error(hdr, task->tk_status);
1574 	}
1575 
1576 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1577 					   hdr->args.context->state,
1578 					   hdr->ds_clp, hdr->lseg,
1579 					   hdr->pgio_mirror_idx,
1580 					   dss_id);
1581 
1582 	trace_nfs4_pnfs_read(hdr, err);
1583 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1584 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1585 	switch (err) {
1586 	case -NFS4ERR_RESET_TO_PNFS:
1587 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1588 		return task->tk_status;
1589 	case -NFS4ERR_RESET_TO_MDS:
1590 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1591 		return task->tk_status;
1592 	case -EAGAIN:
1593 		goto out_eagain;
1594 	case -NFS4ERR_FATAL_IOERROR:
1595 		task->tk_status = -EIO;
1596 		return 0;
1597 	}
1598 
1599 	return 0;
1600 out_eagain:
1601 	rpc_restart_call_prepare(task);
1602 	return -EAGAIN;
1603 }
1604 
1605 static bool
1606 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1607 {
1608 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1609 }
1610 
1611 /*
1612  * We reference the rpc_cred of the first WRITE that triggers the need for
1613  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1614  * rfc5661 is not clear about which credential should be used.
1615  *
1616  * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
1617  * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1618  * we always send layoutcommit after DS writes.
1619  */
1620 static void
1621 ff_layout_set_layoutcommit(struct inode *inode,
1622 		struct pnfs_layout_segment *lseg,
1623 		loff_t end_offset)
1624 {
1625 	if (!ff_layout_need_layoutcommit(lseg))
1626 		return;
1627 
1628 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1629 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1630 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1631 }
1632 
1633 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1634 		struct nfs_pgio_header *hdr)
1635 {
1636 	struct nfs4_ff_layout_mirror *mirror;
1637 	u32 dss_id;
1638 
1639 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1640 		return;
1641 
1642 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1643 	dss_id = nfs4_ff_layout_calc_dss_id(
1644 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1645 		mirror->dss_count,
1646 		hdr->args.offset);
1647 
1648 	nfs4_ff_layout_stat_io_start_read(
1649 		hdr->inode,
1650 		mirror,
1651 		dss_id,
1652 		hdr->args.count,
1653 		task->tk_start);
1654 }
1655 
1656 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1657 		struct nfs_pgio_header *hdr)
1658 {
1659 	struct nfs4_ff_layout_mirror *mirror;
1660 	u32 dss_id;
1661 
1662 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1663 		return;
1664 
1665 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1666 	dss_id = nfs4_ff_layout_calc_dss_id(
1667 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1668 		mirror->dss_count,
1669 		hdr->args.offset);
1670 
1671 	nfs4_ff_layout_stat_io_end_read(
1672 		task,
1673 		mirror,
1674 		dss_id,
1675 		hdr->args.count,
1676 		hdr->res.count);
1677 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1678 }
1679 
1680 static int ff_layout_read_prepare_common(struct rpc_task *task,
1681 					 struct nfs_pgio_header *hdr)
1682 {
1683 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1684 		rpc_exit(task, -EIO);
1685 		return -EIO;
1686 	}
1687 
1688 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1689 		rpc_exit(task, -EAGAIN);
1690 		return -EAGAIN;
1691 	}
1692 
1693 	ff_layout_read_record_layoutstats_start(task, hdr);
1694 	return 0;
1695 }
1696 
1697 /*
1698  * Call ops for the async read/write cases
1699  * In the case of dense layouts, the offset needs to be reset to its
1700  * original value.
1701  */
1702 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1703 {
1704 	struct nfs_pgio_header *hdr = data;
1705 
1706 	if (ff_layout_read_prepare_common(task, hdr))
1707 		return;
1708 
1709 	rpc_call_start(task);
1710 }
1711 
1712 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1713 {
1714 	struct nfs_pgio_header *hdr = data;
1715 
1716 	if (nfs4_setup_sequence(hdr->ds_clp,
1717 				&hdr->args.seq_args,
1718 				&hdr->res.seq_res,
1719 				task))
1720 		return;
1721 
1722 	ff_layout_read_prepare_common(task, hdr);
1723 }
1724 
1725 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1726 {
1727 	struct nfs_pgio_header *hdr = data;
1728 
1729 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1730 	    task->tk_status == 0) {
1731 		nfs4_sequence_done(task, &hdr->res.seq_res);
1732 		return;
1733 	}
1734 
1735 	/* Note this may cause RPC to be resent */
1736 	hdr->mds_ops->rpc_call_done(task, hdr);
1737 }
1738 
1739 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1740 {
1741 	struct nfs_pgio_header *hdr = data;
1742 
1743 	ff_layout_read_record_layoutstats_done(task, hdr);
1744 	rpc_count_iostats_metrics(task,
1745 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1746 }
1747 
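/*
 * Final release of a DS READ: record layoutstats if that has not happened
 * yet, then re-drive the I/O through pNFS or the MDS if the done callback
 * flagged a resend.
 */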
1748 static void ff_layout_read_release(void *data)
1749 {
1750 	struct nfs_pgio_header *hdr = data;
1751 
1752 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1753 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1754 		ff_layout_resend_pnfs_read(hdr);
1755 	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1756 		ff_layout_reset_read(hdr);
1757 	pnfs_generic_rw_release(data);
1758 }
1759 
1760 
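/*
 * DS WRITE completion. Errors are tracked against the DS (feeding
 * LAYOUTERROR/LAYOUTRETURN) and may cause the I/O to be resent via pNFS or
 * the MDS. On success, the LAYOUTCOMMIT range is only extended when the DS
 * replied FILE_SYNC or DATA_SYNC; unstable writes wait for the commit.
 */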
1761 static int ff_layout_write_done_cb(struct rpc_task *task,
1762 				struct nfs_pgio_header *hdr)
1763 {
1764 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
1765 	u32 dss_id = nfs4_ff_layout_calc_dss_id(
1766 		flseg->stripe_unit,
1767 		flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
1768 		hdr->args.offset);
1769 	loff_t end_offs = 0;
1770 	int err;
1771 
1772 	if (task->tk_status < 0) {
1773 		ff_layout_io_track_ds_error(hdr->lseg,
1774 					    hdr->pgio_mirror_idx, dss_id,
1775 					    hdr->args.offset, hdr->args.count,
1776 					    &hdr->res.op_status, OP_WRITE,
1777 					    task->tk_status);
1778 		trace_ff_layout_write_error(hdr, task->tk_status);
1779 	}
1780 
1781 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1782 					   hdr->args.context->state,
1783 					   hdr->ds_clp, hdr->lseg,
1784 					   hdr->pgio_mirror_idx,
1785 					   dss_id);
1786 
1787 	trace_nfs4_pnfs_write(hdr, err);
1788 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1789 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1790 	switch (err) {
1791 	case -NFS4ERR_RESET_TO_PNFS:
1792 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1793 		return task->tk_status;
1794 	case -NFS4ERR_RESET_TO_MDS:
1795 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1796 		return task->tk_status;
1797 	case -EAGAIN:
1798 		return -EAGAIN;
1799 	case -NFS4ERR_FATAL_IOERROR:
1800 		task->tk_status = -EIO;
1801 		return 0;
1802 	}
1803 
1804 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1805 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1806 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1807 
1808 	/* Note: if the write is unstable, don't set end_offs until commit */
1809 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1810 
1811 	/* zero out fattr since we don't care about DS attrs at all */
1812 	hdr->fattr.valid = 0;
1813 	if (task->tk_status >= 0)
1814 		nfs_writeback_update_inode(hdr);
1815 
1816 	return 0;
1817 }
1818 
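/*
 * DS COMMIT completion: track any DS error, then either prepare the writes
 * for resending, restart the RPC, or, on success, extend the LAYOUTCOMMIT
 * range up to data->lwb.
 */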
1819 static int ff_layout_commit_done_cb(struct rpc_task *task,
1820 				     struct nfs_commit_data *data)
1821 {
1822 	int err;
1823 	u32 idx = calc_mirror_idx_from_commit(data->lseg, data->ds_commit_index);
1824 	u32 dss_id = calc_dss_id_from_commit(data->lseg, data->ds_commit_index);
1825 
1826 	if (task->tk_status < 0) {
1827 		ff_layout_io_track_ds_error(data->lseg, idx, dss_id,
1828 					    data->args.offset, data->args.count,
1829 					    &data->res.op_status, OP_COMMIT,
1830 					    task->tk_status);
1831 		trace_ff_layout_commit_error(data, task->tk_status);
1832 	}
1833 
1834 	err = ff_layout_async_handle_error(task, data->res.op_status,
1835 					   NULL, data->ds_clp, data->lseg, idx,
1836 					   dss_id);
1837 
1838 	trace_nfs4_pnfs_commit_ds(data, err);
1839 	switch (err) {
1840 	case -NFS4ERR_RESET_TO_PNFS:
1841 		pnfs_generic_prepare_to_resend_writes(data);
1842 		return -EAGAIN;
1843 	case -NFS4ERR_RESET_TO_MDS:
1844 		pnfs_generic_prepare_to_resend_writes(data);
1845 		return -EAGAIN;
1846 	case -EAGAIN:
1847 		rpc_restart_call_prepare(task);
1848 		return -EAGAIN;
1849 	case -NFS4ERR_FATAL_IOERROR:
1850 		task->tk_status = -EIO;
1851 		return 0;
1852 	}
1853 
1854 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1855 	return 0;
1856 }
1857 
1858 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1859 		struct nfs_pgio_header *hdr)
1860 {
1861 	struct nfs4_ff_layout_mirror *mirror;
1862 	u32 dss_id;
1863 
1864 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1865 		return;
1866 
1867 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1868 	dss_id = nfs4_ff_layout_calc_dss_id(
1869 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1870 		mirror->dss_count,
1871 		hdr->args.offset);
1872 
1873 	nfs4_ff_layout_stat_io_start_write(
1874 		hdr->inode,
1875 		mirror,
1876 		dss_id,
1877 		hdr->args.count,
1878 		task->tk_start);
1879 }
1880 
1881 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1882 		struct nfs_pgio_header *hdr)
1883 {
1884 	struct nfs4_ff_layout_mirror *mirror;
1885 	u32 dss_id;
1886 
1887 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1888 		return;
1889 
1890 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1891 	dss_id = nfs4_ff_layout_calc_dss_id(
1892 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1893 		mirror->dss_count,
1894 		hdr->args.offset);
1895 
1896 	nfs4_ff_layout_stat_io_end_write(
1897 		task,
1898 		mirror,
1899 		dss_id,
1900 		hdr->args.count,
1901 		hdr->res.count,
1902 		hdr->res.verf->committed);
1903 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1904 }
1905 
1906 static int ff_layout_write_prepare_common(struct rpc_task *task,
1907 					  struct nfs_pgio_header *hdr)
1908 {
1909 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1910 		rpc_exit(task, -EIO);
1911 		return -EIO;
1912 	}
1913 
1914 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1915 		rpc_exit(task, -EAGAIN);
1916 		return -EAGAIN;
1917 	}
1918 
1919 	ff_layout_write_record_layoutstats_start(task, hdr);
1920 	return 0;
1921 }
1922 
1923 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1924 {
1925 	struct nfs_pgio_header *hdr = data;
1926 
1927 	if (ff_layout_write_prepare_common(task, hdr))
1928 		return;
1929 
1930 	rpc_call_start(task);
1931 }
1932 
1933 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1934 {
1935 	struct nfs_pgio_header *hdr = data;
1936 
1937 	if (nfs4_setup_sequence(hdr->ds_clp,
1938 				&hdr->args.seq_args,
1939 				&hdr->res.seq_res,
1940 				task))
1941 		return;
1942 
1943 	ff_layout_write_prepare_common(task, hdr);
1944 }
1945 
1946 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1947 {
1948 	struct nfs_pgio_header *hdr = data;
1949 
1950 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1951 	    task->tk_status == 0) {
1952 		nfs4_sequence_done(task, &hdr->res.seq_res);
1953 		return;
1954 	}
1955 
1956 	/* Note this may cause RPC to be resent */
1957 	hdr->mds_ops->rpc_call_done(task, hdr);
1958 }
1959 
1960 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1961 {
1962 	struct nfs_pgio_header *hdr = data;
1963 
1964 	ff_layout_write_record_layoutstats_done(task, hdr);
1965 	rpc_count_iostats_metrics(task,
1966 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1967 }
1968 
1969 static void ff_layout_write_release(void *data)
1970 {
1971 	struct nfs_pgio_header *hdr = data;
1972 
1973 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1974 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1975 		ff_layout_send_layouterror(hdr->lseg);
1976 		ff_layout_reset_write(hdr, true);
1977 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1978 		ff_layout_reset_write(hdr, false);
1979 	pnfs_generic_rw_release(data);
1980 }
1981 
1982 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1983 		struct nfs_commit_data *cdata)
1984 {
1985 	u32 idx, dss_id;
1986 
1987 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1988 		return;
1989 
1990 	idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
1991 	dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
1992 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1993 			FF_LAYOUT_COMP(cdata->lseg, idx),
1994 			dss_id,
1995 			0, task->tk_start);
1996 }
1997 
1998 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1999 		struct nfs_commit_data *cdata)
2000 {
2001 	struct nfs_page *req;
2002 	__u64 count = 0;
2003 	u32 idx, dss_id;
2004 
2005 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
2006 		return;
2007 
2008 	if (task->tk_status == 0) {
2009 		list_for_each_entry(req, &cdata->pages, wb_list)
2010 			count += req->wb_bytes;
2011 	}
2012 
2013 	idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
2014 	dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
2015 	nfs4_ff_layout_stat_io_end_write(task,
2016 			FF_LAYOUT_COMP(cdata->lseg, idx),
2017 			dss_id,
2018 			count, count, NFS_FILE_SYNC);
2019 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
2020 }
2021 
2022 static int ff_layout_commit_prepare_common(struct rpc_task *task,
2023 					   struct nfs_commit_data *cdata)
2024 {
2025 	if (!pnfs_is_valid_lseg(cdata->lseg)) {
2026 		rpc_exit(task, -EAGAIN);
2027 		return -EAGAIN;
2028 	}
2029 
2030 	ff_layout_commit_record_layoutstats_start(task, cdata);
2031 	return 0;
2032 }
2033 
2034 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
2035 {
2036 	if (ff_layout_commit_prepare_common(task, data))
2037 		return;
2038 
2039 	rpc_call_start(task);
2040 }
2041 
2042 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
2043 {
2044 	struct nfs_commit_data *wdata = data;
2045 
2046 	if (nfs4_setup_sequence(wdata->ds_clp,
2047 				&wdata->args.seq_args,
2048 				&wdata->res.seq_res,
2049 				task))
2050 		return;
2051 	ff_layout_commit_prepare_common(task, data);
2052 }
2053 
2054 static void ff_layout_commit_done(struct rpc_task *task, void *data)
2055 {
2056 	pnfs_generic_write_commit_done(task, data);
2057 }
2058 
2059 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
2060 {
2061 	struct nfs_commit_data *cdata = data;
2062 
2063 	ff_layout_commit_record_layoutstats_done(task, cdata);
2064 	rpc_count_iostats_metrics(task,
2065 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
2066 }
2067 
2068 static void ff_layout_commit_release(void *data)
2069 {
2070 	struct nfs_commit_data *cdata = data;
2071 
2072 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
2073 	pnfs_generic_commit_release(data);
2074 }
2075 
2076 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
2077 	.rpc_call_prepare = ff_layout_read_prepare_v3,
2078 	.rpc_call_done = ff_layout_read_call_done,
2079 	.rpc_count_stats = ff_layout_read_count_stats,
2080 	.rpc_release = ff_layout_read_release,
2081 };
2082 
2083 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
2084 	.rpc_call_prepare = ff_layout_read_prepare_v4,
2085 	.rpc_call_done = ff_layout_read_call_done,
2086 	.rpc_count_stats = ff_layout_read_count_stats,
2087 	.rpc_release = ff_layout_read_release,
2088 };
2089 
2090 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
2091 	.rpc_call_prepare = ff_layout_write_prepare_v3,
2092 	.rpc_call_done = ff_layout_write_call_done,
2093 	.rpc_count_stats = ff_layout_write_count_stats,
2094 	.rpc_release = ff_layout_write_release,
2095 };
2096 
2097 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
2098 	.rpc_call_prepare = ff_layout_write_prepare_v4,
2099 	.rpc_call_done = ff_layout_write_call_done,
2100 	.rpc_count_stats = ff_layout_write_count_stats,
2101 	.rpc_release = ff_layout_write_release,
2102 };
2103 
2104 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
2105 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
2106 	.rpc_call_done = ff_layout_commit_done,
2107 	.rpc_count_stats = ff_layout_commit_count_stats,
2108 	.rpc_release = ff_layout_commit_release,
2109 };
2110 
2111 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
2112 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
2113 	.rpc_call_done = ff_layout_commit_done,
2114 	.rpc_count_stats = ff_layout_commit_count_stats,
2115 	.rpc_release = ff_layout_commit_release,
2116 };
2117 
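/*
 * Issue a READ to the data server covering hdr->args.offset on the selected
 * mirror: pick the DS stripe, set up the DS client and credential, then send
 * the RPC using the v3 or v4.x call ops. On failure, retry through pNFS when
 * the layout says to avoid the MDS and the error is not fatal, otherwise fall
 * back to a read through the MDS.
 */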
2118 static enum pnfs_try_status
2119 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
2120 {
2121 	struct pnfs_layout_segment *lseg = hdr->lseg;
2122 	struct nfs4_pnfs_ds *ds;
2123 	struct rpc_clnt *ds_clnt;
2124 	struct nfsd_file *localio;
2125 	struct nfs4_ff_layout_mirror *mirror;
2126 	const struct cred *ds_cred;
2127 	loff_t offset = hdr->args.offset;
2128 	u32 idx = hdr->pgio_mirror_idx;
2129 	int vers;
2130 	struct nfs_fh *fh;
2131 	u32 dss_id;
2132 	bool ds_fatal_error = false;
2133 
2134 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
2135 		__func__, hdr->inode->i_ino,
2136 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
2137 
2138 	mirror = FF_LAYOUT_COMP(lseg, idx);
2139 	dss_id = nfs4_ff_layout_calc_dss_id(
2140 		FF_LAYOUT_LSEG(lseg)->stripe_unit,
2141 		mirror->dss_count,
2142 		offset);
2143 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, false);
2144 	if (IS_ERR(ds)) {
2145 		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
2146 		goto out_failed;
2147 	}
2148 
2149 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2150 						   hdr->inode, dss_id);
2151 	if (IS_ERR(ds_clnt))
2152 		goto out_failed;
2153 
2154 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
2155 	if (!ds_cred)
2156 		goto out_failed;
2157 
2158 	vers = nfs4_ff_layout_ds_version(mirror, dss_id);
2159 
2160 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
2161 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
2162 
2163 	hdr->pgio_done_cb = ff_layout_read_done_cb;
2164 	refcount_inc(&ds->ds_clp->cl_count);
2165 	hdr->ds_clp = ds->ds_clp;
2166 	fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
2167 	if (fh)
2168 		hdr->args.fh = fh;
2169 
2170 	nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
2171 
2172 	/*
2173 	 * Note that if we ever decide to split across DSes,
2174 	 * then we may need to handle dense-like offsets.
2175 	 */
2176 	hdr->args.offset = offset;
2177 	hdr->mds_offset = offset;
2178 
2179 	/* Start IO accounting for local read */
2180 	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
2181 				FMODE_READ);
2182 	if (localio) {
2183 		hdr->task.tk_start = ktime_get();
2184 		ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
2185 	}
2186 
2187 	/* Perform an asynchronous read to ds */
2188 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
2189 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
2190 				      &ff_layout_read_call_ops_v4,
2191 			  0, RPC_TASK_SOFTCONN, localio);
2192 	put_cred(ds_cred);
2193 	return PNFS_ATTEMPTED;
2194 
2195 out_failed:
2196 	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
2197 		return PNFS_TRY_AGAIN;
2198 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
2199 			hdr->args.offset, hdr->args.count,
2200 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
2201 	return PNFS_NOT_ATTEMPTED;
2202 }
2203 
2204 /* Perform async writes. */
2205 static enum pnfs_try_status
2206 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
2207 {
2208 	struct pnfs_layout_segment *lseg = hdr->lseg;
2209 	struct nfs4_pnfs_ds *ds;
2210 	struct rpc_clnt *ds_clnt;
2211 	struct nfsd_file *localio;
2212 	struct nfs4_ff_layout_mirror *mirror;
2213 	const struct cred *ds_cred;
2214 	loff_t offset = hdr->args.offset;
2215 	int vers;
2216 	struct nfs_fh *fh;
2217 	u32 idx = hdr->pgio_mirror_idx;
2218 	u32 dss_id;
2219 	bool ds_fatal_error = false;
2220 
2221 	mirror = FF_LAYOUT_COMP(lseg, idx);
2222 	dss_id = nfs4_ff_layout_calc_dss_id(
2223 		FF_LAYOUT_LSEG(lseg)->stripe_unit,
2224 		mirror->dss_count,
2225 		offset);
2226 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
2227 	if (IS_ERR(ds)) {
2228 		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
2229 		goto out_failed;
2230 	}
2231 
2232 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2233 						   hdr->inode, dss_id);
2234 	if (IS_ERR(ds_clnt))
2235 		goto out_failed;
2236 
2237 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
2238 	if (!ds_cred)
2239 		goto out_failed;
2240 
2241 	vers = nfs4_ff_layout_ds_version(mirror, dss_id);
2242 
2243 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
2244 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
2245 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
2246 		vers);
2247 
2248 	hdr->pgio_done_cb = ff_layout_write_done_cb;
2249 	refcount_inc(&ds->ds_clp->cl_count);
2250 	hdr->ds_clp = ds->ds_clp;
2251 	hdr->ds_commit_idx = calc_commit_idx(lseg, idx, dss_id);
2252 	fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
2253 	if (fh)
2254 		hdr->args.fh = fh;
2255 
2256 	nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
2257 
2258 	/*
2259 	 * Note that if we ever decide to split across DSes,
2260 	 * then we may need to handle dense-like offsets.
2261 	 */
2262 	hdr->args.offset = offset;
2263 
2264 	/* Start IO accounting for local write */
2265 	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
2266 				   FMODE_READ|FMODE_WRITE);
2267 	if (localio) {
2268 		hdr->task.tk_start = ktime_get();
2269 		ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
2270 	}
2271 
2272 	/* Perform an asynchronous write */
2273 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
2274 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
2275 				      &ff_layout_write_call_ops_v4,
2276 			  sync, RPC_TASK_SOFTCONN, localio);
2277 	put_cred(ds_cred);
2278 	return PNFS_ATTEMPTED;
2279 
2280 out_failed:
2281 	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
2282 		return PNFS_TRY_AGAIN;
2283 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
2284 			hdr->args.offset, hdr->args.count,
2285 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
2286 	return PNFS_NOT_ATTEMPTED;
2287 }
2288 
2289 static struct nfs_fh *
2290 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i, u32 dss_id)
2291 {
2292 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2293 
2294 	/* FIXME: Assume that there is only one NFS version available
2295 	 * for the DS.
2296 	 */
2297 	return &flseg->mirror_array[i]->dss[dss_id].fh_versions[0];
2298 }
2299 
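/*
 * Send a COMMIT to the data server owning this commit bucket. Any setup
 * failure queues the writes for resending instead.
 */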
2300 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
2301 {
2302 	struct pnfs_layout_segment *lseg = data->lseg;
2303 	struct nfs4_pnfs_ds *ds;
2304 	struct rpc_clnt *ds_clnt;
2305 	struct nfsd_file *localio;
2306 	struct nfs4_ff_layout_mirror *mirror;
2307 	const struct cred *ds_cred;
2308 	u32 idx, dss_id;
2309 	int vers, ret;
2310 	struct nfs_fh *fh;
2311 
2312 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
2313 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
2314 		goto out_err;
2315 
2316 	idx = calc_mirror_idx_from_commit(lseg, data->ds_commit_index);
2317 	mirror = FF_LAYOUT_COMP(lseg, idx);
2318 	dss_id = calc_dss_id_from_commit(lseg, data->ds_commit_index);
2319 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
2320 	if (IS_ERR(ds))
2321 		goto out_err;
2322 
2323 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2324 						   data->inode, dss_id);
2325 	if (IS_ERR(ds_clnt))
2326 		goto out_err;
2327 
2328 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred, dss_id);
2329 	if (!ds_cred)
2330 		goto out_err;
2331 
2332 	vers = nfs4_ff_layout_ds_version(mirror, dss_id);
2333 
2334 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
2335 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
2336 		vers);
2337 	data->commit_done_cb = ff_layout_commit_done_cb;
2338 	data->cred = ds_cred;
2339 	refcount_inc(&ds->ds_clp->cl_count);
2340 	data->ds_clp = ds->ds_clp;
2341 	fh = select_ds_fh_from_commit(lseg, idx, dss_id);
2342 	if (fh)
2343 		data->args.fh = fh;
2344 
2345 	/* Start IO accounting for local commit */
2346 	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
2347 				   FMODE_READ|FMODE_WRITE);
2348 	if (localio) {
2349 		data->task.tk_start = ktime_get();
2350 		ff_layout_commit_record_layoutstats_start(&data->task, data);
2351 	}
2352 
2353 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2354 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
2355 					       &ff_layout_commit_call_ops_v4,
2356 				   how, RPC_TASK_SOFTCONN, localio);
2357 	put_cred(ds_cred);
2358 	return ret;
2359 out_err:
2360 	pnfs_generic_prepare_to_resend_writes(data);
2361 	pnfs_generic_commit_release(data);
2362 	return -EAGAIN;
2363 }
2364 
2365 static int
2366 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2367 			   int how, struct nfs_commit_info *cinfo)
2368 {
2369 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2370 					    ff_layout_initiate_commit);
2371 }
2372 
2373 static bool ff_layout_match_rw(const struct rpc_task *task,
2374 			       const struct nfs_pgio_header *hdr,
2375 			       const struct pnfs_layout_segment *lseg)
2376 {
2377 	return hdr->lseg == lseg;
2378 }
2379 
2380 static bool ff_layout_match_commit(const struct rpc_task *task,
2381 				   const struct nfs_commit_data *cdata,
2382 				   const struct pnfs_layout_segment *lseg)
2383 {
2384 	return cdata->lseg == lseg;
2385 }
2386 
2387 static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2388 {
2389 	const struct rpc_call_ops *ops = task->tk_ops;
2390 
2391 	if (ops == &ff_layout_read_call_ops_v3 ||
2392 	    ops == &ff_layout_read_call_ops_v4 ||
2393 	    ops == &ff_layout_write_call_ops_v3 ||
2394 	    ops == &ff_layout_write_call_ops_v4)
2395 		return ff_layout_match_rw(task, task->tk_calldata, data);
2396 	if (ops == &ff_layout_commit_call_ops_v3 ||
2397 	    ops == &ff_layout_commit_call_ops_v4)
2398 		return ff_layout_match_commit(task, task->tk_calldata, data);
2399 	return false;
2400 }
2401 
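/*
 * Cancel every in-flight read, write and commit that this layout segment has
 * outstanding against its data servers, disconnecting an RPC client whenever
 * at least one of its tasks was cancelled.
 */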
2402 static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2403 {
2404 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2405 	struct nfs4_ff_layout_mirror *mirror;
2406 	struct nfs4_ff_layout_ds *mirror_ds;
2407 	struct nfs4_pnfs_ds *ds;
2408 	struct nfs_client *ds_clp;
2409 	struct rpc_clnt *clnt;
2410 	u32 idx, dss_id;
2411 
2412 	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2413 		mirror = flseg->mirror_array[idx];
2414 		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
2415 			mirror_ds = mirror->dss[dss_id].mirror_ds;
2416 			if (IS_ERR_OR_NULL(mirror_ds))
2417 				continue;
2418 			ds = mirror->dss[dss_id].mirror_ds->ds;
2419 			if (!ds)
2420 				continue;
2421 			ds_clp = ds->ds_clp;
2422 			if (!ds_clp)
2423 				continue;
2424 			clnt = ds_clp->cl_rpcclient;
2425 			if (!clnt)
2426 				continue;
2427 			if (!rpc_cancel_tasks(clnt, -EAGAIN,
2428 					      ff_layout_match_io, lseg))
2429 				continue;
2430 			rpc_clnt_disconnect(clnt);
2431 		}
2432 	}
2433 }
2434 
2435 static struct pnfs_ds_commit_info *
2436 ff_layout_get_ds_info(struct inode *inode)
2437 {
2438 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2439 
2440 	if (layout == NULL)
2441 		return NULL;
2442 
2443 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2444 }
2445 
2446 static void
2447 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2448 		struct pnfs_layout_segment *lseg)
2449 {
2450 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2451 	struct inode *inode = lseg->pls_layout->plh_inode;
2452 	struct pnfs_commit_array *array, *new;
2453 	u32 size = flseg->mirror_array_cnt * flseg->mirror_array[0]->dss_count;
2454 
2455 	new = pnfs_alloc_commit_array(size,
2456 				      nfs_io_gfp_mask());
2457 	if (new) {
2458 		spin_lock(&inode->i_lock);
2459 		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2460 		spin_unlock(&inode->i_lock);
2461 		if (array != new)
2462 			pnfs_free_commit_array(new);
2463 	}
2464 }
2465 
2466 static void
2467 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2468 		struct inode *inode)
2469 {
2470 	spin_lock(&inode->i_lock);
2471 	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2472 	spin_unlock(&inode->i_lock);
2473 }
2474 
2475 static void
2476 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2477 {
2478 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2479 						  id_node));
2480 }
2481 
2482 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2483 				  const struct nfs4_layoutreturn_args *args,
2484 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2485 {
2486 	__be32 *start;
2487 
2488 	start = xdr_reserve_space(xdr, 4);
2489 	if (unlikely(!start))
2490 		return -E2BIG;
2491 
2492 	*start = cpu_to_be32(ff_args->num_errors);
2493 	/* This assumes we always return _ALL_ layouts */
2494 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2495 }
2496 
2497 static void
2498 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2499 			    const nfs4_stateid *stateid,
2500 			    const struct nfs42_layoutstat_devinfo *devinfo)
2501 {
2502 	__be32 *p;
2503 
2504 	p = xdr_reserve_space(xdr, 8 + 8);
2505 	p = xdr_encode_hyper(p, devinfo->offset);
2506 	p = xdr_encode_hyper(p, devinfo->length);
2507 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2508 	p = xdr_reserve_space(xdr, 4*8);
2509 	p = xdr_encode_hyper(p, devinfo->read_count);
2510 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2511 	p = xdr_encode_hyper(p, devinfo->write_count);
2512 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2513 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2514 }
2515 
2516 static void
2517 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2518 			    const nfs4_stateid *stateid,
2519 			    const struct nfs42_layoutstat_devinfo *devinfo)
2520 {
2521 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2522 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2523 			devinfo->ld_private.data);
2524 }
2525 
2526 /* encode the per-device iostats collected for this layoutreturn */
2527 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2528 		const struct nfs4_layoutreturn_args *args,
2529 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2530 {
2531 	__be32 *p;
2532 	int i;
2533 
2534 	p = xdr_reserve_space(xdr, 4);
2535 	*p = cpu_to_be32(ff_args->num_dev);
2536 	for (i = 0; i < ff_args->num_dev; i++)
2537 		ff_layout_encode_ff_iostat(xdr,
2538 				&args->layout->plh_stateid,
2539 				&ff_args->devinfo[i]);
2540 }
2541 
2542 static void
2543 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2544 		unsigned int num_entries)
2545 {
2546 	unsigned int i;
2547 
2548 	for (i = 0; i < num_entries; i++) {
2549 		if (!devinfo[i].ld_private.ops)
2550 			continue;
2551 		if (!devinfo[i].ld_private.ops->free)
2552 			continue;
2553 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2554 	}
2555 }
2556 
2557 static struct nfs4_deviceid_node *
2558 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2559 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2560 {
2561 	struct nfs4_ff_layout_ds *dsaddr;
2562 
2563 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2564 	if (!dsaddr)
2565 		return NULL;
2566 	return &dsaddr->id_node;
2567 }
2568 
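/*
 * Encode the flexfiles LAYOUTRETURN body: the ioerr and iostats arrays are
 * first XDR-encoded into a scratch page, then emitted as a single
 * length-prefixed opaque via xdr_write_pages().
 */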
2569 static void
2570 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2571 		const void *voidargs,
2572 		const struct nfs4_xdr_opaque_data *ff_opaque)
2573 {
2574 	const struct nfs4_layoutreturn_args *args = voidargs;
2575 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2576 	struct xdr_buf tmp_buf = {
2577 		.head = {
2578 			[0] = {
2579 				.iov_base = page_address(ff_args->pages[0]),
2580 			},
2581 		},
2582 		.buflen = PAGE_SIZE,
2583 	};
2584 	struct xdr_stream tmp_xdr;
2585 	__be32 *start;
2586 
2587 	dprintk("%s: Begin\n", __func__);
2588 
2589 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2590 
2591 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2592 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2593 
2594 	start = xdr_reserve_space(xdr, 4);
2595 	*start = cpu_to_be32(tmp_buf.len);
2596 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2597 
2598 	dprintk("%s: Return\n", __func__);
2599 }
2600 
2601 static void
2602 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2603 {
2604 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2605 
2606 	if (!args->data)
2607 		return;
2608 	ff_args = args->data;
2609 	args->data = NULL;
2610 
2611 	ff_layout_free_ds_ioerr(&ff_args->errors);
2612 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2613 
2614 	put_page(ff_args->pages[0]);
2615 	kfree(ff_args);
2616 }
2617 
2618 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2619 	.encode = ff_layout_encode_layoutreturn,
2620 	.free = ff_layout_free_layoutreturn,
2621 };
2622 
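/*
 * Collect the DS errors and per-device statistics that accompany a
 * LAYOUTRETURN and attach them to the request via layoutreturn_ops, so they
 * are encoded with, and freed after, the compound.
 */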
2623 static int
2624 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2625 {
2626 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2627 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2628 
2629 	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2630 	if (!ff_args)
2631 		goto out_nomem;
2632 	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2633 	if (!ff_args->pages[0])
2634 		goto out_nomem_free;
2635 
2636 	INIT_LIST_HEAD(&ff_args->errors);
2637 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2638 			&args->range, &ff_args->errors,
2639 			FF_LAYOUTRETURN_MAXERR);
2640 
2641 	spin_lock(&args->inode->i_lock);
2642 	ff_args->num_dev = ff_layout_mirror_prepare_stats(
2643 		&ff_layout->generic_hdr, &ff_args->devinfo[0],
2644 		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2645 	spin_unlock(&args->inode->i_lock);
2646 
2647 	args->ld_private->ops = &layoutreturn_ops;
2648 	args->ld_private->data = ff_args;
2649 	return 0;
2650 out_nomem_free:
2651 	kfree(ff_args);
2652 out_nomem:
2653 	return -ENOMEM;
2654 }
2655 
2656 #ifdef CONFIG_NFS_V4_2
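/*
 * Report accumulated DS errors to the MDS using LAYOUTERROR (NFSv4.2 only),
 * in batches of at most NFS42_LAYOUTERROR_MAX errors per call.
 */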
2657 void
2658 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2659 {
2660 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2661 	struct nfs42_layout_error *errors;
2662 	LIST_HEAD(head);
2663 
2664 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2665 		return;
2666 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2667 	if (list_empty(&head))
2668 		return;
2669 
2670 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2671 			       nfs_io_gfp_mask());
2672 	if (errors != NULL) {
2673 		const struct nfs4_ff_layout_ds_err *pos;
2674 		size_t n = 0;
2675 
2676 		list_for_each_entry(pos, &head, list) {
2677 			errors[n].offset = pos->offset;
2678 			errors[n].length = pos->length;
2679 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2680 			errors[n].errors[0].dev_id = pos->deviceid;
2681 			errors[n].errors[0].status = pos->status;
2682 			errors[n].errors[0].opnum = pos->opnum;
2683 			n++;
2684 			if (!list_is_last(&pos->list, &head) &&
2685 			    n < NFS42_LAYOUTERROR_MAX)
2686 				continue;
2687 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2688 				break;
2689 			n = 0;
2690 		}
2691 		kfree(errors);
2692 	}
2693 	ff_layout_free_ds_ioerr(&head);
2694 }
2695 #else
2696 void
2697 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2698 {
2699 }
2700 #endif
2701 
2702 static int
2703 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2704 {
2705 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2706 
2707 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2708 }
2709 
2710 static size_t
2711 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2712 			  const int buflen)
2713 {
2714 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2715 	const struct in6_addr *addr = &sin6->sin6_addr;
2716 
2717 	/*
2718 	 * RFC 4291, Section 2.2.2
2719 	 *
2720 	 * Shorthanded ANY address
2721 	 */
2722 	if (ipv6_addr_any(addr))
2723 		return snprintf(buf, buflen, "::");
2724 
2725 	/*
2726 	 * RFC 4291, Section 2.2.2
2727 	 *
2728 	 * Shorthanded loopback address
2729 	 */
2730 	if (ipv6_addr_loopback(addr))
2731 		return snprintf(buf, buflen, "::1");
2732 
2733 	/*
2734 	 * RFC 4291, Section 2.2.3
2735 	 *
2736 	 * Special presentation address format for mapped v4
2737 	 * addresses.
2738 	 */
2739 	if (ipv6_addr_v4mapped(addr))
2740 		return snprintf(buf, buflen, "::ffff:%pI4",
2741 					&addr->s6_addr32[3]);
2742 
2743 	/*
2744 	 * RFC 4291, Section 2.2.1
2745 	 */
2746 	return snprintf(buf, buflen, "%pI6c", addr);
2747 }
2748 
2749 /* Derived from rpc_sockaddr2uaddr */
2750 static void
2751 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2752 {
2753 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2754 	char portbuf[RPCBIND_MAXUADDRPLEN];
2755 	char addrbuf[RPCBIND_MAXUADDRLEN];
2756 	unsigned short port;
2757 	int len, netid_len;
2758 	__be32 *p;
2759 
2760 	switch (sap->sa_family) {
2761 	case AF_INET:
2762 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2763 			return;
2764 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2765 		break;
2766 	case AF_INET6:
2767 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2768 			return;
2769 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2770 		break;
2771 	default:
2772 		WARN_ON_ONCE(1);
2773 		return;
2774 	}
2775 
2776 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2777 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2778 
2779 	netid_len = strlen(da->da_netid);
2780 	p = xdr_reserve_space(xdr, 4 + netid_len);
2781 	xdr_encode_opaque(p, da->da_netid, netid_len);
2782 
2783 	p = xdr_reserve_space(xdr, 4 + len);
2784 	xdr_encode_opaque(p, addrbuf, len);
2785 }
2786 
2787 static void
2788 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2789 			 ktime_t t)
2790 {
2791 	struct timespec64 ts;
2792 	__be32 *p;
2793 
2794 	p = xdr_reserve_space(xdr, 12);
2795 	ts = ktime_to_timespec64(t);
2796 	p = xdr_encode_hyper(p, ts.tv_sec);
2797 	*p++ = cpu_to_be32(ts.tv_nsec);
2798 }
2799 
2800 static void
2801 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2802 			    struct nfs4_ff_io_stat *stat)
2803 {
2804 	__be32 *p;
2805 
2806 	p = xdr_reserve_space(xdr, 5 * 8);
2807 	p = xdr_encode_hyper(p, stat->ops_requested);
2808 	p = xdr_encode_hyper(p, stat->bytes_requested);
2809 	p = xdr_encode_hyper(p, stat->ops_completed);
2810 	p = xdr_encode_hyper(p, stat->bytes_completed);
2811 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2812 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2813 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2814 }
2815 
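/*
 * Encode one ff_layoutupdate4 entry: the DS netaddr and filehandle, the read
 * and write latency counters (sampled under the mirror lock), the time the
 * stripe has been collecting stats, and a trailing boolean that this client
 * always encodes as false.
 */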
2816 static void
2817 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2818 			      const struct nfs42_layoutstat_devinfo *devinfo,
2819 			      struct nfs4_ff_layout_ds_stripe *dss_info)
2820 {
2821 	struct nfs4_pnfs_ds_addr *da;
2822 	struct nfs4_pnfs_ds *ds = dss_info->mirror_ds->ds;
2823 	struct nfs_fh *fh = &dss_info->fh_versions[0];
2824 	__be32 *p;
2825 
2826 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2827 	dprintk("%s: DS %s: encoding address %s\n",
2828 		__func__, ds->ds_remotestr, da->da_remotestr);
2829 	/* netaddr4 */
2830 	ff_layout_encode_netaddr(xdr, da);
2831 	/* nfs_fh4 */
2832 	p = xdr_reserve_space(xdr, 4 + fh->size);
2833 	xdr_encode_opaque(p, fh->data, fh->size);
2834 	/* ff_io_latency4 read */
2835 	spin_lock(&dss_info->mirror->lock);
2836 	ff_layout_encode_io_latency(xdr,
2837 				    &dss_info->read_stat.io_stat);
2838 	/* ff_io_latency4 write */
2839 	ff_layout_encode_io_latency(xdr,
2840 				    &dss_info->write_stat.io_stat);
2841 	spin_unlock(&dss_info->mirror->lock);
2842 	/* nfstime4 */
2843 	ff_layout_encode_nfstime(xdr,
2844 				 ktime_sub(ktime_get(),
2845 					   dss_info->start_time));
2846 	/* bool */
2847 	p = xdr_reserve_space(xdr, 4);
2848 	*p = cpu_to_be32(false);
2849 }
2850 
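/*
 * Encode the layoutstats body for one device. The length word is reserved up
 * front and back-filled afterwards: (xdr->p - start - 1) is the number of
 * 32-bit words written after the length slot, hence the multiply by 4.
 */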
2851 static void
2852 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2853 			     const struct nfs4_xdr_opaque_data *opaque)
2854 {
2855 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2856 			struct nfs42_layoutstat_devinfo, ld_private);
2857 	__be32 *start;
2858 
2859 	/* layoutupdate length */
2860 	start = xdr_reserve_space(xdr, 4);
2861 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2862 
2863 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2864 }
2865 
2866 static void
2867 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2868 {
2869 	struct nfs4_ff_layout_ds_stripe *dss_info = opaque->data;
2870 	struct nfs4_ff_layout_mirror *mirror = dss_info->mirror;
2871 
2872 	ff_layout_put_mirror(mirror);
2873 }
2874 
2875 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2876 	.encode = ff_layout_encode_layoutstats,
2877 	.free	= ff_layout_free_layoutstats,
2878 };
2879 
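/*
 * Walk every mirror/DS stripe in the layout and fill in up to dev_limit
 * devinfo entries. For LAYOUTSTATS a stripe is only reported if the mirror's
 * NFS4_FF_MIRROR_STAT_AVAIL bit was set (and the bit is consumed here);
 * LAYOUTRETURN reports everything. Each entry takes a mirror reference that
 * is dropped by the ld_private free op.
 */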
2880 static int
2881 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2882 			       struct nfs42_layoutstat_devinfo *devinfo,
2883 			       int dev_limit, enum nfs4_ff_op_type type)
2884 {
2885 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2886 	struct nfs4_ff_layout_mirror *mirror;
2887 	struct nfs4_ff_layout_ds_stripe *dss_info;
2888 	struct nfs4_deviceid_node *dev;
2889 	int i = 0, dss_id;
2890 
2891 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2892 		for (dss_id = 0; dss_id < mirror->dss_count; ++dss_id) {
2893 			dss_info = &mirror->dss[dss_id];
2894 			if (i >= dev_limit)
2895 				break;
2896 			if (IS_ERR_OR_NULL(dss_info->mirror_ds))
2897 				continue;
2898 			if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2899 						&mirror->flags) &&
2900 			    type != NFS4_FF_OP_LAYOUTRETURN)
2901 				continue;
2902 			/* mirror refcount put in cleanup_layoutstats */
2903 			if (!refcount_inc_not_zero(&mirror->ref))
2904 				continue;
2905 			dev = &dss_info->mirror_ds->id_node;
2906 			memcpy(&devinfo->dev_id,
2907 			       &dev->deviceid,
2908 			       NFS4_DEVICEID4_SIZE);
2909 			devinfo->offset = 0;
2910 			devinfo->length = NFS4_MAX_UINT64;
2911 			spin_lock(&mirror->lock);
2912 			devinfo->read_count =
2913 			    dss_info->read_stat.io_stat.ops_completed;
2914 			devinfo->read_bytes =
2915 			    dss_info->read_stat.io_stat.bytes_completed;
2916 			devinfo->write_count =
2917 			    dss_info->write_stat.io_stat.ops_completed;
2918 			devinfo->write_bytes =
2919 			    dss_info->write_stat.io_stat.bytes_completed;
2920 			spin_unlock(&mirror->lock);
2921 			devinfo->layout_type = LAYOUT_FLEX_FILES;
2922 			devinfo->ld_private.ops = &layoutstat_ops;
2923 			devinfo->ld_private.data = &mirror->dss[dss_id];
2924 
2925 			devinfo++;
2926 			i++;
2927 		}
2928 	}
2929 	return i;
2930 }
2931 
2932 static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2933 {
2934 	struct pnfs_layout_hdr *lo;
2935 	struct nfs4_flexfile_layout *ff_layout;
2936 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2937 
2938 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2939 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2940 				      nfs_io_gfp_mask());
2941 	if (!args->devinfo)
2942 		return -ENOMEM;
2943 
2944 	spin_lock(&args->inode->i_lock);
2945 	lo = NFS_I(args->inode)->layout;
2946 	if (lo && pnfs_layout_is_valid(lo)) {
2947 		ff_layout = FF_LAYOUT_FROM_HDR(lo);
2948 		args->num_dev = ff_layout_mirror_prepare_stats(
2949 			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2950 			NFS4_FF_OP_LAYOUTSTATS);
2951 	} else
2952 		args->num_dev = 0;
2953 	spin_unlock(&args->inode->i_lock);
2954 	if (!args->num_dev) {
2955 		kfree(args->devinfo);
2956 		args->devinfo = NULL;
2957 		return -ENOENT;
2958 	}
2959 
2960 	return 0;
2961 }
2962 
2963 static int
2964 ff_layout_set_layoutdriver(struct nfs_server *server,
2965 		const struct nfs_fh *dummy)
2966 {
2967 #if IS_ENABLED(CONFIG_NFS_V4_2)
2968 	server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
2969 #endif
2970 	return 0;
2971 }
2972 
2973 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2974 	.setup_ds_info		= ff_layout_setup_ds_info,
2975 	.release_ds_info	= ff_layout_release_ds_info,
2976 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2977 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2978 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2979 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2980 	.commit_pagelist	= ff_layout_commit_pagelist,
2981 };
2982 
2983 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2984 	.id			= LAYOUT_FLEX_FILES,
2985 	.name			= "LAYOUT_FLEX_FILES",
2986 	.owner			= THIS_MODULE,
2987 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2988 	.max_layoutget_response	= 4096, /* 1 page or so... */
2989 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2990 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2991 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2992 	.alloc_lseg		= ff_layout_alloc_lseg,
2993 	.free_lseg		= ff_layout_free_lseg,
2994 	.add_lseg		= ff_layout_add_lseg,
2995 	.pg_read_ops		= &ff_layout_pg_read_ops,
2996 	.pg_write_ops		= &ff_layout_pg_write_ops,
2997 	.get_ds_info		= ff_layout_get_ds_info,
2998 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2999 	.read_pagelist		= ff_layout_read_pagelist,
3000 	.write_pagelist		= ff_layout_write_pagelist,
3001 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
3002 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
3003 	.sync			= pnfs_nfs_generic_sync,
3004 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
3005 	.cancel_io		= ff_layout_cancel_io,
3006 };
3007 
3008 static int __init nfs4flexfilelayout_init(void)
3009 {
3010 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
3011 	       __func__);
3012 	return pnfs_register_layoutdriver(&flexfilelayout_type);
3013 }
3014 
3015 static void __exit nfs4flexfilelayout_exit(void)
3016 {
3017 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
3018 	       __func__);
3019 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
3020 }
3021 
3022 MODULE_ALIAS("nfs-layouttype4-4");
3023 
3024 MODULE_LICENSE("GPL");
3025 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
3026 
3027 module_init(nfs4flexfilelayout_init);
3028 module_exit(nfs4flexfilelayout_exit);
3029 
3030 module_param(io_maxretrans, ushort, 0644);
3031 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
3032 			"retries an I/O request before returning an error.");
3033