xref: /linux/fs/nfs/flexfilelayout/flexfilelayout.c (revision 2d07c6c2098805054f84ce642587093bb8feaf8c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Module for pnfs flexfile layout driver.
4  *
5  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6  *
7  * Tao Peng <bergwolf@primarydata.com>
8  */
9 
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/file.h>
15 #include <linux/sched/mm.h>
16 
17 #include <linux/sunrpc/metrics.h>
18 
19 #include "flexfilelayout.h"
20 #include "../nfs4session.h"
21 #include "../nfs4idmap.h"
22 #include "../internal.h"
23 #include "../delegation.h"
24 #include "../nfs4trace.h"
25 #include "../iostat.h"
26 #include "../nfs.h"
27 #include "../nfs42.h"
28 
29 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
30 
31 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
32 #define FF_LAYOUTRETURN_MAXERR 20
33 
34 enum nfs4_ff_op_type {
35 	NFS4_FF_OP_LAYOUTSTATS,
36 	NFS4_FF_OP_LAYOUTRETURN,
37 };
38 
39 static unsigned short io_maxretrans;
40 
41 static const struct pnfs_commit_ops ff_layout_commit_ops;
42 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
43 		struct nfs_pgio_header *hdr);
44 static int
45 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
46 			       struct nfs42_layoutstat_devinfo *devinfo,
47 			       int dev_limit, enum nfs4_ff_op_type type);
48 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
49 			      const struct nfs42_layoutstat_devinfo *devinfo,
50 			      struct nfs4_ff_layout_ds_stripe *dss_info);
51 
52 static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode * inode,gfp_t gfp_flags)53 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
54 {
55 	struct nfs4_flexfile_layout *ffl;
56 
57 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
58 	if (ffl) {
59 		pnfs_init_ds_commit_info(&ffl->commit_info);
60 		INIT_LIST_HEAD(&ffl->error_list);
61 		INIT_LIST_HEAD(&ffl->mirrors);
62 		ffl->last_report_time = ktime_get();
63 		ffl->commit_info.ops = &ff_layout_commit_ops;
64 		return &ffl->generic_hdr;
65 	} else
66 		return NULL;
67 }
68 
69 static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr * lo)70 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
71 {
72 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
73 	struct nfs4_ff_layout_ds_err *err, *n;
74 
75 	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
76 		list_del(&err->list);
77 		kfree(err);
78 	}
79 	kfree_rcu(ffl, generic_hdr.plh_rcu);
80 }
81 
decode_pnfs_stateid(struct xdr_stream * xdr,nfs4_stateid * stateid)82 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
83 {
84 	__be32 *p;
85 
86 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
87 	if (unlikely(p == NULL))
88 		return -ENOBUFS;
89 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
90 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
91 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
92 		p[0], p[1], p[2], p[3]);
93 	return 0;
94 }
95 
decode_deviceid(struct xdr_stream * xdr,struct nfs4_deviceid * devid)96 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
97 {
98 	__be32 *p;
99 
100 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
101 	if (unlikely(!p))
102 		return -ENOBUFS;
103 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
104 	nfs4_print_deviceid(devid);
105 	return 0;
106 }
107 
decode_nfs_fh(struct xdr_stream * xdr,struct nfs_fh * fh)108 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
109 {
110 	__be32 *p;
111 
112 	p = xdr_inline_decode(xdr, 4);
113 	if (unlikely(!p))
114 		return -ENOBUFS;
115 	fh->size = be32_to_cpup(p++);
116 	if (fh->size > NFS_MAXFHSIZE) {
117 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
118 		       fh->size);
119 		return -EOVERFLOW;
120 	}
121 	/* fh.data */
122 	p = xdr_inline_decode(xdr, fh->size);
123 	if (unlikely(!p))
124 		return -ENOBUFS;
125 	memcpy(&fh->data, p, fh->size);
126 	dprintk("%s: fh len %d\n", __func__, fh->size);
127 
128 	return 0;
129 }
130 
131 /*
132  * Currently only stringified uids and gids are accepted.
133  * I.e., kerberos is not supported to the DSes, so no pricipals.
134  *
135  * That means that one common function will suffice, but when
136  * principals are added, this should be split to accomodate
137  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
138  */
139 static int
decode_name(struct xdr_stream * xdr,u32 * id)140 decode_name(struct xdr_stream *xdr, u32 *id)
141 {
142 	__be32 *p;
143 	int len;
144 
145 	/* opaque_length(4)*/
146 	p = xdr_inline_decode(xdr, 4);
147 	if (unlikely(!p))
148 		return -ENOBUFS;
149 	len = be32_to_cpup(p++);
150 	if (len < 0)
151 		return -EINVAL;
152 
153 	dprintk("%s: len %u\n", __func__, len);
154 
155 	/* opaque body */
156 	p = xdr_inline_decode(xdr, len);
157 	if (unlikely(!p))
158 		return -ENOBUFS;
159 
160 	if (!nfs_map_string_to_numeric((char *)p, len, id))
161 		return -EINVAL;
162 
163 	return 0;
164 }
165 
166 static struct nfsd_file *
ff_local_open_fh(struct pnfs_layout_segment * lseg,u32 ds_idx,u32 dss_id,struct nfs_client * clp,const struct cred * cred,struct nfs_fh * fh,fmode_t mode)167 ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx, u32 dss_id,
168 		 struct nfs_client *clp, const struct cred *cred,
169 		 struct nfs_fh *fh, fmode_t mode)
170 {
171 #if IS_ENABLED(CONFIG_NFS_LOCALIO)
172 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
173 
174 	return nfs_local_open_fh(clp, cred, fh, &mirror->dss[dss_id].nfl, mode);
175 #else
176 	return NULL;
177 #endif
178 }
179 
ff_dss_match_fh(const struct nfs4_ff_layout_ds_stripe * dss1,const struct nfs4_ff_layout_ds_stripe * dss2)180 static bool ff_dss_match_fh(const struct nfs4_ff_layout_ds_stripe *dss1,
181 		const struct nfs4_ff_layout_ds_stripe *dss2)
182 {
183 	int i, j;
184 
185 	if (dss1->fh_versions_cnt != dss2->fh_versions_cnt)
186 		return false;
187 
188 	for (i = 0; i < dss1->fh_versions_cnt; i++) {
189 		bool found_fh = false;
190 		for (j = 0; j < dss2->fh_versions_cnt; j++) {
191 			if (nfs_compare_fh(&dss1->fh_versions[i],
192 					&dss2->fh_versions[j]) == 0) {
193 				found_fh = true;
194 				break;
195 			}
196 		}
197 		if (!found_fh)
198 			return false;
199 	}
200 	return true;
201 }
202 
ff_mirror_match_fh(const struct nfs4_ff_layout_mirror * m1,const struct nfs4_ff_layout_mirror * m2)203 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
204 		const struct nfs4_ff_layout_mirror *m2)
205 {
206 	u32 dss_id;
207 
208 	if (m1->dss_count != m2->dss_count)
209 		return false;
210 
211 	for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
212 		if (!ff_dss_match_fh(&m1->dss[dss_id], &m2->dss[dss_id]))
213 			return false;
214 
215 	return true;
216 }
217 
ff_mirror_match_devid(const struct nfs4_ff_layout_mirror * m1,const struct nfs4_ff_layout_mirror * m2)218 static bool ff_mirror_match_devid(const struct nfs4_ff_layout_mirror *m1,
219 		const struct nfs4_ff_layout_mirror *m2)
220 {
221 	u32 dss_id;
222 
223 	if (m1->dss_count != m2->dss_count)
224 		return false;
225 
226 	for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
227 		if (memcmp(&m1->dss[dss_id].devid,
228 			   &m2->dss[dss_id].devid,
229 			   sizeof(m1->dss[dss_id].devid)) != 0)
230 			return false;
231 
232 	return true;
233 }
234 
235 static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr * lo,struct nfs4_ff_layout_mirror * mirror)236 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
237 		struct nfs4_ff_layout_mirror *mirror)
238 {
239 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
240 	struct nfs4_ff_layout_mirror *pos;
241 	struct inode *inode = lo->plh_inode;
242 
243 	spin_lock(&inode->i_lock);
244 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
245 		if (!ff_mirror_match_devid(mirror, pos))
246 			continue;
247 		if (!ff_mirror_match_fh(mirror, pos))
248 			continue;
249 		if (refcount_inc_not_zero(&pos->ref)) {
250 			spin_unlock(&inode->i_lock);
251 			return pos;
252 		}
253 	}
254 	list_add(&mirror->mirrors, &ff_layout->mirrors);
255 	mirror->layout = lo;
256 	spin_unlock(&inode->i_lock);
257 	return mirror;
258 }
259 
260 static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror * mirror)261 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
262 {
263 	struct inode *inode;
264 	if (mirror->layout == NULL)
265 		return;
266 	inode = mirror->layout->plh_inode;
267 	spin_lock(&inode->i_lock);
268 	list_del(&mirror->mirrors);
269 	spin_unlock(&inode->i_lock);
270 	mirror->layout = NULL;
271 }
272 
ff_layout_alloc_mirror(u32 dss_count,gfp_t gfp_flags)273 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(u32 dss_count,
274 							    gfp_t gfp_flags)
275 {
276 	struct nfs4_ff_layout_mirror *mirror;
277 
278 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
279 	if (mirror == NULL)
280 		return NULL;
281 
282 	spin_lock_init(&mirror->lock);
283 	refcount_set(&mirror->ref, 1);
284 	INIT_LIST_HEAD(&mirror->mirrors);
285 
286 	mirror->dss_count = dss_count;
287 	mirror->dss =
288 		kcalloc(dss_count, sizeof(struct nfs4_ff_layout_ds_stripe),
289 			gfp_flags);
290 	if (mirror->dss == NULL) {
291 		kfree(mirror);
292 		return NULL;
293 	}
294 
295 	for (u32 dss_id = 0; dss_id < mirror->dss_count; dss_id++)
296 		nfs_localio_file_init(&mirror->dss[dss_id].nfl);
297 
298 	return mirror;
299 }
300 
ff_layout_free_mirror(struct nfs4_ff_layout_mirror * mirror)301 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
302 {
303 	const struct cred	*cred;
304 	u32 dss_id;
305 
306 	ff_layout_remove_mirror(mirror);
307 
308 	for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
309 		kfree(mirror->dss[dss_id].fh_versions);
310 		cred = rcu_access_pointer(mirror->dss[dss_id].ro_cred);
311 		put_cred(cred);
312 		cred = rcu_access_pointer(mirror->dss[dss_id].rw_cred);
313 		put_cred(cred);
314 		nfs_close_local_fh(&mirror->dss[dss_id].nfl);
315 		nfs4_ff_layout_put_deviceid(mirror->dss[dss_id].mirror_ds);
316 	}
317 
318 	kfree(mirror->dss);
319 	kfree(mirror);
320 }
321 
ff_layout_put_mirror(struct nfs4_ff_layout_mirror * mirror)322 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
323 {
324 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
325 		ff_layout_free_mirror(mirror);
326 }
327 
ff_layout_free_mirror_array(struct nfs4_ff_layout_segment * fls)328 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
329 {
330 	u32 i;
331 
332 	for (i = 0; i < fls->mirror_array_cnt; i++)
333 		ff_layout_put_mirror(fls->mirror_array[i]);
334 }
335 
_ff_layout_free_lseg(struct nfs4_ff_layout_segment * fls)336 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
337 {
338 	if (fls) {
339 		ff_layout_free_mirror_array(fls);
340 		kfree(fls);
341 	}
342 }
343 
344 static bool
ff_lseg_match_mirrors(struct pnfs_layout_segment * l1,struct pnfs_layout_segment * l2)345 ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
346 		struct pnfs_layout_segment *l2)
347 {
348 	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
349 	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
350 	u32 i;
351 
352 	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
353 		return false;
354 	for (i = 0; i < fl1->mirror_array_cnt; i++) {
355 		if (fl1->mirror_array[i] != fl2->mirror_array[i])
356 			return false;
357 	}
358 	return true;
359 }
360 
361 static bool
ff_lseg_range_is_after(const struct pnfs_layout_range * l1,const struct pnfs_layout_range * l2)362 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
363 		const struct pnfs_layout_range *l2)
364 {
365 	u64 end1, end2;
366 
367 	if (l1->iomode != l2->iomode)
368 		return l1->iomode != IOMODE_READ;
369 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
370 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
371 	if (end1 < l2->offset)
372 		return false;
373 	if (end2 < l1->offset)
374 		return true;
375 	return l2->offset <= l1->offset;
376 }
377 
378 static bool
ff_lseg_merge(struct pnfs_layout_segment * new,struct pnfs_layout_segment * old)379 ff_lseg_merge(struct pnfs_layout_segment *new,
380 		struct pnfs_layout_segment *old)
381 {
382 	u64 new_end, old_end;
383 
384 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
385 		return false;
386 	if (new->pls_range.iomode != old->pls_range.iomode)
387 		return false;
388 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
389 			old->pls_range.length);
390 	if (old_end < new->pls_range.offset)
391 		return false;
392 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
393 			new->pls_range.length);
394 	if (new_end < old->pls_range.offset)
395 		return false;
396 	if (!ff_lseg_match_mirrors(new, old))
397 		return false;
398 
399 	/* Mergeable: copy info from 'old' to 'new' */
400 	if (new_end < old_end)
401 		new_end = old_end;
402 	if (new->pls_range.offset < old->pls_range.offset)
403 		new->pls_range.offset = old->pls_range.offset;
404 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
405 			new_end);
406 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
407 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
408 	return true;
409 }
410 
411 static void
ff_layout_add_lseg(struct pnfs_layout_hdr * lo,struct pnfs_layout_segment * lseg,struct list_head * free_me)412 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
413 		struct pnfs_layout_segment *lseg,
414 		struct list_head *free_me)
415 {
416 	pnfs_generic_layout_insert_lseg(lo, lseg,
417 			ff_lseg_range_is_after,
418 			ff_lseg_merge,
419 			free_me);
420 }
421 
ff_mirror_efficiency_sum(const struct nfs4_ff_layout_mirror * mirror)422 static u32 ff_mirror_efficiency_sum(const struct nfs4_ff_layout_mirror *mirror)
423 {
424 	u32 dss_id, sum = 0;
425 
426 	for (dss_id = 0; dss_id < mirror->dss_count; dss_id++)
427 		sum += mirror->dss[dss_id].efficiency;
428 
429 	return sum;
430 }
431 
ff_layout_sort_mirrors(struct nfs4_ff_layout_segment * fls)432 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
433 {
434 	int i, j;
435 
436 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
437 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
438 			if (ff_mirror_efficiency_sum(fls->mirror_array[i]) <
439 			    ff_mirror_efficiency_sum(fls->mirror_array[j]))
440 				swap(fls->mirror_array[i],
441 				     fls->mirror_array[j]);
442 	}
443 }
444 
445 static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr * lh,struct nfs4_layoutget_res * lgr,gfp_t gfp_flags)446 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
447 		     struct nfs4_layoutget_res *lgr,
448 		     gfp_t gfp_flags)
449 {
450 	struct pnfs_layout_segment *ret;
451 	struct nfs4_ff_layout_segment *fls = NULL;
452 	struct xdr_stream stream;
453 	struct xdr_buf buf;
454 	struct folio *scratch;
455 	u64 stripe_unit;
456 	u32 mirror_array_cnt;
457 	__be32 *p;
458 	int i, rc;
459 	struct nfs4_ff_layout_ds_stripe *dss_info;
460 
461 	dprintk("--> %s\n", __func__);
462 	scratch = folio_alloc(gfp_flags, 0);
463 	if (!scratch)
464 		return ERR_PTR(-ENOMEM);
465 
466 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
467 			      lgr->layoutp->len);
468 	xdr_set_scratch_folio(&stream, scratch);
469 
470 	/* stripe unit and mirror_array_cnt */
471 	rc = -EIO;
472 	p = xdr_inline_decode(&stream, 8 + 4);
473 	if (!p)
474 		goto out_err_free;
475 
476 	p = xdr_decode_hyper(p, &stripe_unit);
477 	mirror_array_cnt = be32_to_cpup(p++);
478 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
479 		stripe_unit, mirror_array_cnt);
480 
481 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
482 	    mirror_array_cnt == 0)
483 		goto out_err_free;
484 
485 	rc = -ENOMEM;
486 	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
487 			gfp_flags);
488 	if (!fls)
489 		goto out_err_free;
490 
491 	fls->mirror_array_cnt = mirror_array_cnt;
492 	fls->stripe_unit = stripe_unit;
493 
494 	u32 dss_count = 0;
495 	for (i = 0; i < fls->mirror_array_cnt; i++) {
496 		struct nfs4_ff_layout_mirror *mirror;
497 		struct cred *kcred;
498 		const struct cred __rcu *cred;
499 		kuid_t uid;
500 		kgid_t gid;
501 		u32 fh_count, id;
502 		int j, dss_id;
503 
504 		rc = -EIO;
505 		p = xdr_inline_decode(&stream, 4);
506 		if (!p)
507 			goto out_err_free;
508 
509 		// Ensure all mirrors have same stripe count.
510 		if (dss_count == 0)
511 			dss_count = be32_to_cpup(p);
512 		else if (dss_count != be32_to_cpup(p))
513 			goto out_err_free;
514 
515 		if (dss_count > NFS4_FLEXFILE_LAYOUT_MAX_STRIPE_CNT ||
516 		    dss_count == 0)
517 			goto out_err_free;
518 
519 		if (dss_count > 1 && stripe_unit == 0)
520 			goto out_err_free;
521 
522 		fls->mirror_array[i] = ff_layout_alloc_mirror(dss_count, gfp_flags);
523 		if (fls->mirror_array[i] == NULL) {
524 			rc = -ENOMEM;
525 			goto out_err_free;
526 		}
527 
528 		for (dss_id = 0; dss_id < dss_count; dss_id++) {
529 			dss_info = &fls->mirror_array[i]->dss[dss_id];
530 			dss_info->mirror = fls->mirror_array[i];
531 
532 			/* deviceid */
533 			rc = decode_deviceid(&stream, &dss_info->devid);
534 			if (rc)
535 				goto out_err_free;
536 
537 			/* efficiency */
538 			rc = -EIO;
539 			p = xdr_inline_decode(&stream, 4);
540 			if (!p)
541 				goto out_err_free;
542 			dss_info->efficiency = be32_to_cpup(p);
543 
544 			/* stateid */
545 			rc = decode_pnfs_stateid(&stream, &dss_info->stateid);
546 			if (rc)
547 				goto out_err_free;
548 
549 			/* fh */
550 			rc = -EIO;
551 			p = xdr_inline_decode(&stream, 4);
552 			if (!p)
553 				goto out_err_free;
554 			fh_count = be32_to_cpup(p);
555 
556 			dss_info->fh_versions =
557 			    kcalloc(fh_count, sizeof(struct nfs_fh),
558 				    gfp_flags);
559 			if (dss_info->fh_versions == NULL) {
560 				rc = -ENOMEM;
561 				goto out_err_free;
562 			}
563 
564 			for (j = 0; j < fh_count; j++) {
565 				rc = decode_nfs_fh(&stream,
566 						   &dss_info->fh_versions[j]);
567 				if (rc)
568 					goto out_err_free;
569 			}
570 
571 			dss_info->fh_versions_cnt = fh_count;
572 
573 			/* user */
574 			rc = decode_name(&stream, &id);
575 			if (rc)
576 				goto out_err_free;
577 
578 			uid = make_kuid(&init_user_ns, id);
579 
580 			/* group */
581 			rc = decode_name(&stream, &id);
582 			if (rc)
583 				goto out_err_free;
584 
585 			gid = make_kgid(&init_user_ns, id);
586 
587 			if (gfp_flags & __GFP_FS)
588 				kcred = prepare_kernel_cred(&init_task);
589 			else {
590 				unsigned int nofs_flags = memalloc_nofs_save();
591 
592 				kcred = prepare_kernel_cred(&init_task);
593 				memalloc_nofs_restore(nofs_flags);
594 			}
595 			rc = -ENOMEM;
596 			if (!kcred)
597 				goto out_err_free;
598 			kcred->fsuid = uid;
599 			kcred->fsgid = gid;
600 			cred = RCU_INITIALIZER(kcred);
601 
602 			if (lgr->range.iomode == IOMODE_READ)
603 				rcu_assign_pointer(dss_info->ro_cred, cred);
604 			else
605 				rcu_assign_pointer(dss_info->rw_cred, cred);
606 		}
607 
608 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
609 		if (mirror != fls->mirror_array[i]) {
610 			for (dss_id = 0; dss_id < dss_count; dss_id++) {
611 				dss_info = &fls->mirror_array[i]->dss[dss_id];
612 				/* swap cred ptrs so free_mirror will clean up old */
613 				if (lgr->range.iomode == IOMODE_READ) {
614 					cred = xchg(&mirror->dss[dss_id].ro_cred,
615 						    dss_info->ro_cred);
616 					rcu_assign_pointer(dss_info->ro_cred, cred);
617 				} else {
618 					cred = xchg(&mirror->dss[dss_id].rw_cred,
619 						    dss_info->rw_cred);
620 					rcu_assign_pointer(dss_info->rw_cred, cred);
621 				}
622 			}
623 			ff_layout_free_mirror(fls->mirror_array[i]);
624 			fls->mirror_array[i] = mirror;
625 		}
626 
627 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
628 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
629 			from_kuid(&init_user_ns, uid),
630 			from_kgid(&init_user_ns, gid));
631 	}
632 
633 	p = xdr_inline_decode(&stream, 4);
634 	if (!p)
635 		goto out_sort_mirrors;
636 	fls->flags = be32_to_cpup(p);
637 
638 	p = xdr_inline_decode(&stream, 4);
639 	if (!p)
640 		goto out_sort_mirrors;
641 	for (i=0; i < fls->mirror_array_cnt; i++)
642 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
643 
644 out_sort_mirrors:
645 	ff_layout_sort_mirrors(fls);
646 	ret = &fls->generic_hdr;
647 	dprintk("<-- %s (success)\n", __func__);
648 out_free_page:
649 	folio_put(scratch);
650 	return ret;
651 out_err_free:
652 	_ff_layout_free_lseg(fls);
653 	ret = ERR_PTR(rc);
654 	dprintk("<-- %s (%d)\n", __func__, rc);
655 	goto out_free_page;
656 }
657 
658 static void
ff_layout_free_lseg(struct pnfs_layout_segment * lseg)659 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
660 {
661 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
662 
663 	dprintk("--> %s\n", __func__);
664 
665 	if (lseg->pls_range.iomode == IOMODE_RW) {
666 		struct nfs4_flexfile_layout *ffl;
667 		struct inode *inode;
668 
669 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
670 		inode = ffl->generic_hdr.plh_inode;
671 		spin_lock(&inode->i_lock);
672 		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
673 		spin_unlock(&inode->i_lock);
674 	}
675 	_ff_layout_free_lseg(fls);
676 }
677 
calc_commit_idx(struct pnfs_layout_segment * lseg,u32 mirror_idx,u32 dss_id)678 static u32 calc_commit_idx(struct pnfs_layout_segment *lseg,
679 			   u32 mirror_idx, u32 dss_id)
680 {
681 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
682 
683 	return (mirror_idx * flseg->mirror_array[0]->dss_count) + dss_id;
684 }
685 
calc_mirror_idx_from_commit(struct pnfs_layout_segment * lseg,u32 commit_index)686 static u32 calc_mirror_idx_from_commit(struct pnfs_layout_segment *lseg,
687 				       u32 commit_index)
688 {
689 	return commit_index / FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
690 }
691 
calc_dss_id_from_commit(struct pnfs_layout_segment * lseg,u32 commit_index)692 static u32 calc_dss_id_from_commit(struct pnfs_layout_segment *lseg,
693 				   u32 commit_index)
694 {
695 	return commit_index % FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
696 }
697 
698 static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer * timer,ktime_t now)699 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
700 {
701 	/* first IO request? */
702 	if (atomic_inc_return(&timer->n_ops) == 1) {
703 		timer->start_time = now;
704 	}
705 }
706 
707 static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer * timer,ktime_t now)708 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
709 {
710 	ktime_t start;
711 
712 	if (atomic_dec_return(&timer->n_ops) < 0)
713 		WARN_ON_ONCE(1);
714 
715 	start = timer->start_time;
716 	timer->start_time = now;
717 	return ktime_sub(now, start);
718 }
719 
720 static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror * mirror,u32 dss_id,struct nfs4_ff_layoutstat * layoutstat,ktime_t now)721 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
722 			    u32 dss_id,
723 			    struct nfs4_ff_layoutstat *layoutstat,
724 			    ktime_t now)
725 {
726 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
727 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
728 
729 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
730 	if (!mirror->dss[dss_id].start_time)
731 		mirror->dss[dss_id].start_time = now;
732 	if (mirror->report_interval != 0)
733 		report_interval = (s64)mirror->report_interval * 1000LL;
734 	else if (layoutstats_timer != 0)
735 		report_interval = (s64)layoutstats_timer * 1000LL;
736 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
737 			report_interval) {
738 		ffl->last_report_time = now;
739 		return true;
740 	}
741 
742 	return false;
743 }
744 
745 static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat * layoutstat,__u64 requested)746 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
747 		__u64 requested)
748 {
749 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
750 
751 	iostat->ops_requested++;
752 	iostat->bytes_requested += requested;
753 }
754 
755 static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat * layoutstat,__u64 requested,__u64 completed,ktime_t time_completed,ktime_t time_started)756 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
757 		__u64 requested,
758 		__u64 completed,
759 		ktime_t time_completed,
760 		ktime_t time_started)
761 {
762 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
763 	ktime_t completion_time = ktime_sub(time_completed, time_started);
764 	ktime_t timer;
765 
766 	iostat->ops_completed++;
767 	iostat->bytes_completed += completed;
768 	iostat->bytes_not_delivered += requested - completed;
769 
770 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
771 	iostat->total_busy_time =
772 			ktime_add(iostat->total_busy_time, timer);
773 	iostat->aggregate_completion_time =
774 			ktime_add(iostat->aggregate_completion_time,
775 					completion_time);
776 }
777 
778 static void
nfs4_ff_layout_stat_io_start_read(struct inode * inode,struct nfs4_ff_layout_mirror * mirror,u32 dss_id,__u64 requested,ktime_t now)779 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
780 		struct nfs4_ff_layout_mirror *mirror,
781 		u32 dss_id,
782 		__u64 requested, ktime_t now)
783 {
784 	bool report;
785 
786 	spin_lock(&mirror->lock);
787 	report = nfs4_ff_layoutstat_start_io(
788 		mirror, dss_id, &mirror->dss[dss_id].read_stat, now);
789 	nfs4_ff_layout_stat_io_update_requested(
790 		&mirror->dss[dss_id].read_stat, requested);
791 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
792 	spin_unlock(&mirror->lock);
793 
794 	if (report)
795 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
796 }
797 
798 static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task * task,struct nfs4_ff_layout_mirror * mirror,u32 dss_id,__u64 requested,__u64 completed)799 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
800 		struct nfs4_ff_layout_mirror *mirror,
801 		u32 dss_id,
802 		__u64 requested,
803 		__u64 completed)
804 {
805 	spin_lock(&mirror->lock);
806 	nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].read_stat,
807 			requested, completed,
808 			ktime_get(), task->tk_start);
809 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
810 	spin_unlock(&mirror->lock);
811 }
812 
813 static void
nfs4_ff_layout_stat_io_start_write(struct inode * inode,struct nfs4_ff_layout_mirror * mirror,u32 dss_id,__u64 requested,ktime_t now)814 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
815 		struct nfs4_ff_layout_mirror *mirror,
816 		u32 dss_id,
817 		__u64 requested, ktime_t now)
818 {
819 	bool report;
820 
821 	spin_lock(&mirror->lock);
822 	report = nfs4_ff_layoutstat_start_io(
823 		mirror,
824 		dss_id,
825 		&mirror->dss[dss_id].write_stat,
826 		now);
827 	nfs4_ff_layout_stat_io_update_requested(
828 		&mirror->dss[dss_id].write_stat,
829 		requested);
830 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
831 	spin_unlock(&mirror->lock);
832 
833 	if (report)
834 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
835 }
836 
837 static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task * task,struct nfs4_ff_layout_mirror * mirror,u32 dss_id,__u64 requested,__u64 completed,enum nfs3_stable_how committed)838 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
839 		struct nfs4_ff_layout_mirror *mirror,
840 		u32 dss_id,
841 		__u64 requested,
842 		__u64 completed,
843 		enum nfs3_stable_how committed)
844 {
845 	if (committed == NFS_UNSTABLE)
846 		requested = completed = 0;
847 
848 	spin_lock(&mirror->lock);
849 	nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].write_stat,
850 			requested, completed, ktime_get(), task->tk_start);
851 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
852 	spin_unlock(&mirror->lock);
853 }
854 
855 static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment * lseg,u32 idx,u32 dss_id)856 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
857 {
858 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
859 
860 	if (devid)
861 		nfs4_mark_deviceid_unavailable(devid);
862 }
863 
864 static void
ff_layout_mark_ds_reachable(struct pnfs_layout_segment * lseg,u32 idx,u32 dss_id)865 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
866 {
867 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
868 
869 	if (devid)
870 		nfs4_mark_deviceid_available(devid);
871 }
872 
873 static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment * lseg,u32 start_idx,u32 * best_idx,u32 offset,u32 * dss_id,bool check_device)874 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
875 			     u32 start_idx, u32 *best_idx,
876 			     u32 offset, u32 *dss_id,
877 			     bool check_device)
878 {
879 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
880 	struct nfs4_ff_layout_mirror *mirror;
881 	struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
882 	u32 idx;
883 
884 	/* mirrors are initially sorted by efficiency */
885 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
886 		mirror = FF_LAYOUT_COMP(lseg, idx);
887 		*dss_id = nfs4_ff_layout_calc_dss_id(
888 			fls->stripe_unit,
889 			fls->mirror_array[idx]->dss_count,
890 			offset);
891 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, *dss_id, false);
892 		if (IS_ERR(ds))
893 			continue;
894 
895 		if (check_device &&
896 		    nfs4_test_deviceid_unavailable(&mirror->dss[*dss_id].mirror_ds->id_node)) {
897 			// reinitialize the error state in case if this is the last iteration
898 			ds = ERR_PTR(-EINVAL);
899 			continue;
900 		}
901 
902 		*best_idx = idx;
903 		break;
904 	}
905 
906 	return ds;
907 }
908 
909 static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment * lseg,u32 start_idx,u32 * best_idx,u32 offset,u32 * dss_id)910 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
911 				 u32 start_idx, u32 *best_idx,
912 				 u32 offset, u32 *dss_id)
913 {
914 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
915 					    offset, dss_id, false);
916 }
917 
918 static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment * lseg,u32 start_idx,u32 * best_idx,u32 offset,u32 * dss_id)919 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
920 				   u32 start_idx, u32 *best_idx,
921 				   u32 offset, u32 *dss_id)
922 {
923 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
924 					    offset, dss_id, true);
925 }
926 
927 static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment * lseg,u32 start_idx,u32 * best_idx,u32 offset,u32 * dss_id)928 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
929 				  u32 start_idx, u32 *best_idx,
930 				  u32 offset, u32 *dss_id)
931 {
932 	struct nfs4_pnfs_ds *ds;
933 
934 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx,
935 						offset, dss_id);
936 	if (!IS_ERR(ds))
937 		return ds;
938 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx,
939 						offset, dss_id);
940 }
941 
942 static struct nfs4_pnfs_ds *
ff_layout_get_ds_for_read(struct nfs_pageio_descriptor * pgio,u32 * best_idx,u32 offset,u32 * dss_id)943 ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
944 			  u32 *best_idx,
945 			  u32 offset,
946 			  u32 *dss_id)
947 {
948 	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
949 	struct nfs4_pnfs_ds *ds;
950 
951 	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
952 					       best_idx, offset, dss_id);
953 	if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
954 		return ds;
955 	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx,
956 						 offset, dss_id);
957 }
958 
959 static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor * pgio,struct nfs_page * req,bool strict_iomode)960 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
961 		      struct nfs_page *req,
962 		      bool strict_iomode)
963 {
964 	pnfs_put_lseg(pgio->pg_lseg);
965 	pgio->pg_lseg =
966 		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
967 				   req_offset(req), req->wb_bytes, IOMODE_READ,
968 				   strict_iomode, nfs_io_gfp_mask());
969 	if (IS_ERR(pgio->pg_lseg)) {
970 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
971 		pgio->pg_lseg = NULL;
972 	}
973 }
974 
975 static bool
ff_layout_lseg_is_striped(const struct nfs4_ff_layout_segment * fls)976 ff_layout_lseg_is_striped(const struct nfs4_ff_layout_segment *fls)
977 {
978 	return fls->mirror_array[0]->dss_count > 1;
979 }
980 
981 /*
982  * ff_layout_pg_test(). Called by nfs_can_coalesce_requests()
983  *
984  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
985  * of bytes (maximum @req->wb_bytes) that can be coalesced.
986  */
987 static size_t
ff_layout_pg_test(struct nfs_pageio_descriptor * pgio,struct nfs_page * prev,struct nfs_page * req)988 ff_layout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
989 		  struct nfs_page *req)
990 {
991 	unsigned int size;
992 	u64 p_stripe, r_stripe;
993 	u32 stripe_offset;
994 	u64 segment_offset = pgio->pg_lseg->pls_range.offset;
995 	u32 stripe_unit = FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
996 
997 	/* calls nfs_generic_pg_test */
998 	size = pnfs_generic_pg_test(pgio, prev, req);
999 	if (!size)
1000 		return 0;
1001 	else if (!ff_layout_lseg_is_striped(FF_LAYOUT_LSEG(pgio->pg_lseg)))
1002 		return size;
1003 
1004 	/* see if req and prev are in the same stripe */
1005 	if (prev) {
1006 		p_stripe = (u64)req_offset(prev) - segment_offset;
1007 		r_stripe = (u64)req_offset(req) - segment_offset;
1008 		do_div(p_stripe, stripe_unit);
1009 		do_div(r_stripe, stripe_unit);
1010 
1011 		if (p_stripe != r_stripe)
1012 			return 0;
1013 	}
1014 
1015 	/* calculate remaining bytes in the current stripe */
1016 	div_u64_rem((u64)req_offset(req) - segment_offset,
1017 			stripe_unit,
1018 			&stripe_offset);
1019 	WARN_ON_ONCE(stripe_offset > stripe_unit);
1020 	if (stripe_offset >= stripe_unit)
1021 		return 0;
1022 	return min(stripe_unit - (unsigned int)stripe_offset, size);
1023 }
1024 
1025 static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor * pgio,struct nfs_page * req)1026 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
1027 			struct nfs_page *req)
1028 {
1029 	struct nfs_pgio_mirror *pgm;
1030 	struct nfs4_ff_layout_mirror *mirror;
1031 	struct nfs4_pnfs_ds *ds;
1032 	u32 ds_idx, dss_id;
1033 
1034 	if (NFS_SERVER(pgio->pg_inode)->flags &
1035 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1036 		pgio->pg_maxretrans = io_maxretrans;
1037 retry:
1038 	pnfs_generic_pg_check_layout(pgio, req);
1039 	/* Use full layout for now */
1040 	if (!pgio->pg_lseg) {
1041 		ff_layout_pg_get_read(pgio, req, false);
1042 		if (!pgio->pg_lseg)
1043 			goto out_nolseg;
1044 	}
1045 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
1046 		ff_layout_pg_get_read(pgio, req, true);
1047 		if (!pgio->pg_lseg)
1048 			goto out_nolseg;
1049 	}
1050 	/* Reset wb_nio, since getting layout segment was successful */
1051 	req->wb_nio = 0;
1052 
1053 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx,
1054 				       req_offset(req), &dss_id);
1055 	if (IS_ERR(ds)) {
1056 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
1057 			goto out_mds;
1058 		pnfs_generic_pg_cleanup(pgio);
1059 		/* Sleep for 1 second before retrying */
1060 		ssleep(1);
1061 		goto retry;
1062 	}
1063 
1064 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
1065 	pgm = &pgio->pg_mirrors[0];
1066 	pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize;
1067 
1068 	pgio->pg_mirror_idx = ds_idx;
1069 	return;
1070 out_nolseg:
1071 	if (pgio->pg_error < 0) {
1072 		if (pgio->pg_error != -EAGAIN)
1073 			return;
1074 		/* Retry getting layout segment if lower layer returned -EAGAIN */
1075 		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
1076 			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
1077 				pgio->pg_error = -ETIMEDOUT;
1078 			else
1079 				pgio->pg_error = -EIO;
1080 			return;
1081 		}
1082 		pgio->pg_error = 0;
1083 		/* Sleep for 1 second before retrying */
1084 		ssleep(1);
1085 		goto retry;
1086 	}
1087 out_mds:
1088 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
1089 			0, NFS4_MAX_UINT64, IOMODE_READ,
1090 			NFS_I(pgio->pg_inode)->layout,
1091 			pgio->pg_lseg);
1092 	pgio->pg_maxretrans = 0;
1093 	nfs_pageio_reset_read_mds(pgio);
1094 }
1095 
1096 static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor * pgio,struct nfs_page * req)1097 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
1098 			struct nfs_page *req)
1099 {
1100 	struct nfs4_ff_layout_mirror *mirror;
1101 	struct nfs_pgio_mirror *pgm;
1102 	struct nfs4_pnfs_ds *ds;
1103 	u32 i, dss_id;
1104 
1105 retry:
1106 	pnfs_generic_pg_check_layout(pgio, req);
1107 	if (!pgio->pg_lseg) {
1108 		pgio->pg_lseg =
1109 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
1110 					   req_offset(req), req->wb_bytes,
1111 					   IOMODE_RW, false, nfs_io_gfp_mask());
1112 		if (IS_ERR(pgio->pg_lseg)) {
1113 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1114 			pgio->pg_lseg = NULL;
1115 			return;
1116 		}
1117 	}
1118 	/* If no lseg, fall back to write through mds */
1119 	if (pgio->pg_lseg == NULL)
1120 		goto out_mds;
1121 
1122 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
1123 	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
1124 		goto out_eagain;
1125 
1126 	for (i = 0; i < pgio->pg_mirror_count; i++) {
1127 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
1128 		dss_id = nfs4_ff_layout_calc_dss_id(
1129 			FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit,
1130 			mirror->dss_count,
1131 			req_offset(req));
1132 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror,
1133 					       dss_id, true);
1134 		if (IS_ERR(ds)) {
1135 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
1136 				goto out_mds;
1137 			pnfs_generic_pg_cleanup(pgio);
1138 			/* Sleep for 1 second before retrying */
1139 			ssleep(1);
1140 			goto retry;
1141 		}
1142 		pgm = &pgio->pg_mirrors[i];
1143 		pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize;
1144 	}
1145 
1146 	if (NFS_SERVER(pgio->pg_inode)->flags &
1147 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1148 		pgio->pg_maxretrans = io_maxretrans;
1149 	return;
1150 out_eagain:
1151 	pnfs_generic_pg_cleanup(pgio);
1152 	pgio->pg_error = -EAGAIN;
1153 	return;
1154 out_mds:
1155 	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
1156 			0, NFS4_MAX_UINT64, IOMODE_RW,
1157 			NFS_I(pgio->pg_inode)->layout,
1158 			pgio->pg_lseg);
1159 	pgio->pg_maxretrans = 0;
1160 	nfs_pageio_reset_write_mds(pgio);
1161 	pgio->pg_error = -EAGAIN;
1162 }
1163 
1164 static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor * pgio,struct nfs_page * req)1165 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
1166 				    struct nfs_page *req)
1167 {
1168 	if (!pgio->pg_lseg) {
1169 		pgio->pg_lseg =
1170 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
1171 					   req_offset(req), req->wb_bytes,
1172 					   IOMODE_RW, false, nfs_io_gfp_mask());
1173 		if (IS_ERR(pgio->pg_lseg)) {
1174 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1175 			pgio->pg_lseg = NULL;
1176 			goto out;
1177 		}
1178 	}
1179 	if (pgio->pg_lseg)
1180 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
1181 
1182 	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
1183 			0, NFS4_MAX_UINT64, IOMODE_RW,
1184 			NFS_I(pgio->pg_inode)->layout,
1185 			pgio->pg_lseg);
1186 	/* no lseg means that pnfs is not in use, so no mirroring here */
1187 	nfs_pageio_reset_write_mds(pgio);
1188 out:
1189 	return 1;
1190 }
1191 
1192 static u32
ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor * desc,u32 idx)1193 ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1194 {
1195 	u32 old = desc->pg_mirror_idx;
1196 
1197 	desc->pg_mirror_idx = idx;
1198 	return old;
1199 }
1200 
1201 static struct nfs_pgio_mirror *
ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor * desc,u32 idx)1202 ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1203 {
1204 	return &desc->pg_mirrors[idx];
1205 }
1206 
1207 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1208 	.pg_init = ff_layout_pg_init_read,
1209 	.pg_test = ff_layout_pg_test,
1210 	.pg_doio = pnfs_generic_pg_readpages,
1211 	.pg_cleanup = pnfs_generic_pg_cleanup,
1212 };
1213 
1214 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1215 	.pg_init = ff_layout_pg_init_write,
1216 	.pg_test = ff_layout_pg_test,
1217 	.pg_doio = pnfs_generic_pg_writepages,
1218 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1219 	.pg_cleanup = pnfs_generic_pg_cleanup,
1220 	.pg_get_mirror = ff_layout_pg_get_mirror_write,
1221 	.pg_set_mirror = ff_layout_pg_set_mirror_write,
1222 };
1223 
ff_layout_reset_write(struct nfs_pgio_header * hdr,bool retry_pnfs)1224 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1225 {
1226 	struct rpc_task *task = &hdr->task;
1227 
1228 	pnfs_layoutcommit_inode(hdr->inode, false);
1229 
1230 	if (retry_pnfs) {
1231 		dprintk("%s Reset task %5u for i/o through pNFS "
1232 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1233 			hdr->task.tk_pid,
1234 			hdr->inode->i_sb->s_id,
1235 			(unsigned long long)NFS_FILEID(hdr->inode),
1236 			hdr->args.count,
1237 			(unsigned long long)hdr->args.offset);
1238 
1239 		hdr->completion_ops->reschedule_io(hdr);
1240 		return;
1241 	}
1242 
1243 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1244 		dprintk("%s Reset task %5u for i/o through MDS "
1245 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1246 			hdr->task.tk_pid,
1247 			hdr->inode->i_sb->s_id,
1248 			(unsigned long long)NFS_FILEID(hdr->inode),
1249 			hdr->args.count,
1250 			(unsigned long long)hdr->args.offset);
1251 
1252 		trace_pnfs_mds_fallback_write_done(hdr->inode,
1253 				hdr->args.offset, hdr->args.count,
1254 				IOMODE_RW, NFS_I(hdr->inode)->layout,
1255 				hdr->lseg);
1256 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1257 	}
1258 }
1259 
ff_layout_resend_pnfs_read(struct nfs_pgio_header * hdr)1260 static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1261 {
1262 	u32 idx = hdr->pgio_mirror_idx + 1;
1263 	u32 new_idx = 0;
1264 	u32 dss_id = 0;
1265 	struct nfs4_pnfs_ds *ds;
1266 
1267 	ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx,
1268 					      hdr->args.offset, &dss_id);
1269 	if (IS_ERR(ds))
1270 		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1271 	else
1272 		ff_layout_send_layouterror(hdr->lseg);
1273 	pnfs_read_resend_pnfs(hdr, new_idx);
1274 }
1275 
ff_layout_reset_read(struct nfs_pgio_header * hdr)1276 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1277 {
1278 	struct rpc_task *task = &hdr->task;
1279 
1280 	pnfs_layoutcommit_inode(hdr->inode, false);
1281 	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1282 
1283 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1284 		dprintk("%s Reset task %5u for i/o through MDS "
1285 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1286 			hdr->task.tk_pid,
1287 			hdr->inode->i_sb->s_id,
1288 			(unsigned long long)NFS_FILEID(hdr->inode),
1289 			hdr->args.count,
1290 			(unsigned long long)hdr->args.offset);
1291 
1292 		trace_pnfs_mds_fallback_read_done(hdr->inode,
1293 				hdr->args.offset, hdr->args.count,
1294 				IOMODE_READ, NFS_I(hdr->inode)->layout,
1295 				hdr->lseg);
1296 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1297 	}
1298 }
1299 
ff_layout_async_handle_error_v4(struct rpc_task * task,u32 op_status,struct nfs4_state * state,struct nfs_client * clp,struct pnfs_layout_segment * lseg,u32 idx,u32 dss_id)1300 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1301 					   u32 op_status,
1302 					   struct nfs4_state *state,
1303 					   struct nfs_client *clp,
1304 					   struct pnfs_layout_segment *lseg,
1305 					   u32 idx, u32 dss_id)
1306 {
1307 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1308 	struct inode *inode = lo->plh_inode;
1309 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
1310 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1311 
1312 	switch (op_status) {
1313 	case NFS4_OK:
1314 	case NFS4ERR_NXIO:
1315 		break;
1316 	case NFSERR_PERM:
1317 		if (!task->tk_xprt)
1318 			break;
1319 		xprt_force_disconnect(task->tk_xprt);
1320 		goto out_retry;
1321 	case NFS4ERR_BADSESSION:
1322 	case NFS4ERR_BADSLOT:
1323 	case NFS4ERR_BAD_HIGH_SLOT:
1324 	case NFS4ERR_DEADSESSION:
1325 	case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1326 	case NFS4ERR_SEQ_FALSE_RETRY:
1327 	case NFS4ERR_SEQ_MISORDERED:
1328 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1329 			"flags 0x%x\n", __func__, task->tk_status,
1330 			clp->cl_exchange_flags);
1331 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1332 		goto out_retry;
1333 	case NFS4ERR_DELAY:
1334 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1335 		fallthrough;
1336 	case NFS4ERR_GRACE:
1337 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1338 		goto out_retry;
1339 	case NFS4ERR_RETRY_UNCACHED_REP:
1340 		goto out_retry;
1341 	/* Invalidate Layout errors */
1342 	case NFS4ERR_PNFS_NO_LAYOUT:
1343 	case NFS4ERR_STALE:
1344 	case NFS4ERR_BADHANDLE:
1345 	case NFS4ERR_ISDIR:
1346 	case NFS4ERR_FHEXPIRED:
1347 	case NFS4ERR_WRONG_TYPE:
1348 		dprintk("%s Invalid layout error %d\n", __func__,
1349 			task->tk_status);
1350 		/*
1351 		 * Destroy layout so new i/o will get a new layout.
1352 		 * Layout will not be destroyed until all current lseg
1353 		 * references are put. Mark layout as invalid to resend failed
1354 		 * i/o and all i/o waiting on the slot table to the MDS until
1355 		 * layout is destroyed and a new valid layout is obtained.
1356 		 */
1357 		pnfs_destroy_layout(NFS_I(inode));
1358 		rpc_wake_up(&tbl->slot_tbl_waitq);
1359 		goto reset;
1360 	default:
1361 		break;
1362 	}
1363 
1364 	switch (task->tk_status) {
1365 	/* RPC connection errors */
1366 	case -ENETDOWN:
1367 	case -ENETUNREACH:
1368 		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1369 			return -NFS4ERR_FATAL_IOERROR;
1370 		fallthrough;
1371 	case -ECONNREFUSED:
1372 	case -EHOSTDOWN:
1373 	case -EHOSTUNREACH:
1374 	case -EIO:
1375 	case -ETIMEDOUT:
1376 	case -EPIPE:
1377 	case -EPROTO:
1378 	case -ENODEV:
1379 		dprintk("%s DS connection error %d\n", __func__,
1380 			task->tk_status);
1381 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1382 				&devid->deviceid);
1383 		rpc_wake_up(&tbl->slot_tbl_waitq);
1384 		break;
1385 	default:
1386 		break;
1387 	}
1388 
1389 	if (ff_layout_avoid_mds_available_ds(lseg))
1390 		return -NFS4ERR_RESET_TO_PNFS;
1391 reset:
1392 	dprintk("%s Retry through MDS. Error %d\n", __func__,
1393 		task->tk_status);
1394 	return -NFS4ERR_RESET_TO_MDS;
1395 
1396 out_retry:
1397 	task->tk_status = 0;
1398 	return -EAGAIN;
1399 }
1400 
1401 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
ff_layout_async_handle_error_v3(struct rpc_task * task,u32 op_status,struct nfs_client * clp,struct pnfs_layout_segment * lseg,u32 idx,u32 dss_id)1402 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1403 					   u32 op_status,
1404 					   struct nfs_client *clp,
1405 					   struct pnfs_layout_segment *lseg,
1406 					   u32 idx, u32 dss_id)
1407 {
1408 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
1409 
1410 	switch (op_status) {
1411 	case NFS_OK:
1412 	case NFSERR_NXIO:
1413 		break;
1414 	case NFSERR_PERM:
1415 		if (!task->tk_xprt)
1416 			break;
1417 		xprt_force_disconnect(task->tk_xprt);
1418 		goto out_retry;
1419 	case NFSERR_ACCES:
1420 	case NFSERR_BADHANDLE:
1421 	case NFSERR_FBIG:
1422 	case NFSERR_IO:
1423 	case NFSERR_NOSPC:
1424 	case NFSERR_ROFS:
1425 	case NFSERR_STALE:
1426 		goto out_reset_to_pnfs;
1427 	case NFSERR_JUKEBOX:
1428 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1429 		goto out_retry;
1430 	default:
1431 		break;
1432 	}
1433 
1434 	switch (task->tk_status) {
1435 	/* File access problems. Don't mark the device as unavailable */
1436 	case -EACCES:
1437 	case -ESTALE:
1438 	case -EISDIR:
1439 	case -EBADHANDLE:
1440 	case -ELOOP:
1441 	case -ENOSPC:
1442 		break;
1443 	case -EJUKEBOX:
1444 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1445 		goto out_retry;
1446 	case -ENETDOWN:
1447 	case -ENETUNREACH:
1448 		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1449 			return -NFS4ERR_FATAL_IOERROR;
1450 		fallthrough;
1451 	default:
1452 		dprintk("%s DS connection error %d\n", __func__,
1453 			task->tk_status);
1454 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1455 				&devid->deviceid);
1456 	}
1457 out_reset_to_pnfs:
1458 	/* FIXME: Need to prevent infinite looping here. */
1459 	return -NFS4ERR_RESET_TO_PNFS;
1460 out_retry:
1461 	task->tk_status = 0;
1462 	rpc_restart_call_prepare(task);
1463 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1464 	return -EAGAIN;
1465 }
1466 
ff_layout_async_handle_error(struct rpc_task * task,u32 op_status,struct nfs4_state * state,struct nfs_client * clp,struct pnfs_layout_segment * lseg,u32 idx,u32 dss_id)1467 static int ff_layout_async_handle_error(struct rpc_task *task,
1468 					u32 op_status,
1469 					struct nfs4_state *state,
1470 					struct nfs_client *clp,
1471 					struct pnfs_layout_segment *lseg,
1472 					u32 idx, u32 dss_id)
1473 {
1474 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1475 
1476 	if (task->tk_status >= 0) {
1477 		ff_layout_mark_ds_reachable(lseg, idx, dss_id);
1478 		return 0;
1479 	}
1480 
1481 	/* Handle the case of an invalid layout segment */
1482 	if (!pnfs_is_valid_lseg(lseg))
1483 		return -NFS4ERR_RESET_TO_PNFS;
1484 
1485 	switch (vers) {
1486 	case 3:
1487 		return ff_layout_async_handle_error_v3(task, op_status, clp,
1488 						       lseg, idx, dss_id);
1489 	case 4:
1490 		return ff_layout_async_handle_error_v4(task, op_status, state,
1491 						       clp, lseg, idx, dss_id);
1492 	default:
1493 		/* should never happen */
1494 		WARN_ON_ONCE(1);
1495 		return 0;
1496 	}
1497 }
1498 
ff_layout_io_track_ds_error(struct pnfs_layout_segment * lseg,u32 idx,u32 dss_id,u64 offset,u64 length,u32 * op_status,int opnum,int error)1499 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1500 					u32 idx, u32 dss_id, u64 offset, u64 length,
1501 					u32 *op_status, int opnum, int error)
1502 {
1503 	struct nfs4_ff_layout_mirror *mirror;
1504 	u32 status = *op_status;
1505 	int err;
1506 
1507 	if (status == 0) {
1508 		switch (error) {
1509 		case -ETIMEDOUT:
1510 		case -EPFNOSUPPORT:
1511 		case -EPROTONOSUPPORT:
1512 		case -EOPNOTSUPP:
1513 		case -EINVAL:
1514 		case -ECONNREFUSED:
1515 		case -ECONNRESET:
1516 		case -EHOSTDOWN:
1517 		case -EHOSTUNREACH:
1518 		case -ENETDOWN:
1519 		case -ENETUNREACH:
1520 		case -EADDRINUSE:
1521 		case -ENOBUFS:
1522 		case -EPIPE:
1523 		case -EPERM:
1524 		case -EPROTO:
1525 		case -ENODEV:
1526 			*op_status = status = NFS4ERR_NXIO;
1527 			break;
1528 		case -EACCES:
1529 			*op_status = status = NFS4ERR_ACCESS;
1530 			break;
1531 		default:
1532 			return;
1533 		}
1534 	}
1535 
1536 	mirror = FF_LAYOUT_COMP(lseg, idx);
1537 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1538 				       mirror, dss_id, offset, length, status, opnum,
1539 				       nfs_io_gfp_mask());
1540 
1541 	switch (status) {
1542 	case NFS4ERR_DELAY:
1543 	case NFS4ERR_GRACE:
1544 	case NFS4ERR_PERM:
1545 		break;
1546 	case NFS4ERR_NXIO:
1547 		ff_layout_mark_ds_unreachable(lseg, idx, dss_id);
1548 		/*
1549 		 * Don't return the layout if this is a read and we still
1550 		 * have layouts to try
1551 		 */
1552 		if (opnum == OP_READ)
1553 			break;
1554 		fallthrough;
1555 	default:
1556 		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1557 						  lseg);
1558 	}
1559 
1560 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1561 }
1562 
1563 /* NFS_PROTO call done callback routines */
ff_layout_read_done_cb(struct rpc_task * task,struct nfs_pgio_header * hdr)1564 static int ff_layout_read_done_cb(struct rpc_task *task,
1565 				struct nfs_pgio_header *hdr)
1566 {
1567 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
1568 	u32 dss_id = nfs4_ff_layout_calc_dss_id(
1569 		flseg->stripe_unit,
1570 		flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
1571 		hdr->args.offset);
1572 	int err;
1573 
1574 	if (task->tk_status < 0) {
1575 		ff_layout_io_track_ds_error(hdr->lseg,
1576 					    hdr->pgio_mirror_idx, dss_id,
1577 					    hdr->args.offset, hdr->args.count,
1578 					    &hdr->res.op_status, OP_READ,
1579 					    task->tk_status);
1580 		trace_ff_layout_read_error(hdr, task->tk_status);
1581 	}
1582 
1583 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1584 					   hdr->args.context->state,
1585 					   hdr->ds_clp, hdr->lseg,
1586 					   hdr->pgio_mirror_idx,
1587 					   dss_id);
1588 
1589 	trace_nfs4_pnfs_read(hdr, err);
1590 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1591 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1592 	switch (err) {
1593 	case -NFS4ERR_RESET_TO_PNFS:
1594 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1595 		return task->tk_status;
1596 	case -NFS4ERR_RESET_TO_MDS:
1597 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1598 		return task->tk_status;
1599 	case -EAGAIN:
1600 		goto out_eagain;
1601 	case -NFS4ERR_FATAL_IOERROR:
1602 		task->tk_status = -EIO;
1603 		return 0;
1604 	}
1605 
1606 	return 0;
1607 out_eagain:
1608 	rpc_restart_call_prepare(task);
1609 	return -EAGAIN;
1610 }
1611 
1612 static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment * lseg)1613 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1614 {
1615 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1616 }
1617 
1618 /*
1619  * We reference the rpc_cred of the first WRITE that triggers the need for
1620  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1621  * rfc5661 is not clear about which credential should be used.
1622  *
1623  * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
1624  * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1625  * we always send layoutcommit after DS writes.
1626  */
1627 static void
ff_layout_set_layoutcommit(struct inode * inode,struct pnfs_layout_segment * lseg,loff_t end_offset)1628 ff_layout_set_layoutcommit(struct inode *inode,
1629 		struct pnfs_layout_segment *lseg,
1630 		loff_t end_offset)
1631 {
1632 	if (!ff_layout_need_layoutcommit(lseg))
1633 		return;
1634 
1635 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1636 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1637 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1638 }
1639 
ff_layout_read_record_layoutstats_start(struct rpc_task * task,struct nfs_pgio_header * hdr)1640 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1641 		struct nfs_pgio_header *hdr)
1642 {
1643 	struct nfs4_ff_layout_mirror *mirror;
1644 	u32 dss_id;
1645 
1646 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1647 		return;
1648 
1649 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1650 	dss_id = nfs4_ff_layout_calc_dss_id(
1651 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1652 		mirror->dss_count,
1653 		hdr->args.offset);
1654 
1655 	nfs4_ff_layout_stat_io_start_read(
1656 		hdr->inode,
1657 		mirror,
1658 		dss_id,
1659 		hdr->args.count,
1660 		task->tk_start);
1661 }
1662 
ff_layout_read_record_layoutstats_done(struct rpc_task * task,struct nfs_pgio_header * hdr)1663 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1664 		struct nfs_pgio_header *hdr)
1665 {
1666 	struct nfs4_ff_layout_mirror *mirror;
1667 	u32 dss_id;
1668 
1669 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1670 		return;
1671 
1672 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1673 	dss_id = nfs4_ff_layout_calc_dss_id(
1674 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1675 		mirror->dss_count,
1676 		hdr->args.offset);
1677 
1678 	nfs4_ff_layout_stat_io_end_read(
1679 		task,
1680 		mirror,
1681 		dss_id,
1682 		hdr->args.count,
1683 		hdr->res.count);
1684 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1685 }
1686 
ff_layout_read_prepare_common(struct rpc_task * task,struct nfs_pgio_header * hdr)1687 static int ff_layout_read_prepare_common(struct rpc_task *task,
1688 					 struct nfs_pgio_header *hdr)
1689 {
1690 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1691 		rpc_exit(task, -EIO);
1692 		return -EIO;
1693 	}
1694 
1695 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1696 		rpc_exit(task, -EAGAIN);
1697 		return -EAGAIN;
1698 	}
1699 
1700 	ff_layout_read_record_layoutstats_start(task, hdr);
1701 	return 0;
1702 }
1703 
1704 /*
1705  * Call ops for the async read/write cases
1706  * In the case of dense layouts, the offset needs to be reset to its
1707  * original value.
1708  */
ff_layout_read_prepare_v3(struct rpc_task * task,void * data)1709 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1710 {
1711 	struct nfs_pgio_header *hdr = data;
1712 
1713 	if (ff_layout_read_prepare_common(task, hdr))
1714 		return;
1715 
1716 	rpc_call_start(task);
1717 }
1718 
ff_layout_read_prepare_v4(struct rpc_task * task,void * data)1719 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1720 {
1721 	struct nfs_pgio_header *hdr = data;
1722 
1723 	if (nfs4_setup_sequence(hdr->ds_clp,
1724 				&hdr->args.seq_args,
1725 				&hdr->res.seq_res,
1726 				task))
1727 		return;
1728 
1729 	ff_layout_read_prepare_common(task, hdr);
1730 }
1731 
ff_layout_read_call_done(struct rpc_task * task,void * data)1732 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1733 {
1734 	struct nfs_pgio_header *hdr = data;
1735 
1736 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1737 	    task->tk_status == 0) {
1738 		nfs4_sequence_done(task, &hdr->res.seq_res);
1739 		return;
1740 	}
1741 
1742 	/* Note this may cause RPC to be resent */
1743 	hdr->mds_ops->rpc_call_done(task, hdr);
1744 }
1745 
ff_layout_read_count_stats(struct rpc_task * task,void * data)1746 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1747 {
1748 	struct nfs_pgio_header *hdr = data;
1749 
1750 	ff_layout_read_record_layoutstats_done(task, hdr);
1751 	rpc_count_iostats_metrics(task,
1752 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1753 }
1754 
ff_layout_read_release(void * data)1755 static void ff_layout_read_release(void *data)
1756 {
1757 	struct nfs_pgio_header *hdr = data;
1758 
1759 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1760 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1761 		ff_layout_resend_pnfs_read(hdr);
1762 	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1763 		ff_layout_reset_read(hdr);
1764 	pnfs_generic_rw_release(data);
1765 }
1766 
1767 
ff_layout_write_done_cb(struct rpc_task * task,struct nfs_pgio_header * hdr)1768 static int ff_layout_write_done_cb(struct rpc_task *task,
1769 				struct nfs_pgio_header *hdr)
1770 {
1771 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
1772 	u32 dss_id = nfs4_ff_layout_calc_dss_id(
1773 		flseg->stripe_unit,
1774 		flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
1775 		hdr->args.offset);
1776 	loff_t end_offs = 0;
1777 	int err;
1778 
1779 	if (task->tk_status < 0) {
1780 		ff_layout_io_track_ds_error(hdr->lseg,
1781 					    hdr->pgio_mirror_idx, dss_id,
1782 					    hdr->args.offset, hdr->args.count,
1783 					    &hdr->res.op_status, OP_WRITE,
1784 					    task->tk_status);
1785 		trace_ff_layout_write_error(hdr, task->tk_status);
1786 	}
1787 
1788 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1789 					   hdr->args.context->state,
1790 					   hdr->ds_clp, hdr->lseg,
1791 					   hdr->pgio_mirror_idx,
1792 					   dss_id);
1793 
1794 	trace_nfs4_pnfs_write(hdr, err);
1795 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1796 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1797 	switch (err) {
1798 	case -NFS4ERR_RESET_TO_PNFS:
1799 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1800 		return task->tk_status;
1801 	case -NFS4ERR_RESET_TO_MDS:
1802 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1803 		return task->tk_status;
1804 	case -EAGAIN:
1805 		return -EAGAIN;
1806 	case -NFS4ERR_FATAL_IOERROR:
1807 		task->tk_status = -EIO;
1808 		return 0;
1809 	}
1810 
1811 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1812 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1813 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1814 
1815 	/* Note: if the write is unstable, don't set end_offs until commit */
1816 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1817 
1818 	/* zero out fattr since we don't care DS attr at all */
1819 	hdr->fattr.valid = 0;
1820 	if (task->tk_status >= 0)
1821 		nfs_writeback_update_inode(hdr);
1822 
1823 	return 0;
1824 }
1825 
ff_layout_commit_done_cb(struct rpc_task * task,struct nfs_commit_data * data)1826 static int ff_layout_commit_done_cb(struct rpc_task *task,
1827 				     struct nfs_commit_data *data)
1828 {
1829 	int err;
1830 	u32 idx = calc_mirror_idx_from_commit(data->lseg, data->ds_commit_index);
1831 	u32 dss_id = calc_dss_id_from_commit(data->lseg, data->ds_commit_index);
1832 
1833 	if (task->tk_status < 0) {
1834 		ff_layout_io_track_ds_error(data->lseg, idx, dss_id,
1835 					    data->args.offset, data->args.count,
1836 					    &data->res.op_status, OP_COMMIT,
1837 					    task->tk_status);
1838 		trace_ff_layout_commit_error(data, task->tk_status);
1839 	}
1840 
1841 	err = ff_layout_async_handle_error(task, data->res.op_status,
1842 					   NULL, data->ds_clp, data->lseg, idx,
1843 					   dss_id);
1844 
1845 	trace_nfs4_pnfs_commit_ds(data, err);
1846 	switch (err) {
1847 	case -NFS4ERR_RESET_TO_PNFS:
1848 		pnfs_generic_prepare_to_resend_writes(data);
1849 		return -EAGAIN;
1850 	case -NFS4ERR_RESET_TO_MDS:
1851 		pnfs_generic_prepare_to_resend_writes(data);
1852 		return -EAGAIN;
1853 	case -EAGAIN:
1854 		rpc_restart_call_prepare(task);
1855 		return -EAGAIN;
1856 	case -NFS4ERR_FATAL_IOERROR:
1857 		task->tk_status = -EIO;
1858 		return 0;
1859 	}
1860 
1861 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1862 	return 0;
1863 }
1864 
ff_layout_write_record_layoutstats_start(struct rpc_task * task,struct nfs_pgio_header * hdr)1865 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1866 		struct nfs_pgio_header *hdr)
1867 {
1868 	struct nfs4_ff_layout_mirror *mirror;
1869 	u32 dss_id;
1870 
1871 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1872 		return;
1873 
1874 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1875 	dss_id = nfs4_ff_layout_calc_dss_id(
1876 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1877 		mirror->dss_count,
1878 		hdr->args.offset);
1879 
1880 	nfs4_ff_layout_stat_io_start_write(
1881 		hdr->inode,
1882 		mirror,
1883 		dss_id,
1884 		hdr->args.count,
1885 		task->tk_start);
1886 }
1887 
ff_layout_write_record_layoutstats_done(struct rpc_task * task,struct nfs_pgio_header * hdr)1888 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1889 		struct nfs_pgio_header *hdr)
1890 {
1891 	struct nfs4_ff_layout_mirror *mirror;
1892 	u32 dss_id;
1893 
1894 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1895 		return;
1896 
1897 	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
1898 	dss_id = nfs4_ff_layout_calc_dss_id(
1899 		FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
1900 		mirror->dss_count,
1901 		hdr->args.offset);
1902 
1903 	nfs4_ff_layout_stat_io_end_write(
1904 		task,
1905 		mirror,
1906 		dss_id,
1907 		hdr->args.count,
1908 		hdr->res.count,
1909 		hdr->res.verf->committed);
1910 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1911 }
1912 
ff_layout_write_prepare_common(struct rpc_task * task,struct nfs_pgio_header * hdr)1913 static int ff_layout_write_prepare_common(struct rpc_task *task,
1914 					  struct nfs_pgio_header *hdr)
1915 {
1916 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1917 		rpc_exit(task, -EIO);
1918 		return -EIO;
1919 	}
1920 
1921 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1922 		rpc_exit(task, -EAGAIN);
1923 		return -EAGAIN;
1924 	}
1925 
1926 	ff_layout_write_record_layoutstats_start(task, hdr);
1927 	return 0;
1928 }
1929 
ff_layout_write_prepare_v3(struct rpc_task * task,void * data)1930 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1931 {
1932 	struct nfs_pgio_header *hdr = data;
1933 
1934 	if (ff_layout_write_prepare_common(task, hdr))
1935 		return;
1936 
1937 	rpc_call_start(task);
1938 }
1939 
ff_layout_write_prepare_v4(struct rpc_task * task,void * data)1940 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1941 {
1942 	struct nfs_pgio_header *hdr = data;
1943 
1944 	if (nfs4_setup_sequence(hdr->ds_clp,
1945 				&hdr->args.seq_args,
1946 				&hdr->res.seq_res,
1947 				task))
1948 		return;
1949 
1950 	ff_layout_write_prepare_common(task, hdr);
1951 }
1952 
ff_layout_write_call_done(struct rpc_task * task,void * data)1953 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1954 {
1955 	struct nfs_pgio_header *hdr = data;
1956 
1957 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1958 	    task->tk_status == 0) {
1959 		nfs4_sequence_done(task, &hdr->res.seq_res);
1960 		return;
1961 	}
1962 
1963 	/* Note this may cause RPC to be resent */
1964 	hdr->mds_ops->rpc_call_done(task, hdr);
1965 }
1966 
ff_layout_write_count_stats(struct rpc_task * task,void * data)1967 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1968 {
1969 	struct nfs_pgio_header *hdr = data;
1970 
1971 	ff_layout_write_record_layoutstats_done(task, hdr);
1972 	rpc_count_iostats_metrics(task,
1973 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1974 }
1975 
ff_layout_write_release(void * data)1976 static void ff_layout_write_release(void *data)
1977 {
1978 	struct nfs_pgio_header *hdr = data;
1979 
1980 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1981 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1982 		ff_layout_send_layouterror(hdr->lseg);
1983 		ff_layout_reset_write(hdr, true);
1984 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1985 		ff_layout_reset_write(hdr, false);
1986 	pnfs_generic_rw_release(data);
1987 }
1988 
ff_layout_commit_record_layoutstats_start(struct rpc_task * task,struct nfs_commit_data * cdata)1989 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1990 		struct nfs_commit_data *cdata)
1991 {
1992 	u32 idx, dss_id;
1993 
1994 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1995 		return;
1996 
1997 	idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
1998 	dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
1999 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
2000 			FF_LAYOUT_COMP(cdata->lseg, idx),
2001 			dss_id,
2002 			0, task->tk_start);
2003 }
2004 
ff_layout_commit_record_layoutstats_done(struct rpc_task * task,struct nfs_commit_data * cdata)2005 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
2006 		struct nfs_commit_data *cdata)
2007 {
2008 	struct nfs_page *req;
2009 	__u64 count = 0;
2010 	u32 idx, dss_id;
2011 
2012 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
2013 		return;
2014 
2015 	if (task->tk_status == 0) {
2016 		list_for_each_entry(req, &cdata->pages, wb_list)
2017 			count += req->wb_bytes;
2018 	}
2019 
2020 	idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
2021 	dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
2022 	nfs4_ff_layout_stat_io_end_write(task,
2023 			FF_LAYOUT_COMP(cdata->lseg, idx),
2024 			dss_id,
2025 			count, count, NFS_FILE_SYNC);
2026 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
2027 }
2028 
ff_layout_commit_prepare_common(struct rpc_task * task,struct nfs_commit_data * cdata)2029 static int ff_layout_commit_prepare_common(struct rpc_task *task,
2030 					   struct nfs_commit_data *cdata)
2031 {
2032 	if (!pnfs_is_valid_lseg(cdata->lseg)) {
2033 		rpc_exit(task, -EAGAIN);
2034 		return -EAGAIN;
2035 	}
2036 
2037 	ff_layout_commit_record_layoutstats_start(task, cdata);
2038 	return 0;
2039 }
2040 
ff_layout_commit_prepare_v3(struct rpc_task * task,void * data)2041 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
2042 {
2043 	if (ff_layout_commit_prepare_common(task, data))
2044 		return;
2045 
2046 	rpc_call_start(task);
2047 }
2048 
ff_layout_commit_prepare_v4(struct rpc_task * task,void * data)2049 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
2050 {
2051 	struct nfs_commit_data *wdata = data;
2052 
2053 	if (nfs4_setup_sequence(wdata->ds_clp,
2054 				&wdata->args.seq_args,
2055 				&wdata->res.seq_res,
2056 				task))
2057 		return;
2058 	ff_layout_commit_prepare_common(task, data);
2059 }
2060 
ff_layout_commit_done(struct rpc_task * task,void * data)2061 static void ff_layout_commit_done(struct rpc_task *task, void *data)
2062 {
2063 	pnfs_generic_write_commit_done(task, data);
2064 }
2065 
ff_layout_commit_count_stats(struct rpc_task * task,void * data)2066 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
2067 {
2068 	struct nfs_commit_data *cdata = data;
2069 
2070 	ff_layout_commit_record_layoutstats_done(task, cdata);
2071 	rpc_count_iostats_metrics(task,
2072 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
2073 }
2074 
ff_layout_commit_release(void * data)2075 static void ff_layout_commit_release(void *data)
2076 {
2077 	struct nfs_commit_data *cdata = data;
2078 
2079 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
2080 	pnfs_generic_commit_release(data);
2081 }
2082 
2083 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
2084 	.rpc_call_prepare = ff_layout_read_prepare_v3,
2085 	.rpc_call_done = ff_layout_read_call_done,
2086 	.rpc_count_stats = ff_layout_read_count_stats,
2087 	.rpc_release = ff_layout_read_release,
2088 };
2089 
2090 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
2091 	.rpc_call_prepare = ff_layout_read_prepare_v4,
2092 	.rpc_call_done = ff_layout_read_call_done,
2093 	.rpc_count_stats = ff_layout_read_count_stats,
2094 	.rpc_release = ff_layout_read_release,
2095 };
2096 
2097 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
2098 	.rpc_call_prepare = ff_layout_write_prepare_v3,
2099 	.rpc_call_done = ff_layout_write_call_done,
2100 	.rpc_count_stats = ff_layout_write_count_stats,
2101 	.rpc_release = ff_layout_write_release,
2102 };
2103 
2104 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
2105 	.rpc_call_prepare = ff_layout_write_prepare_v4,
2106 	.rpc_call_done = ff_layout_write_call_done,
2107 	.rpc_count_stats = ff_layout_write_count_stats,
2108 	.rpc_release = ff_layout_write_release,
2109 };
2110 
2111 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
2112 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
2113 	.rpc_call_done = ff_layout_commit_done,
2114 	.rpc_count_stats = ff_layout_commit_count_stats,
2115 	.rpc_release = ff_layout_commit_release,
2116 };
2117 
2118 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
2119 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
2120 	.rpc_call_done = ff_layout_commit_done,
2121 	.rpc_count_stats = ff_layout_commit_count_stats,
2122 	.rpc_release = ff_layout_commit_release,
2123 };
2124 
2125 static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header * hdr)2126 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
2127 {
2128 	struct pnfs_layout_segment *lseg = hdr->lseg;
2129 	struct nfs4_pnfs_ds *ds;
2130 	struct rpc_clnt *ds_clnt;
2131 	struct nfsd_file *localio;
2132 	struct nfs4_ff_layout_mirror *mirror;
2133 	const struct cred *ds_cred;
2134 	loff_t offset = hdr->args.offset;
2135 	u32 idx = hdr->pgio_mirror_idx;
2136 	int vers;
2137 	struct nfs_fh *fh;
2138 	u32 dss_id;
2139 	bool ds_fatal_error = false;
2140 
2141 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
2142 		__func__, hdr->inode->i_ino,
2143 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
2144 
2145 	mirror = FF_LAYOUT_COMP(lseg, idx);
2146 	dss_id = nfs4_ff_layout_calc_dss_id(
2147 		FF_LAYOUT_LSEG(lseg)->stripe_unit,
2148 		mirror->dss_count,
2149 		offset);
2150 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, false);
2151 	if (IS_ERR(ds)) {
2152 		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
2153 		goto out_failed;
2154 	}
2155 
2156 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2157 						   hdr->inode, dss_id);
2158 	if (IS_ERR(ds_clnt))
2159 		goto out_failed;
2160 
2161 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
2162 	if (!ds_cred)
2163 		goto out_failed;
2164 
2165 	vers = nfs4_ff_layout_ds_version(mirror, dss_id);
2166 
2167 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
2168 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
2169 
2170 	hdr->pgio_done_cb = ff_layout_read_done_cb;
2171 	refcount_inc(&ds->ds_clp->cl_count);
2172 	hdr->ds_clp = ds->ds_clp;
2173 	fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
2174 	if (fh)
2175 		hdr->args.fh = fh;
2176 
2177 	nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
2178 
2179 	/*
2180 	 * Note that if we ever decide to split across DSes,
2181 	 * then we may need to handle dense-like offsets.
2182 	 */
2183 	hdr->args.offset = offset;
2184 	hdr->mds_offset = offset;
2185 
2186 	/* Start IO accounting for local read */
2187 	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
2188 				FMODE_READ);
2189 	if (localio) {
2190 		hdr->task.tk_start = ktime_get();
2191 		ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
2192 	}
2193 
2194 	/* Perform an asynchronous read to ds */
2195 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
2196 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
2197 				      &ff_layout_read_call_ops_v4,
2198 			  0, RPC_TASK_SOFTCONN, localio);
2199 	put_cred(ds_cred);
2200 	return PNFS_ATTEMPTED;
2201 
2202 out_failed:
2203 	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
2204 		return PNFS_TRY_AGAIN;
2205 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
2206 			hdr->args.offset, hdr->args.count,
2207 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
2208 	return PNFS_NOT_ATTEMPTED;
2209 }
2210 
2211 /* Perform async writes. */
2212 static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header * hdr,int sync)2213 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
2214 {
2215 	struct pnfs_layout_segment *lseg = hdr->lseg;
2216 	struct nfs4_pnfs_ds *ds;
2217 	struct rpc_clnt *ds_clnt;
2218 	struct nfsd_file *localio;
2219 	struct nfs4_ff_layout_mirror *mirror;
2220 	const struct cred *ds_cred;
2221 	loff_t offset = hdr->args.offset;
2222 	int vers;
2223 	struct nfs_fh *fh;
2224 	u32 idx = hdr->pgio_mirror_idx;
2225 	u32 dss_id;
2226 	bool ds_fatal_error = false;
2227 
2228 	mirror = FF_LAYOUT_COMP(lseg, idx);
2229 	dss_id = nfs4_ff_layout_calc_dss_id(
2230 		FF_LAYOUT_LSEG(lseg)->stripe_unit,
2231 		mirror->dss_count,
2232 		offset);
2233 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
2234 	if (IS_ERR(ds)) {
2235 		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
2236 		goto out_failed;
2237 	}
2238 
2239 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2240 						   hdr->inode, dss_id);
2241 	if (IS_ERR(ds_clnt))
2242 		goto out_failed;
2243 
2244 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
2245 	if (!ds_cred)
2246 		goto out_failed;
2247 
2248 	vers = nfs4_ff_layout_ds_version(mirror, dss_id);
2249 
2250 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
2251 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
2252 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
2253 		vers);
2254 
2255 	hdr->pgio_done_cb = ff_layout_write_done_cb;
2256 	refcount_inc(&ds->ds_clp->cl_count);
2257 	hdr->ds_clp = ds->ds_clp;
2258 	hdr->ds_commit_idx = calc_commit_idx(lseg, idx, dss_id);
2259 	fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
2260 	if (fh)
2261 		hdr->args.fh = fh;
2262 
2263 	nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);
2264 
2265 	/*
2266 	 * Note that if we ever decide to split across DSes,
2267 	 * then we may need to handle dense-like offsets.
2268 	 */
2269 	hdr->args.offset = offset;
2270 
2271 	/* Start IO accounting for local write */
2272 	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
2273 				   FMODE_READ|FMODE_WRITE);
2274 	if (localio) {
2275 		hdr->task.tk_start = ktime_get();
2276 		ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
2277 	}
2278 
2279 	/* Perform an asynchronous write */
2280 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
2281 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
2282 				      &ff_layout_write_call_ops_v4,
2283 			  sync, RPC_TASK_SOFTCONN, localio);
2284 	put_cred(ds_cred);
2285 	return PNFS_ATTEMPTED;
2286 
2287 out_failed:
2288 	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
2289 		return PNFS_TRY_AGAIN;
2290 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
2291 			hdr->args.offset, hdr->args.count,
2292 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
2293 	return PNFS_NOT_ATTEMPTED;
2294 }
2295 
2296 static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment * lseg,u32 i,u32 dss_id)2297 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i, u32 dss_id)
2298 {
2299 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2300 
2301 	/* FIXME: Assume that there is only one NFS version available
2302 	 * for the DS.
2303 	 */
2304 	return &flseg->mirror_array[i]->dss[dss_id].fh_versions[0];
2305 }
2306 
ff_layout_initiate_commit(struct nfs_commit_data * data,int how)2307 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
2308 {
2309 	struct pnfs_layout_segment *lseg = data->lseg;
2310 	struct nfs4_pnfs_ds *ds;
2311 	struct rpc_clnt *ds_clnt;
2312 	struct nfsd_file *localio;
2313 	struct nfs4_ff_layout_mirror *mirror;
2314 	const struct cred *ds_cred;
2315 	u32 idx, dss_id;
2316 	int vers, ret;
2317 	struct nfs_fh *fh;
2318 
2319 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
2320 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
2321 		goto out_err;
2322 
2323 	idx = calc_mirror_idx_from_commit(lseg, data->ds_commit_index);
2324 	mirror = FF_LAYOUT_COMP(lseg, idx);
2325 	dss_id = calc_dss_id_from_commit(lseg, data->ds_commit_index);
2326 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
2327 	if (IS_ERR(ds))
2328 		goto out_err;
2329 
2330 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2331 						   data->inode, dss_id);
2332 	if (IS_ERR(ds_clnt))
2333 		goto out_err;
2334 
2335 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred, dss_id);
2336 	if (!ds_cred)
2337 		goto out_err;
2338 
2339 	vers = nfs4_ff_layout_ds_version(mirror, dss_id);
2340 
2341 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
2342 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
2343 		vers);
2344 	data->commit_done_cb = ff_layout_commit_done_cb;
2345 	data->cred = ds_cred;
2346 	refcount_inc(&ds->ds_clp->cl_count);
2347 	data->ds_clp = ds->ds_clp;
2348 	fh = select_ds_fh_from_commit(lseg, idx, dss_id);
2349 	if (fh)
2350 		data->args.fh = fh;
2351 
2352 	/* Start IO accounting for local commit */
2353 	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
2354 				   FMODE_READ|FMODE_WRITE);
2355 	if (localio) {
2356 		data->task.tk_start = ktime_get();
2357 		ff_layout_commit_record_layoutstats_start(&data->task, data);
2358 	}
2359 
2360 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2361 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
2362 					       &ff_layout_commit_call_ops_v4,
2363 				   how, RPC_TASK_SOFTCONN, localio);
2364 	put_cred(ds_cred);
2365 	return ret;
2366 out_err:
2367 	pnfs_generic_prepare_to_resend_writes(data);
2368 	pnfs_generic_commit_release(data);
2369 	return -EAGAIN;
2370 }
2371 
2372 static int
ff_layout_commit_pagelist(struct inode * inode,struct list_head * mds_pages,int how,struct nfs_commit_info * cinfo)2373 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2374 			   int how, struct nfs_commit_info *cinfo)
2375 {
2376 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2377 					    ff_layout_initiate_commit);
2378 }
2379 
ff_layout_match_rw(const struct rpc_task * task,const struct nfs_pgio_header * hdr,const struct pnfs_layout_segment * lseg)2380 static bool ff_layout_match_rw(const struct rpc_task *task,
2381 			       const struct nfs_pgio_header *hdr,
2382 			       const struct pnfs_layout_segment *lseg)
2383 {
2384 	return hdr->lseg == lseg;
2385 }
2386 
ff_layout_match_commit(const struct rpc_task * task,const struct nfs_commit_data * cdata,const struct pnfs_layout_segment * lseg)2387 static bool ff_layout_match_commit(const struct rpc_task *task,
2388 				   const struct nfs_commit_data *cdata,
2389 				   const struct pnfs_layout_segment *lseg)
2390 {
2391 	return cdata->lseg == lseg;
2392 }
2393 
ff_layout_match_io(const struct rpc_task * task,const void * data)2394 static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2395 {
2396 	const struct rpc_call_ops *ops = task->tk_ops;
2397 
2398 	if (ops == &ff_layout_read_call_ops_v3 ||
2399 	    ops == &ff_layout_read_call_ops_v4 ||
2400 	    ops == &ff_layout_write_call_ops_v3 ||
2401 	    ops == &ff_layout_write_call_ops_v4)
2402 		return ff_layout_match_rw(task, task->tk_calldata, data);
2403 	if (ops == &ff_layout_commit_call_ops_v3 ||
2404 	    ops == &ff_layout_commit_call_ops_v4)
2405 		return ff_layout_match_commit(task, task->tk_calldata, data);
2406 	return false;
2407 }
2408 
ff_layout_cancel_io(struct pnfs_layout_segment * lseg)2409 static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2410 {
2411 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2412 	struct nfs4_ff_layout_mirror *mirror;
2413 	struct nfs4_ff_layout_ds *mirror_ds;
2414 	struct nfs4_pnfs_ds *ds;
2415 	struct nfs_client *ds_clp;
2416 	struct rpc_clnt *clnt;
2417 	u32 idx, dss_id;
2418 
2419 	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2420 		mirror = flseg->mirror_array[idx];
2421 		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
2422 			mirror_ds = mirror->dss[dss_id].mirror_ds;
2423 			if (IS_ERR_OR_NULL(mirror_ds))
2424 				continue;
2425 			ds = mirror->dss[dss_id].mirror_ds->ds;
2426 			if (!ds)
2427 				continue;
2428 			ds_clp = ds->ds_clp;
2429 			if (!ds_clp)
2430 				continue;
2431 			clnt = ds_clp->cl_rpcclient;
2432 			if (!clnt)
2433 				continue;
2434 			if (!rpc_cancel_tasks(clnt, -EAGAIN,
2435 					      ff_layout_match_io, lseg))
2436 				continue;
2437 			rpc_clnt_disconnect(clnt);
2438 		}
2439 	}
2440 }
2441 
2442 static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode * inode)2443 ff_layout_get_ds_info(struct inode *inode)
2444 {
2445 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2446 
2447 	if (layout == NULL)
2448 		return NULL;
2449 
2450 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2451 }
2452 
2453 static void
ff_layout_setup_ds_info(struct pnfs_ds_commit_info * fl_cinfo,struct pnfs_layout_segment * lseg)2454 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2455 		struct pnfs_layout_segment *lseg)
2456 {
2457 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2458 	struct inode *inode = lseg->pls_layout->plh_inode;
2459 	struct pnfs_commit_array *array, *new;
2460 	u32 size = flseg->mirror_array_cnt * flseg->mirror_array[0]->dss_count;
2461 
2462 	new = pnfs_alloc_commit_array(size,
2463 				      nfs_io_gfp_mask());
2464 	if (new) {
2465 		spin_lock(&inode->i_lock);
2466 		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2467 		spin_unlock(&inode->i_lock);
2468 		if (array != new)
2469 			pnfs_free_commit_array(new);
2470 	}
2471 }
2472 
2473 static void
ff_layout_release_ds_info(struct pnfs_ds_commit_info * fl_cinfo,struct inode * inode)2474 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2475 		struct inode *inode)
2476 {
2477 	spin_lock(&inode->i_lock);
2478 	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2479 	spin_unlock(&inode->i_lock);
2480 }
2481 
2482 static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node * d)2483 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2484 {
2485 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2486 						  id_node));
2487 }
2488 
ff_layout_encode_ioerr(struct xdr_stream * xdr,const struct nfs4_layoutreturn_args * args,const struct nfs4_flexfile_layoutreturn_args * ff_args)2489 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2490 				  const struct nfs4_layoutreturn_args *args,
2491 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2492 {
2493 	__be32 *start;
2494 
2495 	start = xdr_reserve_space(xdr, 4);
2496 	if (unlikely(!start))
2497 		return -E2BIG;
2498 
2499 	*start = cpu_to_be32(ff_args->num_errors);
2500 	/* This assume we always return _ALL_ layouts */
2501 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2502 }
2503 
2504 static void
ff_layout_encode_ff_iostat_head(struct xdr_stream * xdr,const nfs4_stateid * stateid,const struct nfs42_layoutstat_devinfo * devinfo)2505 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2506 			    const nfs4_stateid *stateid,
2507 			    const struct nfs42_layoutstat_devinfo *devinfo)
2508 {
2509 	__be32 *p;
2510 
2511 	p = xdr_reserve_space(xdr, 8 + 8);
2512 	p = xdr_encode_hyper(p, devinfo->offset);
2513 	p = xdr_encode_hyper(p, devinfo->length);
2514 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2515 	p = xdr_reserve_space(xdr, 4*8);
2516 	p = xdr_encode_hyper(p, devinfo->read_count);
2517 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2518 	p = xdr_encode_hyper(p, devinfo->write_count);
2519 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2520 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2521 }
2522 
2523 static void
ff_layout_encode_ff_iostat(struct xdr_stream * xdr,const nfs4_stateid * stateid,const struct nfs42_layoutstat_devinfo * devinfo)2524 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2525 			    const nfs4_stateid *stateid,
2526 			    const struct nfs42_layoutstat_devinfo *devinfo)
2527 {
2528 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2529 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2530 			devinfo->ld_private.data);
2531 }
2532 
2533 /* report nothing for now */
ff_layout_encode_iostats_array(struct xdr_stream * xdr,const struct nfs4_layoutreturn_args * args,struct nfs4_flexfile_layoutreturn_args * ff_args)2534 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2535 		const struct nfs4_layoutreturn_args *args,
2536 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2537 {
2538 	__be32 *p;
2539 	int i;
2540 
2541 	p = xdr_reserve_space(xdr, 4);
2542 	*p = cpu_to_be32(ff_args->num_dev);
2543 	for (i = 0; i < ff_args->num_dev; i++)
2544 		ff_layout_encode_ff_iostat(xdr,
2545 				&args->layout->plh_stateid,
2546 				&ff_args->devinfo[i]);
2547 }
2548 
2549 static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo * devinfo,unsigned int num_entries)2550 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2551 		unsigned int num_entries)
2552 {
2553 	unsigned int i;
2554 
2555 	for (i = 0; i < num_entries; i++) {
2556 		if (!devinfo[i].ld_private.ops)
2557 			continue;
2558 		if (!devinfo[i].ld_private.ops->free)
2559 			continue;
2560 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2561 	}
2562 }
2563 
2564 static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server * server,struct pnfs_device * pdev,gfp_t gfp_flags)2565 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2566 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2567 {
2568 	struct nfs4_ff_layout_ds *dsaddr;
2569 
2570 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2571 	if (!dsaddr)
2572 		return NULL;
2573 	return &dsaddr->id_node;
2574 }
2575 
2576 static void
ff_layout_encode_layoutreturn(struct xdr_stream * xdr,const void * voidargs,const struct nfs4_xdr_opaque_data * ff_opaque)2577 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2578 		const void *voidargs,
2579 		const struct nfs4_xdr_opaque_data *ff_opaque)
2580 {
2581 	const struct nfs4_layoutreturn_args *args = voidargs;
2582 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2583 	struct xdr_buf tmp_buf = {
2584 		.head = {
2585 			[0] = {
2586 				.iov_base = page_address(ff_args->pages[0]),
2587 			},
2588 		},
2589 		.buflen = PAGE_SIZE,
2590 	};
2591 	struct xdr_stream tmp_xdr;
2592 	__be32 *start;
2593 
2594 	dprintk("%s: Begin\n", __func__);
2595 
2596 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2597 
2598 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2599 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2600 
2601 	start = xdr_reserve_space(xdr, 4);
2602 	*start = cpu_to_be32(tmp_buf.len);
2603 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2604 
2605 	dprintk("%s: Return\n", __func__);
2606 }
2607 
2608 static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data * args)2609 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2610 {
2611 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2612 
2613 	if (!args->data)
2614 		return;
2615 	ff_args = args->data;
2616 	args->data = NULL;
2617 
2618 	ff_layout_free_ds_ioerr(&ff_args->errors);
2619 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2620 
2621 	put_page(ff_args->pages[0]);
2622 	kfree(ff_args);
2623 }
2624 
2625 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2626 	.encode = ff_layout_encode_layoutreturn,
2627 	.free = ff_layout_free_layoutreturn,
2628 };
2629 
2630 static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args * args)2631 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2632 {
2633 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2634 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2635 
2636 	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2637 	if (!ff_args)
2638 		goto out_nomem;
2639 	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2640 	if (!ff_args->pages[0])
2641 		goto out_nomem_free;
2642 
2643 	INIT_LIST_HEAD(&ff_args->errors);
2644 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2645 			&args->range, &ff_args->errors,
2646 			FF_LAYOUTRETURN_MAXERR);
2647 
2648 	spin_lock(&args->inode->i_lock);
2649 	ff_args->num_dev = ff_layout_mirror_prepare_stats(
2650 		&ff_layout->generic_hdr, &ff_args->devinfo[0],
2651 		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2652 	spin_unlock(&args->inode->i_lock);
2653 
2654 	args->ld_private->ops = &layoutreturn_ops;
2655 	args->ld_private->data = ff_args;
2656 	return 0;
2657 out_nomem_free:
2658 	kfree(ff_args);
2659 out_nomem:
2660 	return -ENOMEM;
2661 }
2662 
2663 #ifdef CONFIG_NFS_V4_2
2664 void
ff_layout_send_layouterror(struct pnfs_layout_segment * lseg)2665 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2666 {
2667 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2668 	struct nfs42_layout_error *errors;
2669 	LIST_HEAD(head);
2670 
2671 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2672 		return;
2673 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2674 	if (list_empty(&head))
2675 		return;
2676 
2677 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2678 			       nfs_io_gfp_mask());
2679 	if (errors != NULL) {
2680 		const struct nfs4_ff_layout_ds_err *pos;
2681 		size_t n = 0;
2682 
2683 		list_for_each_entry(pos, &head, list) {
2684 			errors[n].offset = pos->offset;
2685 			errors[n].length = pos->length;
2686 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2687 			errors[n].errors[0].dev_id = pos->deviceid;
2688 			errors[n].errors[0].status = pos->status;
2689 			errors[n].errors[0].opnum = pos->opnum;
2690 			n++;
2691 			if (!list_is_last(&pos->list, &head) &&
2692 			    n < NFS42_LAYOUTERROR_MAX)
2693 				continue;
2694 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2695 				break;
2696 			n = 0;
2697 		}
2698 		kfree(errors);
2699 	}
2700 	ff_layout_free_ds_ioerr(&head);
2701 }
2702 #else
2703 void
ff_layout_send_layouterror(struct pnfs_layout_segment * lseg)2704 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2705 {
2706 }
2707 #endif
2708 
2709 static int
ff_layout_ntop4(const struct sockaddr * sap,char * buf,const size_t buflen)2710 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2711 {
2712 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2713 
2714 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2715 }
2716 
2717 static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr * sap,char * buf,const int buflen)2718 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2719 			  const int buflen)
2720 {
2721 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2722 	const struct in6_addr *addr = &sin6->sin6_addr;
2723 
2724 	/*
2725 	 * RFC 4291, Section 2.2.2
2726 	 *
2727 	 * Shorthanded ANY address
2728 	 */
2729 	if (ipv6_addr_any(addr))
2730 		return snprintf(buf, buflen, "::");
2731 
2732 	/*
2733 	 * RFC 4291, Section 2.2.2
2734 	 *
2735 	 * Shorthanded loopback address
2736 	 */
2737 	if (ipv6_addr_loopback(addr))
2738 		return snprintf(buf, buflen, "::1");
2739 
2740 	/*
2741 	 * RFC 4291, Section 2.2.3
2742 	 *
2743 	 * Special presentation address format for mapped v4
2744 	 * addresses.
2745 	 */
2746 	if (ipv6_addr_v4mapped(addr))
2747 		return snprintf(buf, buflen, "::ffff:%pI4",
2748 					&addr->s6_addr32[3]);
2749 
2750 	/*
2751 	 * RFC 4291, Section 2.2.1
2752 	 */
2753 	return snprintf(buf, buflen, "%pI6c", addr);
2754 }
2755 
2756 /* Derived from rpc_sockaddr2uaddr */
2757 static void
ff_layout_encode_netaddr(struct xdr_stream * xdr,struct nfs4_pnfs_ds_addr * da)2758 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2759 {
2760 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2761 	char portbuf[RPCBIND_MAXUADDRPLEN];
2762 	char addrbuf[RPCBIND_MAXUADDRLEN];
2763 	unsigned short port;
2764 	int len, netid_len;
2765 	__be32 *p;
2766 
2767 	switch (sap->sa_family) {
2768 	case AF_INET:
2769 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2770 			return;
2771 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2772 		break;
2773 	case AF_INET6:
2774 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2775 			return;
2776 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2777 		break;
2778 	default:
2779 		WARN_ON_ONCE(1);
2780 		return;
2781 	}
2782 
2783 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2784 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2785 
2786 	netid_len = strlen(da->da_netid);
2787 	p = xdr_reserve_space(xdr, 4 + netid_len);
2788 	xdr_encode_opaque(p, da->da_netid, netid_len);
2789 
2790 	p = xdr_reserve_space(xdr, 4 + len);
2791 	xdr_encode_opaque(p, addrbuf, len);
2792 }
2793 
2794 static void
ff_layout_encode_nfstime(struct xdr_stream * xdr,ktime_t t)2795 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2796 			 ktime_t t)
2797 {
2798 	struct timespec64 ts;
2799 	__be32 *p;
2800 
2801 	p = xdr_reserve_space(xdr, 12);
2802 	ts = ktime_to_timespec64(t);
2803 	p = xdr_encode_hyper(p, ts.tv_sec);
2804 	*p++ = cpu_to_be32(ts.tv_nsec);
2805 }
2806 
2807 static void
ff_layout_encode_io_latency(struct xdr_stream * xdr,struct nfs4_ff_io_stat * stat)2808 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2809 			    struct nfs4_ff_io_stat *stat)
2810 {
2811 	__be32 *p;
2812 
2813 	p = xdr_reserve_space(xdr, 5 * 8);
2814 	p = xdr_encode_hyper(p, stat->ops_requested);
2815 	p = xdr_encode_hyper(p, stat->bytes_requested);
2816 	p = xdr_encode_hyper(p, stat->ops_completed);
2817 	p = xdr_encode_hyper(p, stat->bytes_completed);
2818 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2819 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2820 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2821 }
2822 
2823 static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream * xdr,const struct nfs42_layoutstat_devinfo * devinfo,struct nfs4_ff_layout_ds_stripe * dss_info)2824 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2825 			      const struct nfs42_layoutstat_devinfo *devinfo,
2826 			      struct nfs4_ff_layout_ds_stripe *dss_info)
2827 {
2828 	struct nfs4_pnfs_ds_addr *da;
2829 	struct nfs4_pnfs_ds *ds = dss_info->mirror_ds->ds;
2830 	struct nfs_fh *fh = &dss_info->fh_versions[0];
2831 	__be32 *p;
2832 
2833 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2834 	dprintk("%s: DS %s: encoding address %s\n",
2835 		__func__, ds->ds_remotestr, da->da_remotestr);
2836 	/* netaddr4 */
2837 	ff_layout_encode_netaddr(xdr, da);
2838 	/* nfs_fh4 */
2839 	p = xdr_reserve_space(xdr, 4 + fh->size);
2840 	xdr_encode_opaque(p, fh->data, fh->size);
2841 	/* ff_io_latency4 read */
2842 	spin_lock(&dss_info->mirror->lock);
2843 	ff_layout_encode_io_latency(xdr,
2844 				    &dss_info->read_stat.io_stat);
2845 	/* ff_io_latency4 write */
2846 	ff_layout_encode_io_latency(xdr,
2847 				    &dss_info->write_stat.io_stat);
2848 	spin_unlock(&dss_info->mirror->lock);
2849 	/* nfstime4 */
2850 	ff_layout_encode_nfstime(xdr,
2851 				 ktime_sub(ktime_get(),
2852 					   dss_info->start_time));
2853 	/* bool */
2854 	p = xdr_reserve_space(xdr, 4);
2855 	*p = cpu_to_be32(false);
2856 }
2857 
2858 static void
ff_layout_encode_layoutstats(struct xdr_stream * xdr,const void * args,const struct nfs4_xdr_opaque_data * opaque)2859 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2860 			     const struct nfs4_xdr_opaque_data *opaque)
2861 {
2862 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2863 			struct nfs42_layoutstat_devinfo, ld_private);
2864 	__be32 *start;
2865 
2866 	/* layoutupdate length */
2867 	start = xdr_reserve_space(xdr, 4);
2868 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2869 
2870 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2871 }
2872 
2873 static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data * opaque)2874 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2875 {
2876 	struct nfs4_ff_layout_ds_stripe *dss_info = opaque->data;
2877 	struct nfs4_ff_layout_mirror *mirror = dss_info->mirror;
2878 
2879 	ff_layout_put_mirror(mirror);
2880 }
2881 
2882 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2883 	.encode = ff_layout_encode_layoutstats,
2884 	.free	= ff_layout_free_layoutstats,
2885 };
2886 
2887 static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr * lo,struct nfs42_layoutstat_devinfo * devinfo,int dev_limit,enum nfs4_ff_op_type type)2888 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2889 			       struct nfs42_layoutstat_devinfo *devinfo,
2890 			       int dev_limit, enum nfs4_ff_op_type type)
2891 {
2892 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2893 	struct nfs4_ff_layout_mirror *mirror;
2894 	struct nfs4_ff_layout_ds_stripe *dss_info;
2895 	struct nfs4_deviceid_node *dev;
2896 	int i = 0, dss_id;
2897 
2898 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2899 		for (dss_id = 0; dss_id < mirror->dss_count; ++dss_id) {
2900 			dss_info = &mirror->dss[dss_id];
2901 			if (i >= dev_limit)
2902 				break;
2903 			if (IS_ERR_OR_NULL(dss_info->mirror_ds))
2904 				continue;
2905 			if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2906 						&mirror->flags) &&
2907 			    type != NFS4_FF_OP_LAYOUTRETURN)
2908 				continue;
2909 			/* mirror refcount put in cleanup_layoutstats */
2910 			if (!refcount_inc_not_zero(&mirror->ref))
2911 				continue;
2912 			dev = &dss_info->mirror_ds->id_node;
2913 			memcpy(&devinfo->dev_id,
2914 			       &dev->deviceid,
2915 			       NFS4_DEVICEID4_SIZE);
2916 			devinfo->offset = 0;
2917 			devinfo->length = NFS4_MAX_UINT64;
2918 			spin_lock(&mirror->lock);
2919 			devinfo->read_count =
2920 			    dss_info->read_stat.io_stat.ops_completed;
2921 			devinfo->read_bytes =
2922 			    dss_info->read_stat.io_stat.bytes_completed;
2923 			devinfo->write_count =
2924 			    dss_info->write_stat.io_stat.ops_completed;
2925 			devinfo->write_bytes =
2926 			    dss_info->write_stat.io_stat.bytes_completed;
2927 			spin_unlock(&mirror->lock);
2928 			devinfo->layout_type = LAYOUT_FLEX_FILES;
2929 			devinfo->ld_private.ops = &layoutstat_ops;
2930 			devinfo->ld_private.data = &mirror->dss[dss_id];
2931 
2932 			devinfo++;
2933 			i++;
2934 		}
2935 	}
2936 	return i;
2937 }
2938 
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args * args)2939 static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2940 {
2941 	struct pnfs_layout_hdr *lo;
2942 	struct nfs4_flexfile_layout *ff_layout;
2943 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2944 
2945 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2946 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2947 				      nfs_io_gfp_mask());
2948 	if (!args->devinfo)
2949 		return -ENOMEM;
2950 
2951 	spin_lock(&args->inode->i_lock);
2952 	lo = NFS_I(args->inode)->layout;
2953 	if (lo && pnfs_layout_is_valid(lo)) {
2954 		ff_layout = FF_LAYOUT_FROM_HDR(lo);
2955 		args->num_dev = ff_layout_mirror_prepare_stats(
2956 			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2957 			NFS4_FF_OP_LAYOUTSTATS);
2958 	} else
2959 		args->num_dev = 0;
2960 	spin_unlock(&args->inode->i_lock);
2961 	if (!args->num_dev) {
2962 		kfree(args->devinfo);
2963 		args->devinfo = NULL;
2964 		return -ENOENT;
2965 	}
2966 
2967 	return 0;
2968 }
2969 
2970 static int
ff_layout_set_layoutdriver(struct nfs_server * server,const struct nfs_fh * dummy)2971 ff_layout_set_layoutdriver(struct nfs_server *server,
2972 		const struct nfs_fh *dummy)
2973 {
2974 #if IS_ENABLED(CONFIG_NFS_V4_2)
2975 	server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
2976 #endif
2977 	return 0;
2978 }
2979 
2980 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2981 	.setup_ds_info		= ff_layout_setup_ds_info,
2982 	.release_ds_info	= ff_layout_release_ds_info,
2983 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2984 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2985 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2986 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2987 	.commit_pagelist	= ff_layout_commit_pagelist,
2988 };
2989 
2990 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2991 	.id			= LAYOUT_FLEX_FILES,
2992 	.name			= "LAYOUT_FLEX_FILES",
2993 	.owner			= THIS_MODULE,
2994 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2995 	.max_layoutget_response	= 4096, /* 1 page or so... */
2996 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2997 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2998 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2999 	.alloc_lseg		= ff_layout_alloc_lseg,
3000 	.free_lseg		= ff_layout_free_lseg,
3001 	.add_lseg		= ff_layout_add_lseg,
3002 	.pg_read_ops		= &ff_layout_pg_read_ops,
3003 	.pg_write_ops		= &ff_layout_pg_write_ops,
3004 	.get_ds_info		= ff_layout_get_ds_info,
3005 	.free_deviceid_node	= ff_layout_free_deviceid_node,
3006 	.read_pagelist		= ff_layout_read_pagelist,
3007 	.write_pagelist		= ff_layout_write_pagelist,
3008 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
3009 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
3010 	.sync			= pnfs_nfs_generic_sync,
3011 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
3012 	.cancel_io		= ff_layout_cancel_io,
3013 };
3014 
nfs4flexfilelayout_init(void)3015 static int __init nfs4flexfilelayout_init(void)
3016 {
3017 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
3018 	       __func__);
3019 	return pnfs_register_layoutdriver(&flexfilelayout_type);
3020 }
3021 
nfs4flexfilelayout_exit(void)3022 static void __exit nfs4flexfilelayout_exit(void)
3023 {
3024 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
3025 	       __func__);
3026 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
3027 }
3028 
3029 MODULE_ALIAS("nfs-layouttype4-4");
3030 
3031 MODULE_LICENSE("GPL");
3032 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
3033 
3034 module_init(nfs4flexfilelayout_init);
3035 module_exit(nfs4flexfilelayout_exit);
3036 
3037 module_param(io_maxretrans, ushort, 0644);
3038 MODULE_PARM_DESC(io_maxretrans, "The  number of times the NFSv4.1 client "
3039 			"retries an I/O request before returning an error. ");
3040