// SPDX-License-Identifier: GPL-2.0-only
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)
#define FF_LAYOUTRETURN_MAXERR		20

enum nfs4_ff_op_type {
	NFS4_FF_OP_LAYOUTSTATS,
	NFS4_FF_OP_LAYOUTRETURN,
};

static unsigned short io_maxretrans;

static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit, enum nfs4_ff_op_type type);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
		const struct nfs42_layoutstat_devinfo *devinfo,
		struct nfs4_ff_layout_ds_stripe *dss_info);
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc_obj(*ffl, gfp_flags);
	if (ffl) {
		pnfs_init_ds_commit_info(&ffl->commit_info);
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		ffl->commit_info.ops = &ff_layout_commit_ops;
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree_rcu(ffl, generic_hdr.plh_rcu);
}

static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

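/*
 * Decode an XDR opaque filehandle: a 4-byte length followed by up to
 * NFS_MAXFHSIZE bytes of filehandle data.
 */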
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > NFS_MAXFHSIZE) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., Kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

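/*
 * Try to open the DS file locally (LOCALIO). Returns NULL when
 * CONFIG_NFS_LOCALIO is not enabled, or when no local open is possible.
 */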
static struct nfsd_file *
ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx, u32 dss_id,
		 struct nfs_client *clp, const struct cred *cred,
		 struct nfs_fh *fh, fmode_t mode)
{
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);

	return nfs_local_open_fh(clp, cred, fh, &mirror->dss[dss_id].nfl, mode);
#else
	return NULL;
#endif
}

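/*
 * Two data-server stripes match only if they advertise the same set of
 * filehandle versions, irrespective of ordering.
 */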
static bool ff_dss_match_fh(const struct nfs4_ff_layout_ds_stripe *dss1,
			    const struct nfs4_ff_layout_ds_stripe *dss2)
{
	int i, j;

	if (dss1->fh_versions_cnt != dss2->fh_versions_cnt)
		return false;

	for (i = 0; i < dss1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < dss2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&dss1->fh_versions[i],
					   &dss2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
			       const struct nfs4_ff_layout_mirror *m2)
{
	u32 dss_id;

	if (m1->dss_count != m2->dss_count)
		return false;

	for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
		if (!ff_dss_match_fh(&m1->dss[dss_id], &m2->dss[dss_id]))
			return false;

	return true;
}

static bool ff_mirror_match_devid(const struct nfs4_ff_layout_mirror *m1,
				  const struct nfs4_ff_layout_mirror *m2)
{
	u32 dss_id;

	if (m1->dss_count != m2->dss_count)
		return false;

	for (dss_id = 0; dss_id < m1->dss_count; dss_id++)
		if (memcmp(&m1->dss[dss_id].devid,
			   &m2->dss[dss_id].devid,
			   sizeof(m1->dss[dss_id].devid)) != 0)
			return false;

	return true;
}

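/*
 * Deduplicate mirrors: under inode->i_lock, return an existing mirror
 * with matching device IDs and filehandles (taking a reference to it),
 * otherwise add @mirror to the layout's mirror list.
 */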
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		     struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (!ff_mirror_match_devid(mirror, pos))
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (refcount_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(u32 dss_count,
							     gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc_obj(*mirror, gfp_flags);
	if (mirror == NULL)
		return NULL;

	spin_lock_init(&mirror->lock);
	refcount_set(&mirror->ref, 1);
	INIT_LIST_HEAD(&mirror->mirrors);

	mirror->dss_count = dss_count;
	mirror->dss =
		kzalloc_objs(struct nfs4_ff_layout_ds_stripe, dss_count,
			     gfp_flags);
	if (mirror->dss == NULL) {
		kfree(mirror);
		return NULL;
	}

	for (u32 dss_id = 0; dss_id < mirror->dss_count; dss_id++)
		nfs_localio_file_init(&mirror->dss[dss_id].nfl);

	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	const struct cred *cred;
	u32 dss_id;

	ff_layout_remove_mirror(mirror);

	for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
		kfree(mirror->dss[dss_id].fh_versions);
		cred = rcu_access_pointer(mirror->dss[dss_id].ro_cred);
		put_cred(cred);
		cred = rcu_access_pointer(mirror->dss[dss_id].rw_cred);
		put_cred(cred);
		nfs_close_local_fh(&mirror->dss[dss_id].nfl);
		nfs4_ff_layout_put_deviceid(mirror->dss[dss_id].mirror_ds);
	}

	kfree(mirror->dss);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	u32 i;

	for (i = 0; i < fls->mirror_array_cnt; i++)
		ff_layout_put_mirror(fls->mirror_array[i]);
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
		      struct pnfs_layout_segment *l2)
{
	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
	u32 i;

	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
		return false;
	for (i = 0; i < fl1->mirror_array_cnt; i++) {
		if (fl1->mirror_array[i] != fl2->mirror_array[i])
			return false;
	}
	return true;
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		       const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
	      struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
				       old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
				       new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;
	if (!ff_lseg_match_mirrors(new, old))
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
							new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static u32 ff_mirror_efficiency_sum(const struct nfs4_ff_layout_mirror *mirror)
{
	u32 dss_id, sum = 0;

	for (dss_id = 0; dss_id < mirror->dss_count; dss_id++)
		sum += mirror->dss[dss_id].efficiency;

	return sum;
}

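/*
 * Simple O(n^2) selection sort: order the mirrors by descending aggregate
 * efficiency, so that reads try the most efficient mirror first.
 */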
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (ff_mirror_efficiency_sum(fls->mirror_array[i]) <
			    ff_mirror_efficiency_sum(fls->mirror_array[j]))
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

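/*
 * Decode the flexfiles layout body returned by LAYOUTGET: the stripe unit
 * and mirror count, then, for each mirror and each stripe within it, the
 * device ID, efficiency, stateid, filehandle versions and the user/group
 * the DS I/O should be performed as.
 */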
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct folio *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;
	struct nfs4_ff_layout_ds_stripe *dss_info;

	dprintk("--> %s\n", __func__);
	scratch = folio_alloc(gfp_flags, 0);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_folio(&stream, scratch);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc_flex(*fls, mirror_array, mirror_array_cnt, gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;

	u32 dss_count = 0;
	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct cred *kcred;
		const struct cred __rcu *cred;
		kuid_t uid;
		kgid_t gid;
		u32 fh_count, id;
		int j, dss_id;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;

		// Ensure all mirrors have the same stripe count.
		if (dss_count == 0)
			dss_count = be32_to_cpup(p);
		else if (dss_count != be32_to_cpup(p))
			goto out_err_free;

		if (dss_count > NFS4_FLEXFILE_LAYOUT_MAX_STRIPE_CNT ||
		    dss_count == 0)
			goto out_err_free;

		if (dss_count > 1 && stripe_unit == 0)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(dss_count, gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (dss_id = 0; dss_id < dss_count; dss_id++) {
			dss_info = &fls->mirror_array[i]->dss[dss_id];
			dss_info->mirror = fls->mirror_array[i];

			/* deviceid */
			rc = decode_deviceid(&stream, &dss_info->devid);
			if (rc)
				goto out_err_free;

			/* efficiency */
			rc = -EIO;
			p = xdr_inline_decode(&stream, 4);
			if (!p)
				goto out_err_free;
			dss_info->efficiency = be32_to_cpup(p);

			/* stateid */
			rc = decode_pnfs_stateid(&stream, &dss_info->stateid);
			if (rc)
				goto out_err_free;

			/* fh */
			rc = -EIO;
			p = xdr_inline_decode(&stream, 4);
			if (!p)
				goto out_err_free;
			fh_count = be32_to_cpup(p);

			dss_info->fh_versions =
				kzalloc_objs(struct nfs_fh, fh_count, gfp_flags);
			if (dss_info->fh_versions == NULL) {
				rc = -ENOMEM;
				goto out_err_free;
			}

			for (j = 0; j < fh_count; j++) {
				rc = decode_nfs_fh(&stream,
						   &dss_info->fh_versions[j]);
				if (rc)
					goto out_err_free;
			}

			dss_info->fh_versions_cnt = fh_count;

			/* user */
			rc = decode_name(&stream, &id);
			if (rc)
				goto out_err_free;

			uid = make_kuid(&init_user_ns, id);

			/* group */
			rc = decode_name(&stream, &id);
			if (rc)
				goto out_err_free;

			gid = make_kgid(&init_user_ns, id);

			if (gfp_flags & __GFP_FS)
				kcred = prepare_kernel_cred(&init_task);
			else {
				unsigned int nofs_flags = memalloc_nofs_save();

				kcred = prepare_kernel_cred(&init_task);
				memalloc_nofs_restore(nofs_flags);
			}
			rc = -ENOMEM;
			if (!kcred)
				goto out_err_free;
			kcred->fsuid = uid;
			kcred->fsgid = gid;
			cred = RCU_INITIALIZER(kcred);

			if (lgr->range.iomode == IOMODE_READ)
				rcu_assign_pointer(dss_info->ro_cred, cred);
			else
				rcu_assign_pointer(dss_info->rw_cred, cred);
		}

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			for (dss_id = 0; dss_id < dss_count; dss_id++) {
				dss_info = &fls->mirror_array[i]->dss[dss_id];
				/* swap cred ptrs so free_mirror will clean up old */
				if (lgr->range.iomode == IOMODE_READ) {
					cred = xchg(&mirror->dss[dss_id].ro_cred,
						    dss_info->ro_cred);
					rcu_assign_pointer(dss_info->ro_cred, cred);
				} else {
					cred = xchg(&mirror->dss[dss_id].rw_cred,
						    dss_info->rw_cred);
					rcu_assign_pointer(dss_info->rw_cred, cred);
				}
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, uid),
			from_kgid(&init_user_ns, gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	folio_put(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

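/*
 * Commit buckets are laid out mirror-major, one per (mirror, stripe) pair:
 * index = mirror_idx * dss_count + dss_id. For example, with dss_count == 4,
 * mirror 2 / stripe 1 maps to commit index 9. The two helpers below invert
 * the mapping with division and modulus.
 */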
static u32 calc_commit_idx(struct pnfs_layout_segment *lseg,
			   u32 mirror_idx, u32 dss_id)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	return (mirror_idx * flseg->mirror_array[0]->dss_count) + dss_id;
}

static u32 calc_mirror_idx_from_commit(struct pnfs_layout_segment *lseg,
				       u32 commit_index)
{
	return commit_index / FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
}

static u32 calc_dss_id_from_commit(struct pnfs_layout_segment *lseg,
				   u32 commit_index)
{
	return commit_index % FF_LAYOUT_LSEG(lseg)->mirror_array[0]->dss_count;
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

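/*
 * Start the busy timer for an I/O and decide whether a layoutstats report
 * is due. The reporting interval defaults to FF_LAYOUTSTATS_REPORT_INTERVAL
 * and may be overridden by the server-supplied per-mirror report_interval
 * or by the global layoutstats_timer setting. Returns true when a report
 * should be sent.
 */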
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    u32 dss_id,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->dss[dss_id].start_time)
		mirror->dss[dss_id].start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
	    report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
					__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
					__u64 requested,
					__u64 completed,
					ktime_t time_completed,
					ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
		ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
		ktime_add(iostat->aggregate_completion_time,
			  completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
				  struct nfs4_ff_layout_mirror *mirror,
				  u32 dss_id,
				  __u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(
		mirror, dss_id, &mirror->dss[dss_id].read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(
		&mirror->dss[dss_id].read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
				struct nfs4_ff_layout_mirror *mirror,
				u32 dss_id,
				__u64 requested,
				__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].read_stat,
						requested, completed,
						ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
				   struct nfs4_ff_layout_mirror *mirror,
				   u32 dss_id,
				   __u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(
		mirror,
		dss_id,
		&mirror->dss[dss_id].write_stat,
		now);
	nfs4_ff_layout_stat_io_update_requested(
		&mirror->dss[dss_id].write_stat,
		requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
				 struct nfs4_ff_layout_mirror *mirror,
				 u32 dss_id,
				 __u64 requested,
				 __u64 completed,
				 enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->dss[dss_id].write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);

	if (devid)
		nfs4_mark_deviceid_unavailable(devid);
}

static void
ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx, u32 dss_id)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);

	if (devid)
		nfs4_mark_deviceid_available(devid);
}

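/*
 * Walk the mirrors starting at @start_idx (mirrors are sorted by
 * efficiency), map @offset to its stripe, and return the first data server
 * that can be prepared for I/O. With @check_device set, mirrors whose
 * device is marked unavailable are skipped.
 */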
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
			     u32 start_idx, u32 *best_idx,
			     u32 offset, u32 *dss_id,
			     bool check_device)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
	u32 idx;

	/* mirrors are initially sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		*dss_id = nfs4_ff_layout_calc_dss_id(
				fls->stripe_unit,
				fls->mirror_array[idx]->dss_count,
				offset);
		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, *dss_id, false);
		if (IS_ERR(ds))
			continue;

		if (check_device &&
		    nfs4_test_deviceid_unavailable(&mirror->dss[*dss_id].mirror_ds->id_node)) {
			// reinitialize the error state in case this is the last iteration
			ds = ERR_PTR(-EINVAL);
			continue;
		}

		*best_idx = idx;
		break;
	}

	return ds;
}

static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
				 u32 start_idx, u32 *best_idx,
				 u32 offset, u32 *dss_id)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
					    offset, dss_id, false);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
				   u32 start_idx, u32 *best_idx,
				   u32 offset, u32 *dss_id)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx,
					    offset, dss_id, true);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  u32 start_idx, u32 *best_idx,
				  u32 offset, u32 *dss_id)
{
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx,
						offset, dss_id);
	if (!IS_ERR(ds))
		return ds;
	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx,
						offset, dss_id);
}

static struct nfs4_pnfs_ds *
ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
			  u32 *best_idx,
			  u32 offset,
			  u32 *dss_id)
{
	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
					       best_idx, offset, dss_id);
	if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
		return ds;
	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx,
						 offset, dss_id);
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg =
		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
				   req_offset(req), req->wb_bytes, IOMODE_READ,
				   strict_iomode, nfs_io_gfp_mask());
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}

static bool
ff_layout_lseg_is_striped(const struct nfs4_ff_layout_segment *fls)
{
	return fls->mirror_array[0]->dss_count > 1;
}

/*
 * ff_layout_pg_test(). Called by nfs_can_coalesce_requests()
 *
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
ff_layout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		  struct nfs_page *req)
{
	unsigned int size;
	u64 p_stripe, r_stripe;
	u32 stripe_offset;
	u64 segment_offset = pgio->pg_lseg->pls_range.offset;
	u32 stripe_unit = FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;

	/* calls nfs_generic_pg_test */
	size = pnfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;
	else if (!ff_layout_lseg_is_striped(FF_LAYOUT_LSEG(pgio->pg_lseg)))
		return size;

	/* see if req and prev are in the same stripe */
	if (prev) {
		p_stripe = (u64)req_offset(prev) - segment_offset;
		r_stripe = (u64)req_offset(req) - segment_offset;
		do_div(p_stripe, stripe_unit);
		do_div(r_stripe, stripe_unit);

		if (p_stripe != r_stripe)
			return 0;
	}

	/* calculate remaining bytes in the current stripe */
	div_u64_rem((u64)req_offset(req) - segment_offset,
		    stripe_unit,
		    &stripe_offset);
	WARN_ON_ONCE(stripe_offset > stripe_unit);
	if (stripe_offset >= stripe_unit)
		return 0;
	return min(stripe_unit - (unsigned int)stripe_offset, size);
}

static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	u32 ds_idx, dss_id;

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
retry:
	pnfs_generic_pg_check_layout(pgio, req);
	/* Use full layout for now */
	if (!pgio->pg_lseg) {
		ff_layout_pg_get_read(pgio, req, false);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		ff_layout_pg_get_read(pgio, req, true);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	/* Reset wb_nio, since getting layout segment was successful */
	req->wb_nio = 0;

	ds = ff_layout_get_ds_for_read(pgio, &ds_idx,
				       req_offset(req), &dss_id);
	if (IS_ERR(ds)) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_generic_pg_cleanup(pgio);
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize;

	pgio->pg_mirror_idx = ds_idx;
	return;
out_nolseg:
	if (pgio->pg_error < 0) {
		if (pgio->pg_error != -EAGAIN)
			return;
		/* Retry getting layout segment if lower layer returned -EAGAIN */
		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
				pgio->pg_error = -ETIMEDOUT;
			else
				pgio->pg_error = -EIO;
			return;
		}
		pgio->pg_error = 0;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}
out_mds:
	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_READ,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs4_pnfs_ds *ds;
	u32 i, dss_id;

retry:
	pnfs_generic_pg_check_layout(pgio, req);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg =
			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
					   req_offset(req), req->wb_bytes,
					   IOMODE_RW, false, nfs_io_gfp_mask());
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
		goto out_eagain;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		dss_id = nfs4_ff_layout_calc_dss_id(
				FF_LAYOUT_LSEG(pgio->pg_lseg)->stripe_unit,
				mirror->dss_count,
				req_offset(req));
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror,
					       dss_id, true);
		if (IS_ERR(ds)) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_generic_pg_cleanup(pgio);
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		pgm->pg_bsize = mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize;
	}

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
	return;
out_eagain:
	pnfs_generic_pg_cleanup(pgio);
	pgio->pg_error = -EAGAIN;
	return;
out_mds:
	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_write_mds(pgio);
	pgio->pg_error = -EAGAIN;
}

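/*
 * Report how many mirrors a write must fan out to. Falls back to a single
 * mirror (the MDS path) when no layout segment can be obtained.
 */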
static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg =
			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
					   req_offset(req), req->wb_bytes,
					   IOMODE_RW, false, nfs_io_gfp_mask());
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static u32
ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	u32 old = desc->pg_mirror_idx;

	desc->pg_mirror_idx = idx;
	return old;
}

static struct nfs_pgio_mirror *
ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	return &desc->pg_mirrors[idx];
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = ff_layout_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = ff_layout_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
	.pg_get_mirror = ff_layout_pg_get_mirror_write,
	.pg_set_mirror = ff_layout_pg_set_mirror_write,
};

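/*
 * Requeue a failed write, either back through pNFS (rescheduling the I/O
 * so another mirror can be tried) or through the MDS.
 */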
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_write_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_RW, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
	u32 idx = hdr->pgio_mirror_idx + 1;
	u32 new_idx = 0;
	u32 dss_id = 0;
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx,
					      hdr->args.offset, &dss_id);
	if (IS_ERR(ds))
		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
	else
		ff_layout_send_layouterror(hdr->lseg);
	pnfs_read_resend_pnfs(hdr, new_idx);
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);
	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_read_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_READ, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

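/*
 * Map an NFSv4 operation status or RPC-level error onto a recovery action:
 * -NFS4ERR_RESET_TO_PNFS (retry via the remaining mirrors),
 * -NFS4ERR_RESET_TO_MDS (fall back to the metadata server), -EAGAIN (retry
 * against the same DS) or -NFS4ERR_FATAL_IOERROR (give up).
 */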
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   u32 op_status,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   u32 idx, u32 dss_id)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (op_status) {
	case NFS4_OK:
	case NFS4ERR_NXIO:
		break;
	case NFSERR_PERM:
		if (!task->tk_xprt)
			break;
		xprt_force_disconnect(task->tk_xprt);
		goto out_retry;
	case NFS4ERR_BADSESSION:
	case NFS4ERR_BADSLOT:
	case NFS4ERR_BAD_HIGH_SLOT:
	case NFS4ERR_DEADSESSION:
	case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case NFS4ERR_SEQ_FALSE_RETRY:
	case NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		goto out_retry;
	case NFS4ERR_DELAY:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		fallthrough;
	case NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		goto out_retry;
	case NFS4ERR_RETRY_UNCACHED_REP:
		goto out_retry;
	/* Invalidate Layout errors */
	case NFS4ERR_PNFS_NO_LAYOUT:
	case NFS4ERR_STALE:
	case NFS4ERR_BADHANDLE:
	case NFS4ERR_ISDIR:
	case NFS4ERR_FHEXPIRED:
	case NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	default:
		break;
	}

	switch (task->tk_status) {
	/* RPC connection errors */
	case -ENETDOWN:
	case -ENETUNREACH:
		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
			return -NFS4ERR_FATAL_IOERROR;
		fallthrough;
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
	case -EPROTO:
	case -ENODEV:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				     &devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		break;
	default:
		break;
	}

	if (ff_layout_avoid_mds_available_ds(lseg))
		return -NFS4ERR_RESET_TO_PNFS;
reset:
	dprintk("%s Retry through MDS. Error %d\n", __func__,
		task->tk_status);
	return -NFS4ERR_RESET_TO_MDS;

out_retry:
	task->tk_status = 0;
	return -EAGAIN;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   u32 op_status,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   u32 idx, u32 dss_id)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx, dss_id);

	switch (op_status) {
	case NFS_OK:
	case NFSERR_NXIO:
		break;
	case NFSERR_PERM:
		if (!task->tk_xprt)
			break;
		xprt_force_disconnect(task->tk_xprt);
		goto out_retry;
	case NFSERR_ACCES:
	case NFSERR_BADHANDLE:
	case NFSERR_FBIG:
	case NFSERR_IO:
	case NFSERR_NOSPC:
	case NFSERR_ROFS:
	case NFSERR_STALE:
		goto out_reset_to_pnfs;
	case NFSERR_JUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		break;
	}

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	case -ENETDOWN:
	case -ENETUNREACH:
		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
			return -NFS4ERR_FATAL_IOERROR;
		fallthrough;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				     &devid->deviceid);
	}
out_reset_to_pnfs:
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

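/*
 * Dispatch to the NFSv3 or NFSv4 error handler according to the data
 * server's protocol version. A successful RPC also re-marks the device
 * as reachable.
 */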
static int ff_layout_async_handle_error(struct rpc_task *task,
					u32 op_status,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					u32 idx, u32 dss_id)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0) {
		ff_layout_mark_ds_reachable(lseg, idx, dss_id);
		return 0;
	}

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, op_status, clp,
						       lseg, idx, dss_id);
	case 4:
		return ff_layout_async_handle_error_v4(task, op_status, state,
						       clp, lseg, idx, dss_id);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					u32 idx, u32 dss_id, u64 offset, u64 length,
					u32 *op_status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 status = *op_status;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -EINVAL:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
		case -EPROTO:
		case -ENODEV:
			*op_status = status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			*op_status = status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, dss_id, offset, length, status,
				       opnum, nfs_io_gfp_mask());

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
	case NFS4ERR_PERM:
		break;
	case NFS4ERR_NXIO:
		ff_layout_mark_ds_unreachable(lseg, idx, dss_id);
		/*
		 * Don't return the layout if this is a read and we still
		 * have layouts to try
		 */
		if (opnum == OP_READ)
			break;
		fallthrough;
	default:
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
	}

	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				  struct nfs_pgio_header *hdr)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
	u32 dss_id = nfs4_ff_layout_calc_dss_id(
			flseg->stripe_unit,
			flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
			hdr->args.offset);
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(hdr->lseg,
					    hdr->pgio_mirror_idx, dss_id,
					    hdr->args.offset, hdr->args.count,
					    &hdr->res.op_status, OP_READ,
					    task->tk_status);
		trace_ff_layout_read_error(hdr, task->tk_status);
	}

	err = ff_layout_async_handle_error(task, hdr->res.op_status,
					   hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx,
					   dss_id);

	trace_nfs4_pnfs_read(hdr, err);
	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	case -NFS4ERR_FATAL_IOERROR:
		task->tk_status = -EIO;
		return 0;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a DS-replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 dss_id;

	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;

	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
	dss_id = nfs4_ff_layout_calc_dss_id(
			FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
			mirror->dss_count,
			hdr->args.offset);

	nfs4_ff_layout_stat_io_start_read(
			hdr->inode,
			mirror,
			dss_id,
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 dss_id;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;

	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
	dss_id = nfs4_ff_layout_calc_dss_id(
			FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
			mirror->dss_count,
			hdr->args.offset);

	nfs4_ff_layout_stat_io_end_read(
			task,
			mirror,
			dss_id,
			hdr->args.count,
			hdr->res.count);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

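/*
 * Common pre-RPC checks for reads: fail the task if the open context has
 * gone bad, bail out for a retry if the layout segment is no longer valid,
 * and otherwise start the layoutstats accounting for this I/O.
 */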
ff_layout_read_prepare_common(struct rpc_task * task,struct nfs_pgio_header * hdr)1685 static int ff_layout_read_prepare_common(struct rpc_task *task,
1686 struct nfs_pgio_header *hdr)
1687 {
1688 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1689 rpc_exit(task, -EIO);
1690 return -EIO;
1691 }
1692
1693 if (!pnfs_is_valid_lseg(hdr->lseg)) {
1694 rpc_exit(task, -EAGAIN);
1695 return -EAGAIN;
1696 }
1697
1698 ff_layout_read_record_layoutstats_start(task, hdr);
1699 return 0;
1700 }
1701
1702 /*
1703 * Call ops for the async read/write cases
1704 * In the case of dense layouts, the offset needs to be reset to its
1705 * original value.
1706 */
ff_layout_read_prepare_v3(struct rpc_task * task,void * data)1707 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1708 {
1709 struct nfs_pgio_header *hdr = data;
1710
1711 if (ff_layout_read_prepare_common(task, hdr))
1712 return;
1713
1714 rpc_call_start(task);
1715 }
1716
ff_layout_read_prepare_v4(struct rpc_task * task,void * data)1717 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1718 {
1719 struct nfs_pgio_header *hdr = data;
1720
1721 if (nfs4_setup_sequence(hdr->ds_clp,
1722 &hdr->args.seq_args,
1723 &hdr->res.seq_res,
1724 task))
1725 return;
1726
1727 ff_layout_read_prepare_common(task, hdr);
1728 }
1729
ff_layout_read_call_done(struct rpc_task * task,void * data)1730 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1731 {
1732 struct nfs_pgio_header *hdr = data;
1733
1734 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1735 task->tk_status == 0) {
1736 nfs4_sequence_done(task, &hdr->res.seq_res);
1737 return;
1738 }
1739
1740 /* Note this may cause RPC to be resent */
1741 hdr->mds_ops->rpc_call_done(task, hdr);
1742 }
1743
ff_layout_read_count_stats(struct rpc_task * task,void * data)1744 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1745 {
1746 struct nfs_pgio_header *hdr = data;
1747
1748 ff_layout_read_record_layoutstats_done(task, hdr);
1749 rpc_count_iostats_metrics(task,
1750 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1751 }
1752
ff_layout_read_release(void * data)1753 static void ff_layout_read_release(void *data)
1754 {
1755 struct nfs_pgio_header *hdr = data;
1756
1757 ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1758 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1759 ff_layout_resend_pnfs_read(hdr);
1760 else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1761 ff_layout_reset_read(hdr);
1762 pnfs_generic_rw_release(data);
1763 }
1764
1765
static int ff_layout_write_done_cb(struct rpc_task *task,
				   struct nfs_pgio_header *hdr)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(hdr->lseg);
	u32 dss_id = nfs4_ff_layout_calc_dss_id(
			flseg->stripe_unit,
			flseg->mirror_array[hdr->pgio_mirror_idx]->dss_count,
			hdr->args.offset);
	loff_t end_offs = 0;
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(hdr->lseg,
					    hdr->pgio_mirror_idx, dss_id,
					    hdr->args.offset, hdr->args.count,
					    &hdr->res.op_status, OP_WRITE,
					    task->tk_status);
		trace_ff_layout_write_error(hdr, task->tk_status);
	}

	err = ff_layout_async_handle_error(task, hdr->res.op_status,
					   hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx,
					   dss_id);

	trace_nfs4_pnfs_write(hdr, err);
	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	case -NFS4ERR_FATAL_IOERROR:
		task->tk_status = -EIO;
		return 0;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

	/* Zero out the fattr since we don't care about DS attributes at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

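/*
 * Per-DS completion callback for COMMIT: track DS errors, then either
 * retry the RPC, hand the requests back for resending, or record the
 * last written byte for layoutcommit.
 */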
static int ff_layout_commit_done_cb(struct rpc_task *task,
				    struct nfs_commit_data *data)
{
	int err;
	u32 idx = calc_mirror_idx_from_commit(data->lseg, data->ds_commit_index);
	u32 dss_id = calc_dss_id_from_commit(data->lseg, data->ds_commit_index);

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(data->lseg, idx, dss_id,
					    data->args.offset, data->args.count,
					    &data->res.op_status, OP_COMMIT,
					    task->tk_status);
		trace_ff_layout_commit_error(data, task->tk_status);
	}

	err = ff_layout_async_handle_error(task, data->res.op_status,
					   NULL, data->ds_clp, data->lseg, idx,
					   dss_id);

	trace_nfs4_pnfs_commit_ds(data, err);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	case -NFS4ERR_FATAL_IOERROR:
		task->tk_status = -EIO;
		return 0;
	}

	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
	return 0;
}

static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 dss_id;

	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;

	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
	dss_id = nfs4_ff_layout_calc_dss_id(
			FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
			mirror->dss_count,
			hdr->args.offset);

	nfs4_ff_layout_stat_io_start_write(
			hdr->inode,
			mirror,
			dss_id,
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 dss_id;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;

	mirror = FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx);
	dss_id = nfs4_ff_layout_calc_dss_id(
			FF_LAYOUT_LSEG(hdr->lseg)->stripe_unit,
			mirror->dss_count,
			hdr->args.offset);

	nfs4_ff_layout_stat_io_end_write(
			task,
			mirror,
			dss_id,
			hdr->args.count,
			hdr->res.count,
			hdr->res.verf->committed);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (!pnfs_is_valid_lseg(hdr->lseg)) {
		rpc_exit(task, -EAGAIN);
		return -EAGAIN;
	}

	ff_layout_write_record_layoutstats_start(task, hdr);
	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_write_prepare_common(task, hdr);
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
		ff_layout_send_layouterror(hdr->lseg);
		ff_layout_reset_write(hdr, true);
	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_write(hdr, false);
	pnfs_generic_rw_release(data);
}

static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	u32 idx, dss_id;

	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
	dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, idx),
			dss_id,
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;
	u32 idx, dss_id;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}

	idx = calc_mirror_idx_from_commit(cdata->lseg, cdata->ds_commit_index);
	dss_id = calc_dss_id_from_commit(cdata->lseg, cdata->ds_commit_index);
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, idx),
			dss_id,
			count, count, NFS_FILE_SYNC);
	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}

static int ff_layout_commit_prepare_common(struct rpc_task *task,
					   struct nfs_commit_data *cdata)
{
	if (!pnfs_is_valid_lseg(cdata->lseg)) {
		rpc_exit(task, -EAGAIN);
		return -EAGAIN;
	}

	ff_layout_commit_record_layoutstats_start(task, cdata);
	return 0;
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	if (ff_layout_commit_prepare_common(task, data))
		return;

	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (nfs4_setup_sequence(wdata->ds_clp,
				&wdata->args.seq_args,
				&wdata->res.seq_res,
				task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

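/*
 * Set up and fire an asynchronous READ to the data server selected by
 * the mirror index and stripe offset, falling back to the MDS (or
 * asking the caller to retry) when no usable DS is available.
 */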
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfsd_file *localio;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;
	u32 dss_id;
	bool ds_fatal_error = false;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	mirror = FF_LAYOUT_COMP(lseg, idx);
	dss_id = nfs4_ff_layout_calc_dss_id(
			FF_LAYOUT_LSEG(lseg)->stripe_unit,
			mirror->dss_count,
			offset);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, false);
	if (IS_ERR(ds)) {
		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
		goto out_failed;
	}

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode, dss_id);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror, dss_id);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Start IO accounting for local read */
	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
				   FMODE_READ);
	if (localio) {
		hdr->task.tk_start = ktime_get();
		ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
	}

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN, localio);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfsd_file *localio;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	u32 idx = hdr->pgio_mirror_idx;
	u32 dss_id;
	bool ds_fatal_error = false;

	mirror = FF_LAYOUT_COMP(lseg, idx);
	dss_id = nfs4_ff_layout_calc_dss_id(
			FF_LAYOUT_LSEG(lseg)->stripe_unit,
			mirror->dss_count,
			offset);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
	if (IS_ERR(ds)) {
		ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
		goto out_failed;
	}

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode, dss_id);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred, dss_id);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror, dss_id);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = calc_commit_idx(lseg, idx, dss_id);
	fh = nfs4_ff_layout_select_ds_fh(mirror, dss_id);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, dss_id, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Start IO accounting for local write */
	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
				   FMODE_READ|FMODE_WRITE);
	if (localio) {
		hdr->task.tk_start = ktime_get();
		ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
	}

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN, localio);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i, u32 dss_id)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->dss[dss_id].fh_versions[0];
}

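/*
 * Send a COMMIT to the data server that this commit bucket maps to.
 * If the layout segment is no longer usable or DS setup fails, hand
 * the requests back so they can be resent through the MDS.
 */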
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfsd_file *localio;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	u32 idx, dss_id;
	int vers, ret;
	struct nfs_fh *fh;

	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
		       test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
		goto out_err;

	idx = calc_mirror_idx_from_commit(lseg, data->ds_commit_index);
	mirror = FF_LAYOUT_COMP(lseg, idx);
	dss_id = calc_dss_id_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, dss_id, true);
	if (IS_ERR(ds))
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   data->inode, dss_id);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred, dss_id);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(mirror, dss_id);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	refcount_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, idx, dss_id);
	if (fh)
		data->args.fh = fh;

	/* Start IO accounting for local commit */
	localio = ff_local_open_fh(lseg, idx, dss_id, ds->ds_clp, ds_cred, fh,
				   FMODE_READ|FMODE_WRITE);
	if (localio) {
		data->task.tk_start = ktime_get();
		ff_layout_commit_record_layoutstats_start(&data->task, data);
	}

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				  vers == 3 ? &ff_layout_commit_call_ops_v3 :
					      &ff_layout_commit_call_ops_v4,
				  how, RPC_TASK_SOFTCONN, localio);
	put_cred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			  int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

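/*
 * Matchers used by ff_layout_cancel_io() to pick out only those RPC
 * tasks that belong to the layout segment being cancelled.
 */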
static bool ff_layout_match_rw(const struct rpc_task *task,
			       const struct nfs_pgio_header *hdr,
			       const struct pnfs_layout_segment *lseg)
{
	return hdr->lseg == lseg;
}

static bool ff_layout_match_commit(const struct rpc_task *task,
				   const struct nfs_commit_data *cdata,
				   const struct pnfs_layout_segment *lseg)
{
	return cdata->lseg == lseg;
}

static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
{
	const struct rpc_call_ops *ops = task->tk_ops;

	if (ops == &ff_layout_read_call_ops_v3 ||
	    ops == &ff_layout_read_call_ops_v4 ||
	    ops == &ff_layout_write_call_ops_v3 ||
	    ops == &ff_layout_write_call_ops_v4)
		return ff_layout_match_rw(task, task->tk_calldata, data);
	if (ops == &ff_layout_commit_call_ops_v3 ||
	    ops == &ff_layout_commit_call_ops_v4)
		return ff_layout_match_commit(task, task->tk_calldata, data);
	return false;
}

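/*
 * Cancel all in-flight I/O for a layout segment: walk every mirror and
 * stripe, cancel the matching RPC tasks with -EAGAIN, and disconnect
 * the transport of any client that had tasks cancelled.
 */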
static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_ff_layout_ds *mirror_ds;
	struct nfs4_pnfs_ds *ds;
	struct nfs_client *ds_clp;
	struct rpc_clnt *clnt;
	u32 idx, dss_id;

	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
		mirror = flseg->mirror_array[idx];
		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
			mirror_ds = mirror->dss[dss_id].mirror_ds;
			if (IS_ERR_OR_NULL(mirror_ds))
				continue;
			ds = mirror->dss[dss_id].mirror_ds->ds;
			if (!ds)
				continue;
			ds_clp = ds->ds_clp;
			if (!ds_clp)
				continue;
			clnt = ds_clp->cl_rpcclient;
			if (!clnt)
				continue;
			if (!rpc_cancel_tasks(clnt, -EAGAIN,
					      ff_layout_match_io, lseg))
				continue;
			rpc_clnt_disconnect(clnt);
		}
	}
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct pnfs_commit_array *array, *new;
	u32 size = flseg->mirror_array_cnt * flseg->mirror_array[0]->dss_count;

	new = pnfs_alloc_commit_array(size, nfs_io_gfp_mask());
	if (new) {
		spin_lock(&inode->i_lock);
		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
		spin_unlock(&inode->i_lock);
		if (array != new)
			pnfs_free_commit_array(new);
	}
}

static void
ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct inode *inode)
{
	spin_lock(&inode->i_lock);
	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
	spin_unlock(&inode->i_lock);
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
			const struct nfs4_layoutreturn_args *args,
			const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
		const nfs4_stateid *stateid,
		const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
		const nfs4_stateid *stateid,
		const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
			devinfo->ld_private.data);
}

/* Encode the iostats collected for each device in this layoutreturn */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
		const struct nfs4_layoutreturn_args *args,
		struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
				&args->layout->plh_stateid,
				&ff_args->devinfo[i]);
}

static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
		unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

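/*
 * Encode the flexfiles LAYOUTRETURN payload (ioerr and iostats arrays)
 * into a scratch page first, so that the total opaque length can be
 * written to the main stream ahead of the data itself.
 */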
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
		const void *voidargs,
		const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

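/*
 * Gather the accumulated DS errors and per-device statistics for the
 * range being returned, and attach them as the layout driver's private
 * LAYOUTRETURN payload.
 */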
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc_obj(*ff_args, nfs_io_gfp_mask());
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(
			&ff_layout->generic_hdr, &ff_args->devinfo[0],
			ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

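/*
 * Report accumulated DS I/O errors to the server via LAYOUTERROR,
 * batching at most NFS42_LAYOUTERROR_MAX errors per call. This is a
 * no-op when the kernel lacks NFSv4.2 support or the server does not
 * advertise the LAYOUTERROR capability.
 */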
#ifdef CONFIG_NFS_V4_2
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct nfs42_layout_error *errors;
	LIST_HEAD(head);

	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
		return;
	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
	if (list_empty(&head))
		return;

	errors = kmalloc_objs(*errors, NFS42_LAYOUTERROR_MAX, nfs_io_gfp_mask());
	if (errors != NULL) {
		const struct nfs4_ff_layout_ds_err *pos;
		size_t n = 0;

		list_for_each_entry(pos, &head, list) {
			errors[n].offset = pos->offset;
			errors[n].length = pos->length;
			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
			errors[n].errors[0].dev_id = pos->deviceid;
			errors[n].errors[0].status = pos->status;
			errors[n].errors[0].opnum = pos->opnum;
			n++;
			if (!list_is_last(&pos->list, &head) &&
			    n < NFS42_LAYOUTERROR_MAX)
				continue;
			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
				break;
			n = 0;
		}
		kfree(errors);
	}
	ff_layout_free_ds_ioerr(&head);
}
#else
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
}
#endif

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	netid_len = strlen(da->da_netid);
	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, da->da_netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
		ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_ds_stripe *dss_info)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = dss_info->mirror_ds->ds;
	struct nfs_fh *fh = &dss_info->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&dss_info->mirror->lock);
	ff_layout_encode_io_latency(xdr,
				    &dss_info->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr,
				    &dss_info->write_stat.io_stat);
	spin_unlock(&dss_info->mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr,
				 ktime_sub(ktime_get(),
					   dss_info->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_ds_stripe *dss_info = opaque->data;
	struct nfs4_ff_layout_mirror *mirror = dss_info->mirror;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free = ff_layout_free_layoutstats,
};

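/*
 * Fill the devinfo array with a snapshot of the per-stripe I/O
 * statistics for every mirror in the layout, up to dev_limit entries.
 * For LAYOUTSTATS only mirrors with fresh data are reported, while a
 * LAYOUTRETURN reports them all. Each entry takes a reference on its
 * mirror, dropped via layoutstat_ops->free.
 */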
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit, enum nfs4_ff_op_type type)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_ff_layout_ds_stripe *dss_info;
	struct nfs4_deviceid_node *dev;
	int i = 0, dss_id;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		for (dss_id = 0; dss_id < mirror->dss_count; ++dss_id) {
			dss_info = &mirror->dss[dss_id];
			if (i >= dev_limit)
				break;
			if (IS_ERR_OR_NULL(dss_info->mirror_ds))
				continue;
			if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
						&mirror->flags) &&
			    type != NFS4_FF_OP_LAYOUTRETURN)
				continue;
			/* mirror refcount put in cleanup_layoutstats */
			if (!refcount_inc_not_zero(&mirror->ref))
				continue;
			dev = &dss_info->mirror_ds->id_node;
			memcpy(&devinfo->dev_id, &dev->deviceid,
			       NFS4_DEVICEID4_SIZE);
			devinfo->offset = 0;
			devinfo->length = NFS4_MAX_UINT64;
			spin_lock(&mirror->lock);
			devinfo->read_count =
				dss_info->read_stat.io_stat.ops_completed;
			devinfo->read_bytes =
				dss_info->read_stat.io_stat.bytes_completed;
			devinfo->write_count =
				dss_info->write_stat.io_stat.ops_completed;
			devinfo->write_bytes =
				dss_info->write_stat.io_stat.bytes_completed;
			spin_unlock(&mirror->lock);
			devinfo->layout_type = LAYOUT_FLEX_FILES;
			devinfo->ld_private.ops = &layoutstat_ops;
			devinfo->ld_private.data = &mirror->dss[dss_id];

			devinfo++;
			i++;
		}
	}
	return i;
}

static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct pnfs_layout_hdr *lo;
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_objs(*args->devinfo, dev_count,
				     nfs_io_gfp_mask());
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	lo = NFS_I(args->inode)->layout;
	if (lo && pnfs_layout_is_valid(lo)) {
		ff_layout = FF_LAYOUT_FROM_HDR(lo);
		args->num_dev = ff_layout_mirror_prepare_stats(
				&ff_layout->generic_hdr, &args->devinfo[0],
				dev_count, NFS4_FF_OP_LAYOUTSTATS);
	} else
		args->num_dev = 0;
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

static int
ff_layout_set_layoutdriver(struct nfs_server *server,
		const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
	server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
#endif
	return 0;
}

static const struct pnfs_commit_ops ff_layout_commit_ops = {
	.setup_ds_info = ff_layout_setup_ds_info,
	.release_ds_info = ff_layout_release_ds_info,
	.mark_request_commit = pnfs_layout_mark_request_commit,
	.clear_request_commit = pnfs_generic_clear_request_commit,
	.scan_commit_lists = pnfs_generic_scan_commit_lists,
	.recover_commit_reqs = pnfs_generic_recover_commit_reqs,
	.commit_pagelist = ff_layout_commit_pagelist,
};

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id = LAYOUT_FLEX_FILES,
	.name = "LAYOUT_FLEX_FILES",
	.owner = THIS_MODULE,
	.flags = PNFS_LAYOUTGET_ON_OPEN,
	.max_layoutget_response = 4096, /* 1 page or so... */
	.set_layoutdriver = ff_layout_set_layoutdriver,
	.alloc_layout_hdr = ff_layout_alloc_layout_hdr,
	.free_layout_hdr = ff_layout_free_layout_hdr,
	.alloc_lseg = ff_layout_alloc_lseg,
	.free_lseg = ff_layout_free_lseg,
	.add_lseg = ff_layout_add_lseg,
	.pg_read_ops = &ff_layout_pg_read_ops,
	.pg_write_ops = &ff_layout_pg_write_ops,
	.get_ds_info = ff_layout_get_ds_info,
	.free_deviceid_node = ff_layout_free_deviceid_node,
	.read_pagelist = ff_layout_read_pagelist,
	.write_pagelist = ff_layout_write_pagelist,
	.alloc_deviceid_node = ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn = ff_layout_prepare_layoutreturn,
	.sync = pnfs_nfs_generic_sync,
	.prepare_layoutstats = ff_layout_prepare_layoutstats,
	.cancel_io = ff_layout_cancel_io,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);

module_param(io_maxretrans, ushort, 0644);
MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
		 "retries an I/O request before returning an error.");