1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Module for pnfs flexfile layout driver.
4 *
5 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6 *
7 * Tao Peng <bergwolf@primarydata.com>
8 */
9
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/file.h>
15 #include <linux/sched/mm.h>
16
17 #include <linux/sunrpc/metrics.h>
18
19 #include "flexfilelayout.h"
20 #include "../nfs4session.h"
21 #include "../nfs4idmap.h"
22 #include "../internal.h"
23 #include "../delegation.h"
24 #include "../nfs4trace.h"
25 #include "../iostat.h"
26 #include "../nfs.h"
27 #include "../nfs42.h"
28
29 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
30
31 #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
32 #define FF_LAYOUTRETURN_MAXERR 20
33
34 enum nfs4_ff_op_type {
35 NFS4_FF_OP_LAYOUTSTATS,
36 NFS4_FF_OP_LAYOUTRETURN,
37 };
38
39 static unsigned short io_maxretrans;
40
41 static const struct pnfs_commit_ops ff_layout_commit_ops;
42 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
43 struct nfs_pgio_header *hdr);
44 static int
45 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
46 struct nfs42_layoutstat_devinfo *devinfo,
47 int dev_limit, enum nfs4_ff_op_type type);
48 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
49 const struct nfs42_layoutstat_devinfo *devinfo,
50 struct nfs4_ff_layout_mirror *mirror);
51
52 static struct pnfs_layout_hdr *
53 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
54 {
55 struct nfs4_flexfile_layout *ffl;
56
57 ffl = kzalloc(sizeof(*ffl), gfp_flags);
58 if (ffl) {
59 pnfs_init_ds_commit_info(&ffl->commit_info);
60 INIT_LIST_HEAD(&ffl->error_list);
61 INIT_LIST_HEAD(&ffl->mirrors);
62 ffl->last_report_time = ktime_get();
63 ffl->commit_info.ops = &ff_layout_commit_ops;
64 return &ffl->generic_hdr;
65 } else
66 return NULL;
67 }
68
69 static void
70 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
71 {
72 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
73 struct nfs4_ff_layout_ds_err *err, *n;
74
75 list_for_each_entry_safe(err, n, &ffl->error_list, list) {
76 list_del(&err->list);
77 kfree(err);
78 }
79 kfree_rcu(ffl, generic_hdr.plh_rcu);
80 }
81
82 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
83 {
84 __be32 *p;
85
86 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
87 if (unlikely(p == NULL))
88 return -ENOBUFS;
89 stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
90 memcpy(stateid->data, p, NFS4_STATEID_SIZE);
91 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
92 p[0], p[1], p[2], p[3]);
93 return 0;
94 }
95
96 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
97 {
98 __be32 *p;
99
100 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
101 if (unlikely(!p))
102 return -ENOBUFS;
103 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
104 nfs4_print_deviceid(devid);
105 return 0;
106 }
107
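/*
 * Decode an opaque NFS filehandle from the XDR stream: a 4-byte length
 * followed by the filehandle bytes. Filehandles larger than NFS_MAXFHSIZE
 * are rejected rather than overflowing the fixed-size buffer.
 */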
108 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
109 {
110 __be32 *p;
111
112 p = xdr_inline_decode(xdr, 4);
113 if (unlikely(!p))
114 return -ENOBUFS;
115 fh->size = be32_to_cpup(p++);
116 if (fh->size > NFS_MAXFHSIZE) {
117 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
118 fh->size);
119 return -EOVERFLOW;
120 }
121 /* fh.data */
122 p = xdr_inline_decode(xdr, fh->size);
123 if (unlikely(!p))
124 return -ENOBUFS;
125 memcpy(&fh->data, p, fh->size);
126 dprintk("%s: fh len %d\n", __func__, fh->size);
127
128 return 0;
129 }
130
131 /*
132 * Currently only stringified uids and gids are accepted.
133 * I.e., Kerberos is not supported to the DSes, so no principals.
134 *
135 * That means that one common function will suffice, but when
136 * principals are added, this should be split to accommodate
137 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
138 */
139 static int
140 decode_name(struct xdr_stream *xdr, u32 *id)
141 {
142 __be32 *p;
143 int len;
144
145 /* opaque_length(4)*/
146 p = xdr_inline_decode(xdr, 4);
147 if (unlikely(!p))
148 return -ENOBUFS;
149 len = be32_to_cpup(p++);
150 if (len < 0)
151 return -EINVAL;
152
153 dprintk("%s: len %u\n", __func__, len);
154
155 /* opaque body */
156 p = xdr_inline_decode(xdr, len);
157 if (unlikely(!p))
158 return -ENOBUFS;
159
160 if (!nfs_map_string_to_numeric((char *)p, len, id))
161 return -EINVAL;
162
163 return 0;
164 }
165
166 static struct nfsd_file *
167 ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx,
168 struct nfs_client *clp, const struct cred *cred,
169 struct nfs_fh *fh, fmode_t mode)
170 {
171 #if IS_ENABLED(CONFIG_NFS_LOCALIO)
172 struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
173
174 return nfs_local_open_fh(clp, cred, fh, &mirror->nfl, mode);
175 #else
176 return NULL;
177 #endif
178 }
179
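/*
 * Two mirrors are considered equivalent when they expose the same set of
 * filehandle versions (order independent), so an existing mirror can be
 * reused instead of instantiating a duplicate.
 */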
180 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
181 const struct nfs4_ff_layout_mirror *m2)
182 {
183 int i, j;
184
185 if (m1->fh_versions_cnt != m2->fh_versions_cnt)
186 return false;
187 for (i = 0; i < m1->fh_versions_cnt; i++) {
188 bool found_fh = false;
189 for (j = 0; j < m2->fh_versions_cnt; j++) {
190 if (nfs_compare_fh(&m1->fh_versions[i],
191 &m2->fh_versions[j]) == 0) {
192 found_fh = true;
193 break;
194 }
195 }
196 if (!found_fh)
197 return false;
198 }
199 return true;
200 }
201
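/*
 * Add a freshly decoded mirror to the layout's mirror list, unless an
 * existing entry with the same deviceid and filehandles can have its
 * refcount raised, in which case that entry is returned and the caller is
 * expected to free its duplicate.
 */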
202 static struct nfs4_ff_layout_mirror *
203 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
204 struct nfs4_ff_layout_mirror *mirror)
205 {
206 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
207 struct nfs4_ff_layout_mirror *pos;
208 struct inode *inode = lo->plh_inode;
209
210 spin_lock(&inode->i_lock);
211 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
212 if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
213 continue;
214 if (!ff_mirror_match_fh(mirror, pos))
215 continue;
216 if (refcount_inc_not_zero(&pos->ref)) {
217 spin_unlock(&inode->i_lock);
218 return pos;
219 }
220 }
221 list_add(&mirror->mirrors, &ff_layout->mirrors);
222 mirror->layout = lo;
223 spin_unlock(&inode->i_lock);
224 return mirror;
225 }
226
227 static void
228 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
229 {
230 struct inode *inode;
231 if (mirror->layout == NULL)
232 return;
233 inode = mirror->layout->plh_inode;
234 spin_lock(&inode->i_lock);
235 list_del(&mirror->mirrors);
236 spin_unlock(&inode->i_lock);
237 mirror->layout = NULL;
238 }
239
240 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
241 {
242 struct nfs4_ff_layout_mirror *mirror;
243
244 mirror = kzalloc(sizeof(*mirror), gfp_flags);
245 if (mirror != NULL) {
246 spin_lock_init(&mirror->lock);
247 refcount_set(&mirror->ref, 1);
248 INIT_LIST_HEAD(&mirror->mirrors);
249 nfs_localio_file_init(&mirror->nfl);
250 }
251 return mirror;
252 }
253
254 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
255 {
256 const struct cred *cred;
257
258 ff_layout_remove_mirror(mirror);
259 kfree(mirror->fh_versions);
260 nfs_close_local_fh(&mirror->nfl);
261 cred = rcu_access_pointer(mirror->ro_cred);
262 put_cred(cred);
263 cred = rcu_access_pointer(mirror->rw_cred);
264 put_cred(cred);
265 nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
266 kfree(mirror);
267 }
268
269 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
270 {
271 if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
272 ff_layout_free_mirror(mirror);
273 }
274
275 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
276 {
277 u32 i;
278
279 for (i = 0; i < fls->mirror_array_cnt; i++)
280 ff_layout_put_mirror(fls->mirror_array[i]);
281 }
282
283 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
284 {
285 if (fls) {
286 ff_layout_free_mirror_array(fls);
287 kfree(fls);
288 }
289 }
290
291 static bool
292 ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
293 struct pnfs_layout_segment *l2)
294 {
295 const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
296 const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
297 u32 i;
298
299 if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
300 return false;
301 for (i = 0; i < fl1->mirror_array_cnt; i++) {
302 if (fl1->mirror_array[i] != fl2->mirror_array[i])
303 return false;
304 }
305 return true;
306 }
307
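/*
 * Ordering predicate for layout segment insertion: IOMODE_RW segments sort
 * after IOMODE_READ ones; segments with the same iomode are ordered by
 * their byte ranges (roughly by offset).
 */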
308 static bool
309 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
310 const struct pnfs_layout_range *l2)
311 {
312 u64 end1, end2;
313
314 if (l1->iomode != l2->iomode)
315 return l1->iomode != IOMODE_READ;
316 end1 = pnfs_calc_offset_end(l1->offset, l1->length);
317 end2 = pnfs_calc_offset_end(l2->offset, l2->length);
318 if (end1 < l2->offset)
319 return false;
320 if (end2 < l1->offset)
321 return true;
322 return l2->offset <= l1->offset;
323 }
324
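/*
 * Try to merge two layout segments. Merging is only allowed when the
 * segments share an iomode and an identical mirror set, neither range ends
 * before the other begins, and the old segment is not already being
 * returned. On success 'new' is extended to cover both ranges.
 */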
325 static bool
326 ff_lseg_merge(struct pnfs_layout_segment *new,
327 struct pnfs_layout_segment *old)
328 {
329 u64 new_end, old_end;
330
331 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
332 return false;
333 if (new->pls_range.iomode != old->pls_range.iomode)
334 return false;
335 old_end = pnfs_calc_offset_end(old->pls_range.offset,
336 old->pls_range.length);
337 if (old_end < new->pls_range.offset)
338 return false;
339 new_end = pnfs_calc_offset_end(new->pls_range.offset,
340 new->pls_range.length);
341 if (new_end < old->pls_range.offset)
342 return false;
343 if (!ff_lseg_match_mirrors(new, old))
344 return false;
345
346 /* Mergeable: copy info from 'old' to 'new' */
347 if (new_end < old_end)
348 new_end = old_end;
349 if (new->pls_range.offset < old->pls_range.offset)
350 new->pls_range.offset = old->pls_range.offset;
351 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
352 new_end);
353 if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
354 set_bit(NFS_LSEG_ROC, &new->pls_flags);
355 return true;
356 }
357
358 static void
359 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
360 struct pnfs_layout_segment *lseg,
361 struct list_head *free_me)
362 {
363 pnfs_generic_layout_insert_lseg(lo, lseg,
364 ff_lseg_range_is_after,
365 ff_lseg_merge,
366 free_me);
367 }
368
369 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
370 {
371 int i, j;
372
373 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
374 for (j = i + 1; j < fls->mirror_array_cnt; j++)
375 if (fls->mirror_array[i]->efficiency <
376 fls->mirror_array[j]->efficiency)
377 swap(fls->mirror_array[i],
378 fls->mirror_array[j]);
379 }
380 }
381
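/*
 * Decode a flexfiles layout segment from the LAYOUTGET response: stripe
 * unit and mirror count, then for each mirror its deviceid, efficiency,
 * stateid, filehandle list and the uid/gid to use for DS I/O. Mirrors are
 * deduplicated against the existing layout and finally sorted by
 * efficiency.
 */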
382 static struct pnfs_layout_segment *
383 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
384 struct nfs4_layoutget_res *lgr,
385 gfp_t gfp_flags)
386 {
387 struct pnfs_layout_segment *ret;
388 struct nfs4_ff_layout_segment *fls = NULL;
389 struct xdr_stream stream;
390 struct xdr_buf buf;
391 struct page *scratch;
392 u64 stripe_unit;
393 u32 mirror_array_cnt;
394 __be32 *p;
395 int i, rc;
396
397 dprintk("--> %s\n", __func__);
398 scratch = alloc_page(gfp_flags);
399 if (!scratch)
400 return ERR_PTR(-ENOMEM);
401
402 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
403 lgr->layoutp->len);
404 xdr_set_scratch_page(&stream, scratch);
405
406 /* stripe unit and mirror_array_cnt */
407 rc = -EIO;
408 p = xdr_inline_decode(&stream, 8 + 4);
409 if (!p)
410 goto out_err_free;
411
412 p = xdr_decode_hyper(p, &stripe_unit);
413 mirror_array_cnt = be32_to_cpup(p++);
414 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
415 stripe_unit, mirror_array_cnt);
416
417 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
418 mirror_array_cnt == 0)
419 goto out_err_free;
420
421 rc = -ENOMEM;
422 fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
423 gfp_flags);
424 if (!fls)
425 goto out_err_free;
426
427 fls->mirror_array_cnt = mirror_array_cnt;
428 fls->stripe_unit = stripe_unit;
429
430 for (i = 0; i < fls->mirror_array_cnt; i++) {
431 struct nfs4_ff_layout_mirror *mirror;
432 struct cred *kcred;
433 const struct cred __rcu *cred;
434 kuid_t uid;
435 kgid_t gid;
436 u32 ds_count, fh_count, id;
437 int j;
438
439 rc = -EIO;
440 p = xdr_inline_decode(&stream, 4);
441 if (!p)
442 goto out_err_free;
443 ds_count = be32_to_cpup(p);
444
445 /* FIXME: allow for striping? */
446 if (ds_count != 1)
447 goto out_err_free;
448
449 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
450 if (fls->mirror_array[i] == NULL) {
451 rc = -ENOMEM;
452 goto out_err_free;
453 }
454
455 fls->mirror_array[i]->ds_count = ds_count;
456
457 /* deviceid */
458 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
459 if (rc)
460 goto out_err_free;
461
462 /* efficiency */
463 rc = -EIO;
464 p = xdr_inline_decode(&stream, 4);
465 if (!p)
466 goto out_err_free;
467 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
468
469 /* stateid */
470 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
471 if (rc)
472 goto out_err_free;
473
474 /* fh */
475 rc = -EIO;
476 p = xdr_inline_decode(&stream, 4);
477 if (!p)
478 goto out_err_free;
479 fh_count = be32_to_cpup(p);
480
481 fls->mirror_array[i]->fh_versions =
482 kcalloc(fh_count, sizeof(struct nfs_fh),
483 gfp_flags);
484 if (fls->mirror_array[i]->fh_versions == NULL) {
485 rc = -ENOMEM;
486 goto out_err_free;
487 }
488
489 for (j = 0; j < fh_count; j++) {
490 rc = decode_nfs_fh(&stream,
491 &fls->mirror_array[i]->fh_versions[j]);
492 if (rc)
493 goto out_err_free;
494 }
495
496 fls->mirror_array[i]->fh_versions_cnt = fh_count;
497
498 /* user */
499 rc = decode_name(&stream, &id);
500 if (rc)
501 goto out_err_free;
502
503 uid = make_kuid(&init_user_ns, id);
504
505 /* group */
506 rc = decode_name(&stream, &id);
507 if (rc)
508 goto out_err_free;
509
510 gid = make_kgid(&init_user_ns, id);
511
512 if (gfp_flags & __GFP_FS)
513 kcred = prepare_kernel_cred(&init_task);
514 else {
515 unsigned int nofs_flags = memalloc_nofs_save();
516 kcred = prepare_kernel_cred(&init_task);
517 memalloc_nofs_restore(nofs_flags);
518 }
519 rc = -ENOMEM;
520 if (!kcred)
521 goto out_err_free;
522 kcred->fsuid = uid;
523 kcred->fsgid = gid;
524 cred = RCU_INITIALIZER(kcred);
525
526 if (lgr->range.iomode == IOMODE_READ)
527 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
528 else
529 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
530
531 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
532 if (mirror != fls->mirror_array[i]) {
533 /* swap cred ptrs so free_mirror will clean up old */
534 if (lgr->range.iomode == IOMODE_READ) {
535 cred = xchg(&mirror->ro_cred, cred);
536 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
537 } else {
538 cred = xchg(&mirror->rw_cred, cred);
539 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
540 }
541 ff_layout_free_mirror(fls->mirror_array[i]);
542 fls->mirror_array[i] = mirror;
543 }
544
545 dprintk("%s: iomode %s uid %u gid %u\n", __func__,
546 lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
547 from_kuid(&init_user_ns, uid),
548 from_kgid(&init_user_ns, gid));
549 }
550
551 p = xdr_inline_decode(&stream, 4);
552 if (!p)
553 goto out_sort_mirrors;
554 fls->flags = be32_to_cpup(p);
555
556 p = xdr_inline_decode(&stream, 4);
557 if (!p)
558 goto out_sort_mirrors;
559 for (i = 0; i < fls->mirror_array_cnt; i++)
560 fls->mirror_array[i]->report_interval = be32_to_cpup(p);
561
562 out_sort_mirrors:
563 ff_layout_sort_mirrors(fls);
564 ret = &fls->generic_hdr;
565 dprintk("<-- %s (success)\n", __func__);
566 out_free_page:
567 __free_page(scratch);
568 return ret;
569 out_err_free:
570 _ff_layout_free_lseg(fls);
571 ret = ERR_PTR(rc);
572 dprintk("<-- %s (%d)\n", __func__, rc);
573 goto out_free_page;
574 }
575
576 static void
577 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
578 {
579 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
580
581 dprintk("--> %s\n", __func__);
582
583 if (lseg->pls_range.iomode == IOMODE_RW) {
584 struct nfs4_flexfile_layout *ffl;
585 struct inode *inode;
586
587 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
588 inode = ffl->generic_hdr.plh_inode;
589 spin_lock(&inode->i_lock);
590 pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
591 spin_unlock(&inode->i_lock);
592 }
593 _ff_layout_free_lseg(fls);
594 }
595
596 static void
597 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
598 {
599 /* first IO request? */
600 if (atomic_inc_return(&timer->n_ops) == 1) {
601 timer->start_time = now;
602 }
603 }
604
605 static ktime_t
606 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
607 {
608 ktime_t start;
609
610 if (atomic_dec_return(&timer->n_ops) < 0)
611 WARN_ON_ONCE(1);
612
613 start = timer->start_time;
614 timer->start_time = now;
615 return ktime_sub(now, start);
616 }
617
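/*
 * Start the per-mirror busy timer for a new I/O and decide whether it is
 * time to ship a LAYOUTSTATS report, based on the mirror's advertised
 * report interval (or the layoutstats_timer module parameter).
 */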
618 static bool
619 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
620 struct nfs4_ff_layoutstat *layoutstat,
621 ktime_t now)
622 {
623 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
624 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
625
626 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
627 if (!mirror->start_time)
628 mirror->start_time = now;
629 if (mirror->report_interval != 0)
630 report_interval = (s64)mirror->report_interval * 1000LL;
631 else if (layoutstats_timer != 0)
632 report_interval = (s64)layoutstats_timer * 1000LL;
633 if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
634 report_interval) {
635 ffl->last_report_time = now;
636 return true;
637 }
638
639 return false;
640 }
641
642 static void
643 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
644 __u64 requested)
645 {
646 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
647
648 iostat->ops_requested++;
649 iostat->bytes_requested += requested;
650 }
651
652 static void
653 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
654 __u64 requested,
655 __u64 completed,
656 ktime_t time_completed,
657 ktime_t time_started)
658 {
659 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
660 ktime_t completion_time = ktime_sub(time_completed, time_started);
661 ktime_t timer;
662
663 iostat->ops_completed++;
664 iostat->bytes_completed += completed;
665 iostat->bytes_not_delivered += requested - completed;
666
667 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
668 iostat->total_busy_time =
669 ktime_add(iostat->total_busy_time, timer);
670 iostat->aggregate_completion_time =
671 ktime_add(iostat->aggregate_completion_time,
672 completion_time);
673 }
674
675 static void
676 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
677 struct nfs4_ff_layout_mirror *mirror,
678 __u64 requested, ktime_t now)
679 {
680 bool report;
681
682 spin_lock(&mirror->lock);
683 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
684 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
685 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
686 spin_unlock(&mirror->lock);
687
688 if (report)
689 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
690 }
691
692 static void
693 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
694 struct nfs4_ff_layout_mirror *mirror,
695 __u64 requested,
696 __u64 completed)
697 {
698 spin_lock(&mirror->lock);
699 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
700 requested, completed,
701 ktime_get(), task->tk_start);
702 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
703 spin_unlock(&mirror->lock);
704 }
705
706 static void
707 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
708 struct nfs4_ff_layout_mirror *mirror,
709 __u64 requested, ktime_t now)
710 {
711 bool report;
712
713 spin_lock(&mirror->lock);
714 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
715 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
716 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
717 spin_unlock(&mirror->lock);
718
719 if (report)
720 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
721 }
722
723 static void
724 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
725 struct nfs4_ff_layout_mirror *mirror,
726 __u64 requested,
727 __u64 completed,
728 enum nfs3_stable_how committed)
729 {
730 if (committed == NFS_UNSTABLE)
731 requested = completed = 0;
732
733 spin_lock(&mirror->lock);
734 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
735 requested, completed, ktime_get(), task->tk_start);
736 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
737 spin_unlock(&mirror->lock);
738 }
739
740 static void
741 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
742 {
743 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
744
745 if (devid)
746 nfs4_mark_deviceid_unavailable(devid);
747 }
748
749 static void
750 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
751 {
752 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
753
754 if (devid)
755 nfs4_mark_deviceid_available(devid);
756 }
757
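/*
 * Scan the mirror array starting at start_idx and return the first mirror
 * whose data server can be prepared, optionally skipping devices currently
 * marked unavailable. The chosen index is returned through best_idx.
 */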
758 static struct nfs4_pnfs_ds *
759 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
760 u32 start_idx, u32 *best_idx,
761 bool check_device)
762 {
763 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
764 struct nfs4_ff_layout_mirror *mirror;
765 struct nfs4_pnfs_ds *ds;
766 u32 idx;
767
768 /* mirrors are initially sorted by efficiency */
769 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
770 mirror = FF_LAYOUT_COMP(lseg, idx);
771 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
772 if (!ds)
773 continue;
774
775 if (check_device &&
776 nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
777 continue;
778
779 *best_idx = idx;
780 return ds;
781 }
782
783 return NULL;
784 }
785
786 static struct nfs4_pnfs_ds *
787 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
788 u32 start_idx, u32 *best_idx)
789 {
790 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
791 }
792
793 static struct nfs4_pnfs_ds *
794 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
795 u32 start_idx, u32 *best_idx)
796 {
797 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
798 }
799
800 static struct nfs4_pnfs_ds *
801 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
802 u32 start_idx, u32 *best_idx)
803 {
804 struct nfs4_pnfs_ds *ds;
805
806 ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
807 if (ds)
808 return ds;
809 return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
810 }
811
812 static struct nfs4_pnfs_ds *
813 ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
814 u32 *best_idx)
815 {
816 struct pnfs_layout_segment *lseg = pgio->pg_lseg;
817 struct nfs4_pnfs_ds *ds;
818
819 ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
820 best_idx);
821 if (ds || !pgio->pg_mirror_idx)
822 return ds;
823 return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
824 }
825
826 static void
827 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
828 struct nfs_page *req,
829 bool strict_iomode)
830 {
831 pnfs_put_lseg(pgio->pg_lseg);
832 pgio->pg_lseg =
833 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
834 req_offset(req), req->wb_bytes, IOMODE_READ,
835 strict_iomode, nfs_io_gfp_mask());
836 if (IS_ERR(pgio->pg_lseg)) {
837 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
838 pgio->pg_lseg = NULL;
839 }
840 }
841
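/*
 * Page I/O initialisation for reads: obtain a layout segment (retrying
 * with a strict IOMODE_READ segment if reads should not be issued on the
 * RW layout), pick a data server, and fall back to the MDS when no DS can
 * be reached and the layout permits it.
 */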
842 static void
843 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
844 struct nfs_page *req)
845 {
846 struct nfs_pgio_mirror *pgm;
847 struct nfs4_ff_layout_mirror *mirror;
848 struct nfs4_pnfs_ds *ds;
849 u32 ds_idx;
850
851 if (NFS_SERVER(pgio->pg_inode)->flags &
852 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
853 pgio->pg_maxretrans = io_maxretrans;
854 retry:
855 pnfs_generic_pg_check_layout(pgio, req);
856 /* Use full layout for now */
857 if (!pgio->pg_lseg) {
858 ff_layout_pg_get_read(pgio, req, false);
859 if (!pgio->pg_lseg)
860 goto out_nolseg;
861 }
862 if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
863 ff_layout_pg_get_read(pgio, req, true);
864 if (!pgio->pg_lseg)
865 goto out_nolseg;
866 }
867 /* Reset wb_nio, since getting layout segment was successful */
868 req->wb_nio = 0;
869
870 ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
871 if (!ds) {
872 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
873 goto out_mds;
874 pnfs_generic_pg_cleanup(pgio);
875 /* Sleep for 1 second before retrying */
876 ssleep(1);
877 goto retry;
878 }
879
880 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
881 pgm = &pgio->pg_mirrors[0];
882 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
883
884 pgio->pg_mirror_idx = ds_idx;
885 return;
886 out_nolseg:
887 if (pgio->pg_error < 0) {
888 if (pgio->pg_error != -EAGAIN)
889 return;
890 /* Retry getting layout segment if lower layer returned -EAGAIN */
891 if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
892 if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
893 pgio->pg_error = -ETIMEDOUT;
894 else
895 pgio->pg_error = -EIO;
896 return;
897 }
898 pgio->pg_error = 0;
899 /* Sleep for 1 second before retrying */
900 ssleep(1);
901 goto retry;
902 }
903 out_mds:
904 trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
905 0, NFS4_MAX_UINT64, IOMODE_READ,
906 NFS_I(pgio->pg_inode)->layout,
907 pgio->pg_lseg);
908 pgio->pg_maxretrans = 0;
909 nfs_pageio_reset_read_mds(pgio);
910 }
911
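/*
 * Page I/O initialisation for writes: obtain an IOMODE_RW layout segment
 * and prepare every mirror's data server, falling back to writing through
 * the MDS (or sleeping and retrying) when a data server cannot be reached.
 */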
912 static void
913 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
914 struct nfs_page *req)
915 {
916 struct nfs4_ff_layout_mirror *mirror;
917 struct nfs_pgio_mirror *pgm;
918 struct nfs4_pnfs_ds *ds;
919 u32 i;
920
921 retry:
922 pnfs_generic_pg_check_layout(pgio, req);
923 if (!pgio->pg_lseg) {
924 pgio->pg_lseg =
925 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
926 req_offset(req), req->wb_bytes,
927 IOMODE_RW, false, nfs_io_gfp_mask());
928 if (IS_ERR(pgio->pg_lseg)) {
929 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
930 pgio->pg_lseg = NULL;
931 return;
932 }
933 }
934 /* If no lseg, fall back to write through mds */
935 if (pgio->pg_lseg == NULL)
936 goto out_mds;
937
938 /* Use a direct mapping of ds_idx to pgio mirror_idx */
939 if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
940 goto out_eagain;
941
942 for (i = 0; i < pgio->pg_mirror_count; i++) {
943 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
944 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
945 if (!ds) {
946 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
947 goto out_mds;
948 pnfs_generic_pg_cleanup(pgio);
949 /* Sleep for 1 second before retrying */
950 ssleep(1);
951 goto retry;
952 }
953 pgm = &pgio->pg_mirrors[i];
954 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
955 }
956
957 if (NFS_SERVER(pgio->pg_inode)->flags &
958 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
959 pgio->pg_maxretrans = io_maxretrans;
960 return;
961 out_eagain:
962 pnfs_generic_pg_cleanup(pgio);
963 pgio->pg_error = -EAGAIN;
964 return;
965 out_mds:
966 trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
967 0, NFS4_MAX_UINT64, IOMODE_RW,
968 NFS_I(pgio->pg_inode)->layout,
969 pgio->pg_lseg);
970 pgio->pg_maxretrans = 0;
971 nfs_pageio_reset_write_mds(pgio);
972 pgio->pg_error = -EAGAIN;
973 }
974
975 static unsigned int
976 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
977 struct nfs_page *req)
978 {
979 if (!pgio->pg_lseg) {
980 pgio->pg_lseg =
981 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
982 req_offset(req), req->wb_bytes,
983 IOMODE_RW, false, nfs_io_gfp_mask());
984 if (IS_ERR(pgio->pg_lseg)) {
985 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
986 pgio->pg_lseg = NULL;
987 goto out;
988 }
989 }
990 if (pgio->pg_lseg)
991 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
992
993 trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
994 0, NFS4_MAX_UINT64, IOMODE_RW,
995 NFS_I(pgio->pg_inode)->layout,
996 pgio->pg_lseg);
997 /* no lseg means that pnfs is not in use, so no mirroring here */
998 nfs_pageio_reset_write_mds(pgio);
999 out:
1000 return 1;
1001 }
1002
1003 static u32
1004 ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1005 {
1006 u32 old = desc->pg_mirror_idx;
1007
1008 desc->pg_mirror_idx = idx;
1009 return old;
1010 }
1011
1012 static struct nfs_pgio_mirror *
1013 ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1014 {
1015 return &desc->pg_mirrors[idx];
1016 }
1017
1018 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1019 .pg_init = ff_layout_pg_init_read,
1020 .pg_test = pnfs_generic_pg_test,
1021 .pg_doio = pnfs_generic_pg_readpages,
1022 .pg_cleanup = pnfs_generic_pg_cleanup,
1023 };
1024
1025 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1026 .pg_init = ff_layout_pg_init_write,
1027 .pg_test = pnfs_generic_pg_test,
1028 .pg_doio = pnfs_generic_pg_writepages,
1029 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1030 .pg_cleanup = pnfs_generic_pg_cleanup,
1031 .pg_get_mirror = ff_layout_pg_get_mirror_write,
1032 .pg_set_mirror = ff_layout_pg_set_mirror_write,
1033 };
1034
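/*
 * Requeue a failed write: after initiating any needed layoutcommit, either
 * reschedule the I/O through pNFS (retry_pnfs) or resend it through the
 * MDS.
 */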
1035 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1036 {
1037 struct rpc_task *task = &hdr->task;
1038
1039 pnfs_layoutcommit_inode(hdr->inode, false);
1040
1041 if (retry_pnfs) {
1042 dprintk("%s Reset task %5u for i/o through pNFS "
1043 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1044 hdr->task.tk_pid,
1045 hdr->inode->i_sb->s_id,
1046 (unsigned long long)NFS_FILEID(hdr->inode),
1047 hdr->args.count,
1048 (unsigned long long)hdr->args.offset);
1049
1050 hdr->completion_ops->reschedule_io(hdr);
1051 return;
1052 }
1053
1054 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1055 dprintk("%s Reset task %5u for i/o through MDS "
1056 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1057 hdr->task.tk_pid,
1058 hdr->inode->i_sb->s_id,
1059 (unsigned long long)NFS_FILEID(hdr->inode),
1060 hdr->args.count,
1061 (unsigned long long)hdr->args.offset);
1062
1063 trace_pnfs_mds_fallback_write_done(hdr->inode,
1064 hdr->args.offset, hdr->args.count,
1065 IOMODE_RW, NFS_I(hdr->inode)->layout,
1066 hdr->lseg);
1067 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1068 }
1069 }
1070
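/*
 * Resend a failed read through pNFS on the next mirror: send a LAYOUTERROR
 * if another data server is available, otherwise mark the layout for
 * return before resending.
 */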
1071 static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1072 {
1073 u32 idx = hdr->pgio_mirror_idx + 1;
1074 u32 new_idx = 0;
1075
1076 if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1077 ff_layout_send_layouterror(hdr->lseg);
1078 else
1079 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1080 pnfs_read_resend_pnfs(hdr, new_idx);
1081 }
1082
1083 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1084 {
1085 struct rpc_task *task = &hdr->task;
1086
1087 pnfs_layoutcommit_inode(hdr->inode, false);
1088 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1089
1090 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1091 dprintk("%s Reset task %5u for i/o through MDS "
1092 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1093 hdr->task.tk_pid,
1094 hdr->inode->i_sb->s_id,
1095 (unsigned long long)NFS_FILEID(hdr->inode),
1096 hdr->args.count,
1097 (unsigned long long)hdr->args.offset);
1098
1099 trace_pnfs_mds_fallback_read_done(hdr->inode,
1100 hdr->args.offset, hdr->args.count,
1101 IOMODE_READ, NFS_I(hdr->inode)->layout,
1102 hdr->lseg);
1103 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1104 }
1105 }
1106
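/*
 * Handle DS errors for NFSv4.x data servers. Depending on the error this
 * either retries the RPC, triggers session or layout recovery, or tells
 * the caller to redirect the I/O to the MDS or back through pNFS.
 */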
1107 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1108 u32 op_status,
1109 struct nfs4_state *state,
1110 struct nfs_client *clp,
1111 struct pnfs_layout_segment *lseg,
1112 u32 idx)
1113 {
1114 struct pnfs_layout_hdr *lo = lseg->pls_layout;
1115 struct inode *inode = lo->plh_inode;
1116 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1117 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1118
1119 switch (op_status) {
1120 case NFS4_OK:
1121 case NFS4ERR_NXIO:
1122 break;
1123 case NFSERR_PERM:
1124 if (!task->tk_xprt)
1125 break;
1126 xprt_force_disconnect(task->tk_xprt);
1127 goto out_retry;
1128 case NFS4ERR_BADSESSION:
1129 case NFS4ERR_BADSLOT:
1130 case NFS4ERR_BAD_HIGH_SLOT:
1131 case NFS4ERR_DEADSESSION:
1132 case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1133 case NFS4ERR_SEQ_FALSE_RETRY:
1134 case NFS4ERR_SEQ_MISORDERED:
1135 dprintk("%s ERROR %d, Reset session. Exchangeid "
1136 "flags 0x%x\n", __func__, task->tk_status,
1137 clp->cl_exchange_flags);
1138 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1139 goto out_retry;
1140 case NFS4ERR_DELAY:
1141 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1142 fallthrough;
1143 case NFS4ERR_GRACE:
1144 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1145 goto out_retry;
1146 case NFS4ERR_RETRY_UNCACHED_REP:
1147 goto out_retry;
1148 /* Invalidate Layout errors */
1149 case NFS4ERR_PNFS_NO_LAYOUT:
1150 case NFS4ERR_STALE:
1151 case NFS4ERR_BADHANDLE:
1152 case NFS4ERR_ISDIR:
1153 case NFS4ERR_FHEXPIRED:
1154 case NFS4ERR_WRONG_TYPE:
1155 dprintk("%s Invalid layout error %d\n", __func__,
1156 task->tk_status);
1157 /*
1158 * Destroy layout so new i/o will get a new layout.
1159 * Layout will not be destroyed until all current lseg
1160 * references are put. Mark layout as invalid to resend failed
1161 * i/o and all i/o waiting on the slot table to the MDS until
1162 * layout is destroyed and a new valid layout is obtained.
1163 */
1164 pnfs_destroy_layout(NFS_I(inode));
1165 rpc_wake_up(&tbl->slot_tbl_waitq);
1166 goto reset;
1167 default:
1168 break;
1169 }
1170
1171 switch (task->tk_status) {
1172 /* RPC connection errors */
1173 case -ENETDOWN:
1174 case -ENETUNREACH:
1175 if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1176 return -NFS4ERR_FATAL_IOERROR;
1177 fallthrough;
1178 case -ECONNREFUSED:
1179 case -EHOSTDOWN:
1180 case -EHOSTUNREACH:
1181 case -EIO:
1182 case -ETIMEDOUT:
1183 case -EPIPE:
1184 case -EPROTO:
1185 case -ENODEV:
1186 dprintk("%s DS connection error %d\n", __func__,
1187 task->tk_status);
1188 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1189 &devid->deviceid);
1190 rpc_wake_up(&tbl->slot_tbl_waitq);
1191 break;
1192 default:
1193 break;
1194 }
1195
1196 if (ff_layout_avoid_mds_available_ds(lseg))
1197 return -NFS4ERR_RESET_TO_PNFS;
1198 reset:
1199 dprintk("%s Retry through MDS. Error %d\n", __func__,
1200 task->tk_status);
1201 return -NFS4ERR_RESET_TO_MDS;
1202
1203 out_retry:
1204 task->tk_status = 0;
1205 return -EAGAIN;
1206 }
1207
1208 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1209 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1210 u32 op_status,
1211 struct nfs_client *clp,
1212 struct pnfs_layout_segment *lseg,
1213 u32 idx)
1214 {
1215 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1216
1217 switch (op_status) {
1218 case NFS_OK:
1219 case NFSERR_NXIO:
1220 break;
1221 case NFSERR_PERM:
1222 if (!task->tk_xprt)
1223 break;
1224 xprt_force_disconnect(task->tk_xprt);
1225 goto out_retry;
1226 case NFSERR_ACCES:
1227 case NFSERR_BADHANDLE:
1228 case NFSERR_FBIG:
1229 case NFSERR_IO:
1230 case NFSERR_NOSPC:
1231 case NFSERR_ROFS:
1232 case NFSERR_STALE:
1233 goto out_reset_to_pnfs;
1234 case NFSERR_JUKEBOX:
1235 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1236 goto out_retry;
1237 default:
1238 break;
1239 }
1240
1241 switch (task->tk_status) {
1242 /* File access problems. Don't mark the device as unavailable */
1243 case -EACCES:
1244 case -ESTALE:
1245 case -EISDIR:
1246 case -EBADHANDLE:
1247 case -ELOOP:
1248 case -ENOSPC:
1249 break;
1250 case -EJUKEBOX:
1251 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1252 goto out_retry;
1253 case -ENETDOWN:
1254 case -ENETUNREACH:
1255 if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
1256 return -NFS4ERR_FATAL_IOERROR;
1257 fallthrough;
1258 default:
1259 dprintk("%s DS connection error %d\n", __func__,
1260 task->tk_status);
1261 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1262 &devid->deviceid);
1263 }
1264 out_reset_to_pnfs:
1265 /* FIXME: Need to prevent infinite looping here. */
1266 return -NFS4ERR_RESET_TO_PNFS;
1267 out_retry:
1268 task->tk_status = 0;
1269 rpc_restart_call_prepare(task);
1270 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1271 return -EAGAIN;
1272 }
1273
1274 static int ff_layout_async_handle_error(struct rpc_task *task,
1275 u32 op_status,
1276 struct nfs4_state *state,
1277 struct nfs_client *clp,
1278 struct pnfs_layout_segment *lseg,
1279 u32 idx)
1280 {
1281 int vers = clp->cl_nfs_mod->rpc_vers->number;
1282
1283 if (task->tk_status >= 0) {
1284 ff_layout_mark_ds_reachable(lseg, idx);
1285 return 0;
1286 }
1287
1288 /* Handle the case of an invalid layout segment */
1289 if (!pnfs_is_valid_lseg(lseg))
1290 return -NFS4ERR_RESET_TO_PNFS;
1291
1292 switch (vers) {
1293 case 3:
1294 return ff_layout_async_handle_error_v3(task, op_status, clp,
1295 lseg, idx);
1296 case 4:
1297 return ff_layout_async_handle_error_v4(task, op_status, state,
1298 clp, lseg, idx);
1299 default:
1300 /* should never happen */
1301 WARN_ON_ONCE(1);
1302 return 0;
1303 }
1304 }
1305
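/*
 * Record a DS I/O error against the mirror so it can be reported via
 * LAYOUTERROR/LAYOUTRETURN, mapping local errno values to NFS4ERR codes
 * and marking the device unreachable or returning the layout where
 * appropriate.
 */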
1306 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1307 u32 idx, u64 offset, u64 length,
1308 u32 *op_status, int opnum, int error)
1309 {
1310 struct nfs4_ff_layout_mirror *mirror;
1311 u32 status = *op_status;
1312 int err;
1313
1314 if (status == 0) {
1315 switch (error) {
1316 case -ETIMEDOUT:
1317 case -EPFNOSUPPORT:
1318 case -EPROTONOSUPPORT:
1319 case -EOPNOTSUPP:
1320 case -EINVAL:
1321 case -ECONNREFUSED:
1322 case -ECONNRESET:
1323 case -EHOSTDOWN:
1324 case -EHOSTUNREACH:
1325 case -ENETDOWN:
1326 case -ENETUNREACH:
1327 case -EADDRINUSE:
1328 case -ENOBUFS:
1329 case -EPIPE:
1330 case -EPERM:
1331 case -EPROTO:
1332 case -ENODEV:
1333 *op_status = status = NFS4ERR_NXIO;
1334 break;
1335 case -EACCES:
1336 *op_status = status = NFS4ERR_ACCESS;
1337 break;
1338 default:
1339 return;
1340 }
1341 }
1342
1343 mirror = FF_LAYOUT_COMP(lseg, idx);
1344 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1345 mirror, offset, length, status, opnum,
1346 nfs_io_gfp_mask());
1347
1348 switch (status) {
1349 case NFS4ERR_DELAY:
1350 case NFS4ERR_GRACE:
1351 case NFS4ERR_PERM:
1352 break;
1353 case NFS4ERR_NXIO:
1354 ff_layout_mark_ds_unreachable(lseg, idx);
1355 /*
1356 * Don't return the layout if this is a read and we still
1357 * have layouts to try
1358 */
1359 if (opnum == OP_READ)
1360 break;
1361 fallthrough;
1362 default:
1363 pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1364 lseg);
1365 }
1366
1367 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1368 }
1369
1370 /* NFS_PROTO call done callback routines */
1371 static int ff_layout_read_done_cb(struct rpc_task *task,
1372 struct nfs_pgio_header *hdr)
1373 {
1374 int err;
1375
1376 if (task->tk_status < 0) {
1377 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1378 hdr->args.offset, hdr->args.count,
1379 &hdr->res.op_status, OP_READ,
1380 task->tk_status);
1381 trace_ff_layout_read_error(hdr, task->tk_status);
1382 }
1383
1384 err = ff_layout_async_handle_error(task, hdr->res.op_status,
1385 hdr->args.context->state,
1386 hdr->ds_clp, hdr->lseg,
1387 hdr->pgio_mirror_idx);
1388
1389 trace_nfs4_pnfs_read(hdr, err);
1390 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1391 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1392 switch (err) {
1393 case -NFS4ERR_RESET_TO_PNFS:
1394 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1395 return task->tk_status;
1396 case -NFS4ERR_RESET_TO_MDS:
1397 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1398 return task->tk_status;
1399 case -EAGAIN:
1400 goto out_eagain;
1401 case -NFS4ERR_FATAL_IOERROR:
1402 task->tk_status = -EIO;
1403 return 0;
1404 }
1405
1406 return 0;
1407 out_eagain:
1408 rpc_restart_call_prepare(task);
1409 return -EAGAIN;
1410 }
1411
1412 static bool
1413 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1414 {
1415 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1416 }
1417
1418 /*
1419 * We reference the rpc_cred of the first WRITE that triggers the need for
1420 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1421 * rfc5661 is not clear about which credential should be used.
1422 *
1423 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
1424 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1425 * we always send layoutcommit after DS writes.
1426 */
1427 static void
1428 ff_layout_set_layoutcommit(struct inode *inode,
1429 struct pnfs_layout_segment *lseg,
1430 loff_t end_offset)
1431 {
1432 if (!ff_layout_need_layoutcommit(lseg))
1433 return;
1434
1435 pnfs_set_layoutcommit(inode, lseg, end_offset);
1436 dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1437 (unsigned long long) NFS_I(inode)->layout->plh_lwb);
1438 }
1439
1440 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1441 struct nfs_pgio_header *hdr)
1442 {
1443 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1444 return;
1445 nfs4_ff_layout_stat_io_start_read(hdr->inode,
1446 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1447 hdr->args.count,
1448 task->tk_start);
1449 }
1450
1451 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1452 struct nfs_pgio_header *hdr)
1453 {
1454 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1455 return;
1456 nfs4_ff_layout_stat_io_end_read(task,
1457 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1458 hdr->args.count,
1459 hdr->res.count);
1460 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1461 }
1462
1463 static int ff_layout_read_prepare_common(struct rpc_task *task,
1464 struct nfs_pgio_header *hdr)
1465 {
1466 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1467 rpc_exit(task, -EIO);
1468 return -EIO;
1469 }
1470
1471 if (!pnfs_is_valid_lseg(hdr->lseg)) {
1472 rpc_exit(task, -EAGAIN);
1473 return -EAGAIN;
1474 }
1475
1476 ff_layout_read_record_layoutstats_start(task, hdr);
1477 return 0;
1478 }
1479
1480 /*
1481 * Call ops for the async read/write cases
1482 * In the case of dense layouts, the offset needs to be reset to its
1483 * original value.
1484 */
1485 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1486 {
1487 struct nfs_pgio_header *hdr = data;
1488
1489 if (ff_layout_read_prepare_common(task, hdr))
1490 return;
1491
1492 rpc_call_start(task);
1493 }
1494
1495 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1496 {
1497 struct nfs_pgio_header *hdr = data;
1498
1499 if (nfs4_setup_sequence(hdr->ds_clp,
1500 &hdr->args.seq_args,
1501 &hdr->res.seq_res,
1502 task))
1503 return;
1504
1505 ff_layout_read_prepare_common(task, hdr);
1506 }
1507
1508 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1509 {
1510 struct nfs_pgio_header *hdr = data;
1511
1512 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1513 task->tk_status == 0) {
1514 nfs4_sequence_done(task, &hdr->res.seq_res);
1515 return;
1516 }
1517
1518 /* Note this may cause RPC to be resent */
1519 hdr->mds_ops->rpc_call_done(task, hdr);
1520 }
1521
1522 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1523 {
1524 struct nfs_pgio_header *hdr = data;
1525
1526 ff_layout_read_record_layoutstats_done(task, hdr);
1527 rpc_count_iostats_metrics(task,
1528 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1529 }
1530
1531 static void ff_layout_read_release(void *data)
1532 {
1533 struct nfs_pgio_header *hdr = data;
1534
1535 ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1536 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1537 ff_layout_resend_pnfs_read(hdr);
1538 else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1539 ff_layout_reset_read(hdr);
1540 pnfs_generic_rw_release(data);
1541 }
1542
1543
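/*
 * Completion callback for a DS WRITE: track any error, decide whether to
 * resend through pNFS or the MDS, and record the need for a layoutcommit
 * when the DS reply was stable.
 */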
1544 static int ff_layout_write_done_cb(struct rpc_task *task,
1545 struct nfs_pgio_header *hdr)
1546 {
1547 loff_t end_offs = 0;
1548 int err;
1549
1550 if (task->tk_status < 0) {
1551 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1552 hdr->args.offset, hdr->args.count,
1553 &hdr->res.op_status, OP_WRITE,
1554 task->tk_status);
1555 trace_ff_layout_write_error(hdr, task->tk_status);
1556 }
1557
1558 err = ff_layout_async_handle_error(task, hdr->res.op_status,
1559 hdr->args.context->state,
1560 hdr->ds_clp, hdr->lseg,
1561 hdr->pgio_mirror_idx);
1562
1563 trace_nfs4_pnfs_write(hdr, err);
1564 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1565 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1566 switch (err) {
1567 case -NFS4ERR_RESET_TO_PNFS:
1568 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1569 return task->tk_status;
1570 case -NFS4ERR_RESET_TO_MDS:
1571 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1572 return task->tk_status;
1573 case -EAGAIN:
1574 return -EAGAIN;
1575 case -NFS4ERR_FATAL_IOERROR:
1576 task->tk_status = -EIO;
1577 return 0;
1578 }
1579
1580 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1581 hdr->res.verf->committed == NFS_DATA_SYNC)
1582 end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1583
1584 /* Note: if the write is unstable, don't set end_offs until commit */
1585 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1586
1587 /* zero out fattr since we don't care DS attr at all */
1588 hdr->fattr.valid = 0;
1589 if (task->tk_status >= 0)
1590 nfs_writeback_update_inode(hdr);
1591
1592 return 0;
1593 }
1594
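/*
 * Completion callback for a DS COMMIT: track DS errors, arrange for the
 * writes to be resent where necessary, and otherwise record the
 * layoutcommit range.
 */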
1595 static int ff_layout_commit_done_cb(struct rpc_task *task,
1596 struct nfs_commit_data *data)
1597 {
1598 int err;
1599
1600 if (task->tk_status < 0) {
1601 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1602 data->args.offset, data->args.count,
1603 &data->res.op_status, OP_COMMIT,
1604 task->tk_status);
1605 trace_ff_layout_commit_error(data, task->tk_status);
1606 }
1607
1608 err = ff_layout_async_handle_error(task, data->res.op_status,
1609 NULL, data->ds_clp, data->lseg,
1610 data->ds_commit_index);
1611
1612 trace_nfs4_pnfs_commit_ds(data, err);
1613 switch (err) {
1614 case -NFS4ERR_RESET_TO_PNFS:
1615 pnfs_generic_prepare_to_resend_writes(data);
1616 return -EAGAIN;
1617 case -NFS4ERR_RESET_TO_MDS:
1618 pnfs_generic_prepare_to_resend_writes(data);
1619 return -EAGAIN;
1620 case -EAGAIN:
1621 rpc_restart_call_prepare(task);
1622 return -EAGAIN;
1623 case -NFS4ERR_FATAL_IOERROR:
1624 task->tk_status = -EIO;
1625 return 0;
1626 }
1627
1628 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1629
1630 return 0;
1631 }
1632
1633 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1634 struct nfs_pgio_header *hdr)
1635 {
1636 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1637 return;
1638 nfs4_ff_layout_stat_io_start_write(hdr->inode,
1639 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1640 hdr->args.count,
1641 task->tk_start);
1642 }
1643
1644 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1645 struct nfs_pgio_header *hdr)
1646 {
1647 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1648 return;
1649 nfs4_ff_layout_stat_io_end_write(task,
1650 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1651 hdr->args.count, hdr->res.count,
1652 hdr->res.verf->committed);
1653 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1654 }
1655
1656 static int ff_layout_write_prepare_common(struct rpc_task *task,
1657 struct nfs_pgio_header *hdr)
1658 {
1659 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1660 rpc_exit(task, -EIO);
1661 return -EIO;
1662 }
1663
1664 if (!pnfs_is_valid_lseg(hdr->lseg)) {
1665 rpc_exit(task, -EAGAIN);
1666 return -EAGAIN;
1667 }
1668
1669 ff_layout_write_record_layoutstats_start(task, hdr);
1670 return 0;
1671 }
1672
1673 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1674 {
1675 struct nfs_pgio_header *hdr = data;
1676
1677 if (ff_layout_write_prepare_common(task, hdr))
1678 return;
1679
1680 rpc_call_start(task);
1681 }
1682
1683 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1684 {
1685 struct nfs_pgio_header *hdr = data;
1686
1687 if (nfs4_setup_sequence(hdr->ds_clp,
1688 &hdr->args.seq_args,
1689 &hdr->res.seq_res,
1690 task))
1691 return;
1692
1693 ff_layout_write_prepare_common(task, hdr);
1694 }
1695
1696 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1697 {
1698 struct nfs_pgio_header *hdr = data;
1699
1700 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1701 task->tk_status == 0) {
1702 nfs4_sequence_done(task, &hdr->res.seq_res);
1703 return;
1704 }
1705
1706 /* Note this may cause RPC to be resent */
1707 hdr->mds_ops->rpc_call_done(task, hdr);
1708 }
1709
1710 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1711 {
1712 struct nfs_pgio_header *hdr = data;
1713
1714 ff_layout_write_record_layoutstats_done(task, hdr);
1715 rpc_count_iostats_metrics(task,
1716 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1717 }
1718
1719 static void ff_layout_write_release(void *data)
1720 {
1721 struct nfs_pgio_header *hdr = data;
1722
1723 ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1724 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1725 ff_layout_send_layouterror(hdr->lseg);
1726 ff_layout_reset_write(hdr, true);
1727 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1728 ff_layout_reset_write(hdr, false);
1729 pnfs_generic_rw_release(data);
1730 }
1731
1732 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1733 struct nfs_commit_data *cdata)
1734 {
1735 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1736 return;
1737 nfs4_ff_layout_stat_io_start_write(cdata->inode,
1738 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1739 0, task->tk_start);
1740 }
1741
1742 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1743 struct nfs_commit_data *cdata)
1744 {
1745 struct nfs_page *req;
1746 __u64 count = 0;
1747
1748 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1749 return;
1750
1751 if (task->tk_status == 0) {
1752 list_for_each_entry(req, &cdata->pages, wb_list)
1753 count += req->wb_bytes;
1754 }
1755 nfs4_ff_layout_stat_io_end_write(task,
1756 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1757 count, count, NFS_FILE_SYNC);
1758 set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1759 }
1760
1761 static int ff_layout_commit_prepare_common(struct rpc_task *task,
1762 struct nfs_commit_data *cdata)
1763 {
1764 if (!pnfs_is_valid_lseg(cdata->lseg)) {
1765 rpc_exit(task, -EAGAIN);
1766 return -EAGAIN;
1767 }
1768
1769 ff_layout_commit_record_layoutstats_start(task, cdata);
1770 return 0;
1771 }
1772
1773 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1774 {
1775 if (ff_layout_commit_prepare_common(task, data))
1776 return;
1777
1778 rpc_call_start(task);
1779 }
1780
1781 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1782 {
1783 struct nfs_commit_data *wdata = data;
1784
1785 if (nfs4_setup_sequence(wdata->ds_clp,
1786 &wdata->args.seq_args,
1787 &wdata->res.seq_res,
1788 task))
1789 return;
1790 ff_layout_commit_prepare_common(task, data);
1791 }
1792
1793 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1794 {
1795 pnfs_generic_write_commit_done(task, data);
1796 }
1797
1798 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1799 {
1800 struct nfs_commit_data *cdata = data;
1801
1802 ff_layout_commit_record_layoutstats_done(task, cdata);
1803 rpc_count_iostats_metrics(task,
1804 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1805 }
1806
1807 static void ff_layout_commit_release(void *data)
1808 {
1809 struct nfs_commit_data *cdata = data;
1810
1811 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1812 pnfs_generic_commit_release(data);
1813 }
1814
1815 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1816 .rpc_call_prepare = ff_layout_read_prepare_v3,
1817 .rpc_call_done = ff_layout_read_call_done,
1818 .rpc_count_stats = ff_layout_read_count_stats,
1819 .rpc_release = ff_layout_read_release,
1820 };
1821
1822 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1823 .rpc_call_prepare = ff_layout_read_prepare_v4,
1824 .rpc_call_done = ff_layout_read_call_done,
1825 .rpc_count_stats = ff_layout_read_count_stats,
1826 .rpc_release = ff_layout_read_release,
1827 };
1828
1829 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1830 .rpc_call_prepare = ff_layout_write_prepare_v3,
1831 .rpc_call_done = ff_layout_write_call_done,
1832 .rpc_count_stats = ff_layout_write_count_stats,
1833 .rpc_release = ff_layout_write_release,
1834 };
1835
1836 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1837 .rpc_call_prepare = ff_layout_write_prepare_v4,
1838 .rpc_call_done = ff_layout_write_call_done,
1839 .rpc_count_stats = ff_layout_write_count_stats,
1840 .rpc_release = ff_layout_write_release,
1841 };
1842
1843 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1844 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1845 .rpc_call_done = ff_layout_commit_done,
1846 .rpc_count_stats = ff_layout_commit_count_stats,
1847 .rpc_release = ff_layout_commit_release,
1848 };
1849
1850 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1851 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1852 .rpc_call_done = ff_layout_commit_done,
1853 .rpc_count_stats = ff_layout_commit_count_stats,
1854 .rpc_release = ff_layout_commit_release,
1855 };
1856
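/*
 * Issue an asynchronous READ to the data server backing the mirror
 * selected by the pageio layer (hdr->pgio_mirror_idx).  If the DS, its
 * RPC client or its credential cannot be set up, either retry via
 * another DS or fall back to reading through the MDS.
 */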
1857 static enum pnfs_try_status
1858 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1859 {
1860 struct pnfs_layout_segment *lseg = hdr->lseg;
1861 struct nfs4_pnfs_ds *ds;
1862 struct rpc_clnt *ds_clnt;
1863 struct nfsd_file *localio;
1864 struct nfs4_ff_layout_mirror *mirror;
1865 const struct cred *ds_cred;
1866 loff_t offset = hdr->args.offset;
1867 u32 idx = hdr->pgio_mirror_idx;
1868 int vers;
1869 struct nfs_fh *fh;
1870
1871 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1872 __func__, hdr->inode->i_ino,
1873 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1874
1875 mirror = FF_LAYOUT_COMP(lseg, idx);
1876 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1877 if (!ds)
1878 goto out_failed;
1879
1880 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1881 hdr->inode);
1882 if (IS_ERR(ds_clnt))
1883 goto out_failed;
1884
1885 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1886 if (!ds_cred)
1887 goto out_failed;
1888
1889 vers = nfs4_ff_layout_ds_version(mirror);
1890
1891 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1892 ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1893
1894 hdr->pgio_done_cb = ff_layout_read_done_cb;
1895 refcount_inc(&ds->ds_clp->cl_count);
1896 hdr->ds_clp = ds->ds_clp;
1897 fh = nfs4_ff_layout_select_ds_fh(mirror);
1898 if (fh)
1899 hdr->args.fh = fh;
1900
1901 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1902
1903 /*
1904 * Note that if we ever decide to split across DSes,
1905 * then we may need to handle dense-like offsets.
1906 */
1907 hdr->args.offset = offset;
1908 hdr->mds_offset = offset;
1909
1910 /* Start IO accounting for local read */
1911 localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ);
1912 if (localio) {
1913 hdr->task.tk_start = ktime_get();
1914 ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
1915 }
1916
1917 /* Perform an asynchronous read to ds */
1918 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1919 vers == 3 ? &ff_layout_read_call_ops_v3 :
1920 &ff_layout_read_call_ops_v4,
1921 0, RPC_TASK_SOFTCONN, localio);
1922 put_cred(ds_cred);
1923 return PNFS_ATTEMPTED;
1924
1925 out_failed:
1926 if (ff_layout_avoid_mds_available_ds(lseg))
1927 return PNFS_TRY_AGAIN;
1928 trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1929 hdr->args.offset, hdr->args.count,
1930 IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1931 return PNFS_NOT_ATTEMPTED;
1932 }
1933
1934 /* Perform async writes. */
1935 static enum pnfs_try_status
1936 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1937 {
1938 struct pnfs_layout_segment *lseg = hdr->lseg;
1939 struct nfs4_pnfs_ds *ds;
1940 struct rpc_clnt *ds_clnt;
1941 struct nfsd_file *localio;
1942 struct nfs4_ff_layout_mirror *mirror;
1943 const struct cred *ds_cred;
1944 loff_t offset = hdr->args.offset;
1945 int vers;
1946 struct nfs_fh *fh;
1947 u32 idx = hdr->pgio_mirror_idx;
1948
1949 mirror = FF_LAYOUT_COMP(lseg, idx);
1950 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1951 if (!ds)
1952 goto out_failed;
1953
1954 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1955 hdr->inode);
1956 if (IS_ERR(ds_clnt))
1957 goto out_failed;
1958
1959 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1960 if (!ds_cred)
1961 goto out_failed;
1962
1963 vers = nfs4_ff_layout_ds_version(mirror);
1964
1965 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1966 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1967 offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1968 vers);
1969
1970 hdr->pgio_done_cb = ff_layout_write_done_cb;
1971 refcount_inc(&ds->ds_clp->cl_count);
1972 hdr->ds_clp = ds->ds_clp;
1973 hdr->ds_commit_idx = idx;
1974 fh = nfs4_ff_layout_select_ds_fh(mirror);
1975 if (fh)
1976 hdr->args.fh = fh;
1977
1978 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1979
1980 /*
1981 * Note that if we ever decide to split across DSes,
1982 * then we may need to handle dense-like offsets.
1983 */
1984 hdr->args.offset = offset;
1985
1986 /* Start IO accounting for local write */
1987 localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
1988 FMODE_READ|FMODE_WRITE);
1989 if (localio) {
1990 hdr->task.tk_start = ktime_get();
1991 ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
1992 }
1993
1994 /* Perform an asynchronous write */
1995 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1996 vers == 3 ? &ff_layout_write_call_ops_v3 :
1997 &ff_layout_write_call_ops_v4,
1998 sync, RPC_TASK_SOFTCONN, localio);
1999 put_cred(ds_cred);
2000 return PNFS_ATTEMPTED;
2001
2002 out_failed:
2003 if (ff_layout_avoid_mds_available_ds(lseg))
2004 return PNFS_TRY_AGAIN;
2005 trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
2006 hdr->args.offset, hdr->args.count,
2007 IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
2008 return PNFS_NOT_ATTEMPTED;
2009 }
2010
2011 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
2012 {
2013 return i;
2014 }
2015
2016 static struct nfs_fh *
2017 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
2018 {
2019 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2020
2021 /* FIXME: Assume that there is only one NFS version available
2022 * for the DS.
2023 */
2024 return &flseg->mirror_array[i]->fh_versions[0];
2025 }
2026
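/*
 * Send a COMMIT to the data server that backs this commit bucket.  On
 * any setup failure, hand the requests back to be resent through the
 * MDS.
 */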
2027 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
2028 {
2029 struct pnfs_layout_segment *lseg = data->lseg;
2030 struct nfs4_pnfs_ds *ds;
2031 struct rpc_clnt *ds_clnt;
2032 struct nfsd_file *localio;
2033 struct nfs4_ff_layout_mirror *mirror;
2034 const struct cred *ds_cred;
2035 u32 idx;
2036 int vers, ret;
2037 struct nfs_fh *fh;
2038
2039 if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
2040 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
2041 goto out_err;
2042
2043 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
2044 mirror = FF_LAYOUT_COMP(lseg, idx);
2045 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
2046 if (!ds)
2047 goto out_err;
2048
2049 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2050 data->inode);
2051 if (IS_ERR(ds_clnt))
2052 goto out_err;
2053
2054 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
2055 if (!ds_cred)
2056 goto out_err;
2057
2058 vers = nfs4_ff_layout_ds_version(mirror);
2059
2060 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
2061 data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
2062 vers);
2063 data->commit_done_cb = ff_layout_commit_done_cb;
2064 data->cred = ds_cred;
2065 refcount_inc(&ds->ds_clp->cl_count);
2066 data->ds_clp = ds->ds_clp;
2067 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
2068 if (fh)
2069 data->args.fh = fh;
2070
2071 /* Start IO accounting for local commit */
2072 localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
2073 FMODE_READ|FMODE_WRITE);
2074 if (localio) {
2075 data->task.tk_start = ktime_get();
2076 ff_layout_commit_record_layoutstats_start(&data->task, data);
2077 }
2078
2079 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2080 vers == 3 ? &ff_layout_commit_call_ops_v3 :
2081 &ff_layout_commit_call_ops_v4,
2082 how, RPC_TASK_SOFTCONN, localio);
2083 put_cred(ds_cred);
2084 return ret;
2085 out_err:
2086 pnfs_generic_prepare_to_resend_writes(data);
2087 pnfs_generic_commit_release(data);
2088 return -EAGAIN;
2089 }
2090
2091 static int
2092 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2093 int how, struct nfs_commit_info *cinfo)
2094 {
2095 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2096 ff_layout_initiate_commit);
2097 }
2098
2099 static bool ff_layout_match_rw(const struct rpc_task *task,
2100 const struct nfs_pgio_header *hdr,
2101 const struct pnfs_layout_segment *lseg)
2102 {
2103 return hdr->lseg == lseg;
2104 }
2105
2106 static bool ff_layout_match_commit(const struct rpc_task *task,
2107 const struct nfs_commit_data *cdata,
2108 const struct pnfs_layout_segment *lseg)
2109 {
2110 return cdata->lseg == lseg;
2111 }
2112
2113 static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2114 {
2115 const struct rpc_call_ops *ops = task->tk_ops;
2116
2117 if (ops == &ff_layout_read_call_ops_v3 ||
2118 ops == &ff_layout_read_call_ops_v4 ||
2119 ops == &ff_layout_write_call_ops_v3 ||
2120 ops == &ff_layout_write_call_ops_v4)
2121 return ff_layout_match_rw(task, task->tk_calldata, data);
2122 if (ops == &ff_layout_commit_call_ops_v3 ||
2123 ops == &ff_layout_commit_call_ops_v4)
2124 return ff_layout_match_commit(task, task->tk_calldata, data);
2125 return false;
2126 }
2127
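/*
 * Cancel all in-flight read, write and commit RPCs that reference this
 * layout segment, then disconnect the affected DS transports so the
 * cancelled tasks terminate promptly.
 */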
2128 static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2129 {
2130 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2131 struct nfs4_ff_layout_mirror *mirror;
2132 struct nfs4_ff_layout_ds *mirror_ds;
2133 struct nfs4_pnfs_ds *ds;
2134 struct nfs_client *ds_clp;
2135 struct rpc_clnt *clnt;
2136 u32 idx;
2137
2138 for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2139 mirror = flseg->mirror_array[idx];
2140 mirror_ds = mirror->mirror_ds;
2141 if (IS_ERR_OR_NULL(mirror_ds))
2142 continue;
2143 ds = mirror->mirror_ds->ds;
2144 if (!ds)
2145 continue;
2146 ds_clp = ds->ds_clp;
2147 if (!ds_clp)
2148 continue;
2149 clnt = ds_clp->cl_rpcclient;
2150 if (!clnt)
2151 continue;
2152 if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2153 continue;
2154 rpc_clnt_disconnect(clnt);
2155 }
2156 }
2157
2158 static struct pnfs_ds_commit_info *
2159 ff_layout_get_ds_info(struct inode *inode)
2160 {
2161 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2162
2163 if (layout == NULL)
2164 return NULL;
2165
2166 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2167 }
2168
2169 static void
2170 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2171 struct pnfs_layout_segment *lseg)
2172 {
2173 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2174 struct inode *inode = lseg->pls_layout->plh_inode;
2175 struct pnfs_commit_array *array, *new;
2176
2177 new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2178 nfs_io_gfp_mask());
2179 if (new) {
2180 spin_lock(&inode->i_lock);
2181 array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2182 spin_unlock(&inode->i_lock);
2183 if (array != new)
2184 pnfs_free_commit_array(new);
2185 }
2186 }
2187
2188 static void
2189 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2190 struct inode *inode)
2191 {
2192 spin_lock(&inode->i_lock);
2193 pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2194 spin_unlock(&inode->i_lock);
2195 }
2196
2197 static void
2198 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2199 {
2200 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2201 id_node));
2202 }
2203
2204 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2205 const struct nfs4_layoutreturn_args *args,
2206 const struct nfs4_flexfile_layoutreturn_args *ff_args)
2207 {
2208 __be32 *start;
2209
2210 start = xdr_reserve_space(xdr, 4);
2211 if (unlikely(!start))
2212 return -E2BIG;
2213
2214 *start = cpu_to_be32(ff_args->num_errors);
2215 /* This assumes we always return _ALL_ layouts */
2216 return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2217 }
2218
2219 static void
2220 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2221 const nfs4_stateid *stateid,
2222 const struct nfs42_layoutstat_devinfo *devinfo)
2223 {
2224 __be32 *p;
2225
2226 p = xdr_reserve_space(xdr, 8 + 8);
2227 p = xdr_encode_hyper(p, devinfo->offset);
2228 p = xdr_encode_hyper(p, devinfo->length);
2229 encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2230 p = xdr_reserve_space(xdr, 4*8);
2231 p = xdr_encode_hyper(p, devinfo->read_count);
2232 p = xdr_encode_hyper(p, devinfo->read_bytes);
2233 p = xdr_encode_hyper(p, devinfo->write_count);
2234 p = xdr_encode_hyper(p, devinfo->write_bytes);
2235 encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2236 }
2237
2238 static void
2239 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2240 const nfs4_stateid *stateid,
2241 const struct nfs42_layoutstat_devinfo *devinfo)
2242 {
2243 ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2244 ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2245 devinfo->ld_private.data);
2246 }
2247
2248 /* Encode the per-device iostats gathered for this layoutreturn */
2249 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2250 const struct nfs4_layoutreturn_args *args,
2251 struct nfs4_flexfile_layoutreturn_args *ff_args)
2252 {
2253 __be32 *p;
2254 int i;
2255
2256 p = xdr_reserve_space(xdr, 4);
2257 *p = cpu_to_be32(ff_args->num_dev);
2258 for (i = 0; i < ff_args->num_dev; i++)
2259 ff_layout_encode_ff_iostat(xdr,
2260 &args->layout->plh_stateid,
2261 &ff_args->devinfo[i]);
2262 }
2263
2264 static void
2265 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2266 unsigned int num_entries)
2267 {
2268 unsigned int i;
2269
2270 for (i = 0; i < num_entries; i++) {
2271 if (!devinfo[i].ld_private.ops)
2272 continue;
2273 if (!devinfo[i].ld_private.ops->free)
2274 continue;
2275 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2276 }
2277 }
2278
2279 static struct nfs4_deviceid_node *
2280 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2281 struct pnfs_device *pdev, gfp_t gfp_flags)
2282 {
2283 struct nfs4_ff_layout_ds *dsaddr;
2284
2285 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2286 if (!dsaddr)
2287 return NULL;
2288 return &dsaddr->id_node;
2289 }
2290
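/*
 * Encode the flexfiles layoutreturn body (I/O errors followed by the
 * iostats array) into a scratch page, then emit it as a single opaque
 * blob in the LAYOUTRETURN arguments.
 */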
2291 static void
2292 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2293 const void *voidargs,
2294 const struct nfs4_xdr_opaque_data *ff_opaque)
2295 {
2296 const struct nfs4_layoutreturn_args *args = voidargs;
2297 struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2298 struct xdr_buf tmp_buf = {
2299 .head = {
2300 [0] = {
2301 .iov_base = page_address(ff_args->pages[0]),
2302 },
2303 },
2304 .buflen = PAGE_SIZE,
2305 };
2306 struct xdr_stream tmp_xdr;
2307 __be32 *start;
2308
2309 dprintk("%s: Begin\n", __func__);
2310
2311 xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2312
2313 ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2314 ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2315
2316 start = xdr_reserve_space(xdr, 4);
2317 *start = cpu_to_be32(tmp_buf.len);
2318 xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2319
2320 dprintk("%s: Return\n", __func__);
2321 }
2322
2323 static void
2324 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2325 {
2326 struct nfs4_flexfile_layoutreturn_args *ff_args;
2327
2328 if (!args->data)
2329 return;
2330 ff_args = args->data;
2331 args->data = NULL;
2332
2333 ff_layout_free_ds_ioerr(&ff_args->errors);
2334 ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2335
2336 put_page(ff_args->pages[0]);
2337 kfree(ff_args);
2338 }
2339
2340 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2341 .encode = ff_layout_encode_layoutreturn,
2342 .free = ff_layout_free_layoutreturn,
2343 };
2344
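/*
 * Collect the recorded DS I/O errors and per-mirror statistics for this
 * layout so that ff_layout_encode_layoutreturn() can include them in
 * the LAYOUTRETURN body.
 */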
2345 static int
2346 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2347 {
2348 struct nfs4_flexfile_layoutreturn_args *ff_args;
2349 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2350
2351 ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2352 if (!ff_args)
2353 goto out_nomem;
2354 ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2355 if (!ff_args->pages[0])
2356 goto out_nomem_free;
2357
2358 INIT_LIST_HEAD(&ff_args->errors);
2359 ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2360 &args->range, &ff_args->errors,
2361 FF_LAYOUTRETURN_MAXERR);
2362
2363 spin_lock(&args->inode->i_lock);
2364 ff_args->num_dev = ff_layout_mirror_prepare_stats(
2365 &ff_layout->generic_hdr, &ff_args->devinfo[0],
2366 ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2367 spin_unlock(&args->inode->i_lock);
2368
2369 args->ld_private->ops = &layoutreturn_ops;
2370 args->ld_private->data = ff_args;
2371 return 0;
2372 out_nomem_free:
2373 kfree(ff_args);
2374 out_nomem:
2375 return -ENOMEM;
2376 }
2377
2378 #ifdef CONFIG_NFS_V4_2
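/*
 * Report recorded DS errors for this layout segment to the MDS via
 * LAYOUTERROR, batching at most NFS42_LAYOUTERROR_MAX entries per call.
 */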
2379 void
2380 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2381 {
2382 struct pnfs_layout_hdr *lo = lseg->pls_layout;
2383 struct nfs42_layout_error *errors;
2384 LIST_HEAD(head);
2385
2386 if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2387 return;
2388 ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2389 if (list_empty(&head))
2390 return;
2391
2392 errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2393 nfs_io_gfp_mask());
2394 if (errors != NULL) {
2395 const struct nfs4_ff_layout_ds_err *pos;
2396 size_t n = 0;
2397
2398 list_for_each_entry(pos, &head, list) {
2399 errors[n].offset = pos->offset;
2400 errors[n].length = pos->length;
2401 nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2402 errors[n].errors[0].dev_id = pos->deviceid;
2403 errors[n].errors[0].status = pos->status;
2404 errors[n].errors[0].opnum = pos->opnum;
2405 n++;
2406 if (!list_is_last(&pos->list, &head) &&
2407 n < NFS42_LAYOUTERROR_MAX)
2408 continue;
2409 if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2410 break;
2411 n = 0;
2412 }
2413 kfree(errors);
2414 }
2415 ff_layout_free_ds_ioerr(&head);
2416 }
2417 #else
2418 void
2419 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2420 {
2421 }
2422 #endif
2423
2424 static int
2425 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2426 {
2427 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2428
2429 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2430 }
2431
2432 static size_t
2433 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2434 const int buflen)
2435 {
2436 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2437 const struct in6_addr *addr = &sin6->sin6_addr;
2438
2439 /*
2440 * RFC 4291, Section 2.2.2
2441 *
2442 * Shorthanded ANY address
2443 */
2444 if (ipv6_addr_any(addr))
2445 return snprintf(buf, buflen, "::");
2446
2447 /*
2448 * RFC 4291, Section 2.2.2
2449 *
2450 * Shorthanded loopback address
2451 */
2452 if (ipv6_addr_loopback(addr))
2453 return snprintf(buf, buflen, "::1");
2454
2455 /*
2456 * RFC 4291, Section 2.2.3
2457 *
2458 * Special presentation address format for mapped v4
2459 * addresses.
2460 */
2461 if (ipv6_addr_v4mapped(addr))
2462 return snprintf(buf, buflen, "::ffff:%pI4",
2463 &addr->s6_addr32[3]);
2464
2465 /*
2466 * RFC 4291, Section 2.2.1
2467 */
2468 return snprintf(buf, buflen, "%pI6c", addr);
2469 }
2470
2471 /* Derived from rpc_sockaddr2uaddr */
2472 static void
2473 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2474 {
2475 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2476 char portbuf[RPCBIND_MAXUADDRPLEN];
2477 char addrbuf[RPCBIND_MAXUADDRLEN];
2478 unsigned short port;
2479 int len, netid_len;
2480 __be32 *p;
2481
2482 switch (sap->sa_family) {
2483 case AF_INET:
2484 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2485 return;
2486 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2487 break;
2488 case AF_INET6:
2489 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2490 return;
2491 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2492 break;
2493 default:
2494 WARN_ON_ONCE(1);
2495 return;
2496 }
2497
2498 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2499 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2500
2501 netid_len = strlen(da->da_netid);
2502 p = xdr_reserve_space(xdr, 4 + netid_len);
2503 xdr_encode_opaque(p, da->da_netid, netid_len);
2504
2505 p = xdr_reserve_space(xdr, 4 + len);
2506 xdr_encode_opaque(p, addrbuf, len);
2507 }
2508
2509 static void
2510 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2511 ktime_t t)
2512 {
2513 struct timespec64 ts;
2514 __be32 *p;
2515
2516 p = xdr_reserve_space(xdr, 12);
2517 ts = ktime_to_timespec64(t);
2518 p = xdr_encode_hyper(p, ts.tv_sec);
2519 *p++ = cpu_to_be32(ts.tv_nsec);
2520 }
2521
2522 static void
2523 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2524 struct nfs4_ff_io_stat *stat)
2525 {
2526 __be32 *p;
2527
2528 p = xdr_reserve_space(xdr, 5 * 8);
2529 p = xdr_encode_hyper(p, stat->ops_requested);
2530 p = xdr_encode_hyper(p, stat->bytes_requested);
2531 p = xdr_encode_hyper(p, stat->ops_completed);
2532 p = xdr_encode_hyper(p, stat->bytes_completed);
2533 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2534 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2535 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2536 }
2537
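/*
 * Encode a single ff_layoutupdate4: the DS netaddr and filehandle, the
 * read and write ff_io_latency4 blocks, the mirror's uptime, and the
 * trailing "local" flag (always false here).
 */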
2538 static void
2539 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2540 const struct nfs42_layoutstat_devinfo *devinfo,
2541 struct nfs4_ff_layout_mirror *mirror)
2542 {
2543 struct nfs4_pnfs_ds_addr *da;
2544 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2545 struct nfs_fh *fh = &mirror->fh_versions[0];
2546 __be32 *p;
2547
2548 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2549 dprintk("%s: DS %s: encoding address %s\n",
2550 __func__, ds->ds_remotestr, da->da_remotestr);
2551 /* netaddr4 */
2552 ff_layout_encode_netaddr(xdr, da);
2553 /* nfs_fh4 */
2554 p = xdr_reserve_space(xdr, 4 + fh->size);
2555 xdr_encode_opaque(p, fh->data, fh->size);
2556 /* ff_io_latency4 read */
2557 spin_lock(&mirror->lock);
2558 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2559 /* ff_io_latency4 write */
2560 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2561 spin_unlock(&mirror->lock);
2562 /* nfstime4 */
2563 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2564 /* bool */
2565 p = xdr_reserve_space(xdr, 4);
2566 *p = cpu_to_be32(false);
2567 }
2568
2569 static void
2570 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2571 const struct nfs4_xdr_opaque_data *opaque)
2572 {
2573 struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2574 struct nfs42_layoutstat_devinfo, ld_private);
2575 __be32 *start;
2576
2577 /* layoutupdate length */
2578 start = xdr_reserve_space(xdr, 4);
2579 ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2580
2581 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2582 }
2583
2584 static void
2585 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2586 {
2587 struct nfs4_ff_layout_mirror *mirror = opaque->data;
2588
2589 ff_layout_put_mirror(mirror);
2590 }
2591
2592 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2593 .encode = ff_layout_encode_layoutstats,
2594 .free = ff_layout_free_layoutstats,
2595 };
2596
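/*
 * Fill in up to dev_limit layoutstat device entries from this layout's
 * mirror list.  Each entry holds a reference on its mirror, released
 * when the entry's ld_private is freed (ff_layout_free_layoutstats).
 */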
2597 static int
2598 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2599 struct nfs42_layoutstat_devinfo *devinfo,
2600 int dev_limit, enum nfs4_ff_op_type type)
2601 {
2602 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2603 struct nfs4_ff_layout_mirror *mirror;
2604 struct nfs4_deviceid_node *dev;
2605 int i = 0;
2606
2607 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2608 if (i >= dev_limit)
2609 break;
2610 if (IS_ERR_OR_NULL(mirror->mirror_ds))
2611 continue;
2612 if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2613 &mirror->flags) &&
2614 type != NFS4_FF_OP_LAYOUTRETURN)
2615 continue;
2616 /* mirror refcount put in cleanup_layoutstats */
2617 if (!refcount_inc_not_zero(&mirror->ref))
2618 continue;
2619 dev = &mirror->mirror_ds->id_node;
2620 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2621 devinfo->offset = 0;
2622 devinfo->length = NFS4_MAX_UINT64;
2623 spin_lock(&mirror->lock);
2624 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2625 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2626 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2627 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2628 spin_unlock(&mirror->lock);
2629 devinfo->layout_type = LAYOUT_FLEX_FILES;
2630 devinfo->ld_private.ops = &layoutstat_ops;
2631 devinfo->ld_private.data = mirror;
2632
2633 devinfo++;
2634 i++;
2635 }
2636 return i;
2637 }
2638
2639 static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2640 {
2641 struct pnfs_layout_hdr *lo;
2642 struct nfs4_flexfile_layout *ff_layout;
2643 const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2644
2645 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2646 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2647 nfs_io_gfp_mask());
2648 if (!args->devinfo)
2649 return -ENOMEM;
2650
2651 spin_lock(&args->inode->i_lock);
2652 lo = NFS_I(args->inode)->layout;
2653 if (lo && pnfs_layout_is_valid(lo)) {
2654 ff_layout = FF_LAYOUT_FROM_HDR(lo);
2655 args->num_dev = ff_layout_mirror_prepare_stats(
2656 &ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2657 NFS4_FF_OP_LAYOUTSTATS);
2658 } else
2659 args->num_dev = 0;
2660 spin_unlock(&args->inode->i_lock);
2661 if (!args->num_dev) {
2662 kfree(args->devinfo);
2663 args->devinfo = NULL;
2664 return -ENOENT;
2665 }
2666
2667 return 0;
2668 }
2669
2670 static int
2671 ff_layout_set_layoutdriver(struct nfs_server *server,
2672 const struct nfs_fh *dummy)
2673 {
2674 #if IS_ENABLED(CONFIG_NFS_V4_2)
2675 server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
2676 #endif
2677 return 0;
2678 }
2679
2680 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2681 .setup_ds_info = ff_layout_setup_ds_info,
2682 .release_ds_info = ff_layout_release_ds_info,
2683 .mark_request_commit = pnfs_layout_mark_request_commit,
2684 .clear_request_commit = pnfs_generic_clear_request_commit,
2685 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2686 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2687 .commit_pagelist = ff_layout_commit_pagelist,
2688 };
2689
2690 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2691 .id = LAYOUT_FLEX_FILES,
2692 .name = "LAYOUT_FLEX_FILES",
2693 .owner = THIS_MODULE,
2694 .flags = PNFS_LAYOUTGET_ON_OPEN,
2695 .max_layoutget_response = 4096, /* 1 page or so... */
2696 .set_layoutdriver = ff_layout_set_layoutdriver,
2697 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2698 .free_layout_hdr = ff_layout_free_layout_hdr,
2699 .alloc_lseg = ff_layout_alloc_lseg,
2700 .free_lseg = ff_layout_free_lseg,
2701 .add_lseg = ff_layout_add_lseg,
2702 .pg_read_ops = &ff_layout_pg_read_ops,
2703 .pg_write_ops = &ff_layout_pg_write_ops,
2704 .get_ds_info = ff_layout_get_ds_info,
2705 .free_deviceid_node = ff_layout_free_deviceid_node,
2706 .read_pagelist = ff_layout_read_pagelist,
2707 .write_pagelist = ff_layout_write_pagelist,
2708 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2709 .prepare_layoutreturn = ff_layout_prepare_layoutreturn,
2710 .sync = pnfs_nfs_generic_sync,
2711 .prepare_layoutstats = ff_layout_prepare_layoutstats,
2712 .cancel_io = ff_layout_cancel_io,
2713 };
2714
2715 static int __init nfs4flexfilelayout_init(void)
2716 {
2717 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2718 __func__);
2719 return pnfs_register_layoutdriver(&flexfilelayout_type);
2720 }
2721
2722 static void __exit nfs4flexfilelayout_exit(void)
2723 {
2724 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2725 __func__);
2726 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2727 }
2728
2729 MODULE_ALIAS("nfs-layouttype4-4");
2730
2731 MODULE_LICENSE("GPL");
2732 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2733
2734 module_init(nfs4flexfilelayout_init);
2735 module_exit(nfs4flexfilelayout_exit);
2736
2737 module_param(io_maxretrans, ushort, 0644);
2738 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2739 "retries an I/O request before returning an error. ");
2740