// SPDX-License-Identifier: GPL-2.0
/*
 * Device operations for the pnfs nfs4 flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/sunrpc/addr.h>

#include "../internal.h"
#include "../nfs4session.h"
#include "flexfilelayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
static unsigned int dataserver_retrans;

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);

void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	if (!IS_ERR_OR_NULL(mirror_ds))
		nfs4_put_deviceid_node(&mirror_ds->id_node);
}

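/*
 * Tear down a mirror's deviceid node: drop the data server reference,
 * free the per-DS version array, and free the node itself after an RCU
 * grace period.
 */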
void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
	nfs4_pnfs_ds_put(mirror_ds->ds);
	kfree(mirror_ds->ds_versions);
	kfree_rcu(mirror_ds, id_node.rcu);
}

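/*
 * The opaque device data decoded below is the ff_device_addr4 structure
 * from RFC 8435: a multipath list of data server network addresses
 * followed by an array of ff_ds_version4 entries (version, minorversion,
 * rsize, wsize, tightly_coupled).
 */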
/* Decode opaque device data and construct new_ds using it */
struct nfs4_ff_layout_ds *
nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
			    gfp_t gfp_flags)
{
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct folio *scratch;
	struct list_head dsaddrs;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_ff_layout_ds *new_ds = NULL;
	struct nfs4_ff_ds_version *ds_versions = NULL;
	struct net *net = server->nfs_client->cl_net;
	u32 mp_count;
	u32 version_count;
	__be32 *p;
	int i, ret = -ENOMEM;

	/* set up xdr stream */
	scratch = folio_alloc(gfp_flags, 0);
	if (!scratch)
		goto out_err;

	new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
	if (!new_ds)
		goto out_scratch;

	nfs4_init_deviceid_node(&new_ds->id_node,
				server,
				&pdev->dev_id);
	INIT_LIST_HEAD(&dsaddrs);

	xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_folio(&stream, scratch);

	/* multipath count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	mp_count = be32_to_cpup(p);
	dprintk("%s: multipath ds count %d\n", __func__, mp_count);

	for (i = 0; i < mp_count; i++) {
		/* multipath ds */
		da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
		if (da)
			list_add_tail(&da->da_node, &dsaddrs);
	}
	if (list_empty(&dsaddrs)) {
		dprintk("%s: no suitable DS addresses found\n",
			__func__);
		ret = -ENOMEDIUM;
		goto out_err_drain_dsaddrs;
	}

	/* version count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	version_count = be32_to_cpup(p);
	dprintk("%s: version count %d\n", __func__, version_count);
	ds_versions = kcalloc(version_count,
			      sizeof(struct nfs4_ff_ds_version),
			      gfp_flags);
	if (!ds_versions)
		goto out_err_drain_dsaddrs;

	for (i = 0; i < version_count; i++) {
		/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
		 * tightly_coupled(4) */
		p = xdr_inline_decode(&stream, 20);
		if (unlikely(!p))
			goto out_err_drain_dsaddrs;
		ds_versions[i].version = be32_to_cpup(p++);
		ds_versions[i].minor_version = be32_to_cpup(p++);
		ds_versions[i].rsize = nfs_io_size(be32_to_cpup(p++),
						   server->nfs_client->cl_proto);
		ds_versions[i].wsize = nfs_io_size(be32_to_cpup(p++),
						   server->nfs_client->cl_proto);
		ds_versions[i].tightly_coupled = be32_to_cpup(p);

		if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
		if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;

		/*
		 * Check for a valid major/minor combination.
		 * Currently we support data servers that speak
		 * v3, v4.0, v4.1, or v4.2.
		 */
		if (!((ds_versions[i].version == 3 && ds_versions[i].minor_version == 0) ||
		      (ds_versions[i].version == 4 && ds_versions[i].minor_version < 3))) {
			dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
				i, ds_versions[i].version,
				ds_versions[i].minor_version);
			ret = -EPROTONOSUPPORT;
			goto out_err_drain_dsaddrs;
		}

		dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
			__func__, i, ds_versions[i].version,
			ds_versions[i].minor_version,
			ds_versions[i].rsize,
			ds_versions[i].wsize,
			ds_versions[i].tightly_coupled);
	}

	new_ds->ds_versions = ds_versions;
	new_ds->ds_versions_cnt = version_count;

	new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
	if (!new_ds->ds)
		goto out_err_drain_dsaddrs;

	/* If DS was already in cache, free ds addrs */
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	folio_put(scratch);
	return new_ds;

out_err_drain_dsaddrs:
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds_versions);
out_scratch:
	folio_put(scratch);
out_err:
	kfree(new_ds);

	dprintk("%s ERROR: returning %d\n", __func__, ret);
	return NULL;
}

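/*
 * Extend an existing error entry so that it also covers the byte range
 * [offset, offset + length).
 */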
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
			    u64 offset, u64 length)
{
	u64 end;

	end = max_t(u64, pnfs_end_offset(err->offset, err->length),
		    pnfs_end_offset(offset, length));
	err->offset = min_t(u64, err->offset, offset);
	err->length = end - err->offset;
}

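/*
 * Compare two error entries. The sort key is opnum, then status, then
 * stateid, then deviceid; entries whose byte ranges overlap or are
 * contiguous compare as equal so that they can be merged.
 */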
static int
ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
		  const struct nfs4_ff_layout_ds_err *e2)
{
	int ret;

	if (e1->opnum != e2->opnum)
		return e1->opnum < e2->opnum ? -1 : 1;
	if (e1->status != e2->status)
		return e1->status < e2->status ? -1 : 1;
	ret = memcmp(e1->stateid.data, e2->stateid.data,
		     sizeof(e1->stateid.data));
	if (ret != 0)
		return ret;
	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
	if (ret != 0)
		return ret;
	if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
		return -1;
	if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
		return 1;
	/* If ranges overlap or are contiguous, they are the same */
	return 0;
}

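/*
 * Insert @dserr into the layout's sorted error list, merging it with any
 * matching entry. The caller must hold the inode's i_lock.
 */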
static void
ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
			      struct nfs4_ff_layout_ds_err *dserr)
{
	struct nfs4_ff_layout_ds_err *err, *tmp;
	struct list_head *head = &flo->error_list;
	int match;

	/* Do insertion sort w/ merges */
	list_for_each_entry_safe(err, tmp, &flo->error_list, list) {
		match = ff_ds_error_match(err, dserr);
		if (match < 0)
			continue;
		if (match > 0) {
			/* Add entry "dserr" _before_ entry "err" */
			head = &err->list;
			break;
		}
		/* Entries match, so merge "err" into "dserr" */
		extend_ds_error(dserr, err->offset, err->length);
		list_replace(&err->list, &dserr->list);
		kfree(err);
		return;
	}

	list_add_tail(&dserr->list, head);
}

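/**
 * ff_layout_track_ds_error - record an I/O error against a data server
 * @flo: flexfile layout header
 * @mirror: mirror on which the error occurred
 * @dss_id: DS stripe id within the mirror
 * @offset: starting offset of the failed I/O
 * @length: length of the failed I/O
 * @status: error status reported for the I/O
 * @opnum: NFS operation that failed
 * @gfp_flags: memory allocation flags
 *
 * Queue the error on the layout's error list so that it can later be
 * reported back to the MDS.
 */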
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
			     struct nfs4_ff_layout_mirror *mirror,
			     u32 dss_id, u64 offset, u64 length, int status,
			     enum nfs_opnum4 opnum, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds_err *dserr;

	if (status == 0)
		return 0;

	if (IS_ERR_OR_NULL(mirror->dss[dss_id].mirror_ds))
		return -EINVAL;

	dserr = kmalloc(sizeof(*dserr), gfp_flags);
	if (!dserr)
		return -ENOMEM;

	INIT_LIST_HEAD(&dserr->list);
	dserr->offset = offset;
	dserr->length = length;
	dserr->status = status;
	dserr->opnum = opnum;
	nfs4_stateid_copy(&dserr->stateid, &mirror->dss[dss_id].stateid);
	memcpy(&dserr->deviceid, &mirror->dss[dss_id].mirror_ds->id_node.deviceid,
	       NFS4_DEVICEID4_SIZE);

	spin_lock(&flo->generic_hdr.plh_inode->i_lock);
	ff_layout_add_ds_error_locked(flo, dserr);
	spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
	return 0;
}

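/*
 * Look up the per-mirror credential for the given I/O mode and take a
 * reference to it under RCU. Returns NULL if no credential has been set.
 */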
static const struct cred *
ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode, u32 dss_id)
{
	const struct cred *cred, __rcu **pcred;

	if (iomode == IOMODE_READ)
		pcred = &mirror->dss[dss_id].ro_cred;
	else
		pcred = &mirror->dss[dss_id].rw_cred;

	rcu_read_lock();
	do {
		cred = rcu_dereference(*pcred);
		if (!cred)
			break;

		cred = get_cred_rcu(cred);
	} while (!cred);
	rcu_read_unlock();
	return cred;
}

struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror, u32 dss_id)
{
	/* FIXME: For now assume there is only 1 version available for the DS */
	return &mirror->dss[dss_id].fh_versions[0];
}

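/*
 * Only NFSv4 data servers use the DS stateid from the layout; for NFSv3
 * the stateid supplied by the caller is left untouched.
 */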
void
nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
				 u32 dss_id,
				 nfs4_stateid *stateid)
{
	if (nfs4_ff_layout_ds_version(mirror, dss_id) == 4)
		nfs4_stateid_copy(stateid, &mirror->dss[dss_id].stateid);
}

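/*
 * Lazily resolve the mirror's deviceid into a nfs4_ff_layout_ds. The
 * cmpxchg guards against a concurrent caller doing the same lookup; the
 * loser drops its reference. Returns false if the device cannot be used.
 */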
static bool
ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo,
			 struct nfs4_ff_layout_mirror *mirror,
			 u32 dss_id)
{
	if (mirror == NULL)
		goto outerr;
	if (mirror->dss[dss_id].mirror_ds == NULL) {
		struct nfs4_deviceid_node *node;
		struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);

		node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
					      &mirror->dss[dss_id].devid, lo->plh_lc_cred,
					      GFP_KERNEL);
		if (node)
			mirror_ds = FF_LAYOUT_MIRROR_DS(node);

		/* check for race with another call to this function */
		if (cmpxchg(&mirror->dss[dss_id].mirror_ds, NULL, mirror_ds) &&
		    mirror_ds != ERR_PTR(-ENODEV))
			nfs4_put_deviceid_node(node);
	}

	if (IS_ERR(mirror->dss[dss_id].mirror_ds))
		goto outerr;

	return true;
outerr:
	return false;
}

/**
 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
 * @lseg: the layout segment we're operating on
 * @mirror: layout mirror describing the DS to use
 * @dss_id: DS stripe id selecting which stripe to use
 * @fail_return: return layout on connect failure?
 *
 * Try to prepare a DS connection to accept an RPC call. This involves
 * selecting a mirror to use and connecting the client to it if it's not
 * already connected.
 *
 * Since a read can be satisfied by any single functioning mirror, we don't
 * want to return the layout as long as one is available. For writes, though,
 * any down mirror should result in a LAYOUTRETURN. @fail_return is how we
 * distinguish between the two cases.
 *
 * Returns a pointer to a connected DS object on success, or an ERR_PTR
 * on failure.
 */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
			  struct nfs4_ff_layout_mirror *mirror,
			  u32 dss_id,
			  bool fail_return)
{
	struct nfs4_pnfs_ds *ds;
	struct inode *ino = lseg->pls_layout->plh_inode;
	struct nfs_server *s = NFS_SERVER(ino);
	unsigned int max_payload;
	int status = -EAGAIN;

	if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror, dss_id))
		goto noconnect;

	ds = mirror->dss[dss_id].mirror_ds->ds;
	if (READ_ONCE(ds->ds_clp))
		goto out;
	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
	smp_rmb();

	/* FIXME: For now we assume the server sent only one version of NFS
	 * to use for the DS.
	 */
	status = nfs4_pnfs_ds_connect(s, ds, &mirror->dss[dss_id].mirror_ds->id_node,
				      dataserver_timeo, dataserver_retrans,
				      mirror->dss[dss_id].mirror_ds->ds_versions[0].version,
				      mirror->dss[dss_id].mirror_ds->ds_versions[0].minor_version);

	/* connect success, check rsize/wsize limit */
	if (!status) {
		/*
		 * ds_clp is put in destroy_ds().
		 * keep ds_clp even if DS is local, so that if local IO cannot
		 * proceed somehow, we can fall back to NFS whenever we want.
		 */
		nfs_local_probe_async(ds->ds_clp);
		max_payload =
			nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
				       NULL);
		if (mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize > max_payload)
			mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize = max_payload;
		if (mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize > max_payload)
			mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize = max_payload;
		goto out;
	}
noconnect:
	ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				 mirror, dss_id, lseg->pls_range.offset,
				 lseg->pls_range.length, NFS4ERR_NXIO,
				 OP_ILLEGAL, GFP_NOIO);
	ff_layout_send_layouterror(lseg);
	if (fail_return || !ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(ino, lseg);
	ds = ERR_PTR(status);
out:
	return ds;
}

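/*
 * Pick the credential to use for DS I/O: loosely coupled data servers use
 * the per-mirror credential from the layout when one is available, while
 * tightly coupled ones always use the MDS credential.
 */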
const struct cred *
ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
		      const struct pnfs_layout_range *range,
		      const struct cred *mdscred,
		      u32 dss_id)
{
	const struct cred *cred;

	if (mirror && !mirror->dss[dss_id].mirror_ds->ds_versions[0].tightly_coupled) {
		cred = ff_layout_get_mirror_cred(mirror, range->iomode, dss_id);
		if (!cred)
			cred = get_cred(mdscred);
	} else {
		cred = get_cred(mdscred);
	}
	return cred;
}

/**
 * nfs4_ff_find_or_create_ds_client - Find or create a DS rpc client
 * @mirror: pointer to the mirror
 * @ds_clp: nfs_client for the DS
 * @inode: pointer to inode
 * @dss_id: DS stripe id
 *
 * Find or create a DS rpc client with the MDS server rpc client auth flavor
 * in the nfs_client cl_ds_clients list.
 */
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
				 struct nfs_client *ds_clp, struct inode *inode,
				 u32 dss_id)
{
	switch (mirror->dss[dss_id].mirror_ds->ds_versions[0].version) {
	case 3:
		/* For NFSv3 DS, flavor is set when creating DS connections */
		return ds_clp->cl_rpcclient;
	case 4:
		return nfs4_find_or_create_ds_client(ds_clp, inode);
	default:
		BUG();
	}
}

void ff_layout_free_ds_ioerr(struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;

	while (!list_empty(head)) {
		err = list_first_entry(head,
				       struct nfs4_ff_layout_ds_err,
				       list);
		list_del(&err->list);
		kfree(err);
	}
}

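/*
 * Each entry is encoded as an ff_ioerr4 (RFC 8435): the byte range and
 * stateid of the failed I/O followed by a single device_error4 carrying
 * the deviceid, status, and opnum.
 */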
/* called with inode i_lock held */
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;
	__be32 *p;

	list_for_each_entry(err, head, list) {
		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
		 * + status(4) + opnum(4)
		 */
		p = xdr_reserve_space(xdr,
				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
		if (unlikely(!p))
			return -ENOBUFS;
		p = xdr_encode_hyper(p, err->offset);
		p = xdr_encode_hyper(p, err->length);
		p = xdr_encode_opaque_fixed(p, &err->stateid,
					    NFS4_STATEID_SIZE);
		/* Encode 1 error */
		*p++ = cpu_to_be32(1);
		p = xdr_encode_opaque_fixed(p, &err->deviceid,
					    NFS4_DEVICEID4_SIZE);
		*p++ = cpu_to_be32(err->status);
		*p++ = cpu_to_be32(err->opnum);
		dprintk("%s: offset %llu length %llu status %d op %d\n",
			__func__, err->offset, err->length, err->status,
			err->opnum);
	}

	return 0;
}

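/*
 * Move up to @maxnum error entries that intersect @range from the layout's
 * error list onto @head, and return the number of entries moved.
 */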
static
unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	struct inode *inode = lo->plh_inode;
	struct nfs4_ff_layout_ds_err *err, *n;
	unsigned int ret = 0;

	spin_lock(&inode->i_lock);
	list_for_each_entry_safe(err, n, &flo->error_list, list) {
		if (!pnfs_is_range_intersecting(err->offset,
				pnfs_end_offset(err->offset, err->length),
				range->offset,
				pnfs_end_offset(range->offset, range->length)))
			continue;
		if (!maxnum)
			break;
		list_move(&err->list, head);
		maxnum--;
		ret++;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	unsigned int ret;

	ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
	/* If we're over the max, discard all remaining entries */
	if (ret == maxnum) {
		LIST_HEAD(discard);
		do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
		ff_layout_free_ds_ioerr(&discard);
	}
	return ret;
}

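/*
 * A read can be served by any single mirror, so the layout is usable as
 * long as at least one mirror has a data server that is not known to be
 * unavailable.
 */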
static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx, dss_id;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror)
			continue;
		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
			if (!mirror->dss[dss_id].mirror_ds)
				return true;
			if (IS_ERR(mirror->dss[dss_id].mirror_ds))
				continue;
			devid = &mirror->dss[dss_id].mirror_ds->id_node;
			if (!nfs4_test_deviceid_unavailable(devid))
				return true;
		}
	}

	return false;
}

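/*
 * Writes must reach every mirror, so the layout is only usable if all
 * mirrors have a data server that is not known to be unavailable.
 */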
static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx, dss_id;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror)
			return false;
		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
			if (IS_ERR(mirror->dss[dss_id].mirror_ds))
				return false;
			if (!mirror->dss[dss_id].mirror_ds)
				continue;
			devid = &mirror->dss[dss_id].mirror_ds->id_node;
			if (nfs4_test_deviceid_unavailable(devid))
				return false;
		}
	}

	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
}

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_READ)
		return ff_read_layout_has_available_ds(lseg);
	/* Note: RW layout needs all mirrors available */
	return ff_rw_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
{
	return ff_layout_no_fallback_to_mds(lseg) ||
	       ff_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
{
	return lseg->pls_range.iomode == IOMODE_RW &&
	       ff_layout_no_read_on_rw(lseg);
}

module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
			"retries a request before it attempts further "
			"recovery action.");
module_param(dataserver_timeo, uint, 0644);
MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
			"NFSv4.1 client waits for a response from a "
			"data server before it retries an NFS request.");