// SPDX-License-Identifier: GPL-2.0
/*
 * Device operations for the pnfs nfs4 file layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/sunrpc/addr.h>

#include "../internal.h"
#include "../nfs4session.h"
#include "flexfilelayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
static unsigned int dataserver_retrans;

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);

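/**
 * nfs4_ff_layout_put_deviceid - release a reference to a mirror's deviceid node
 * @mirror_ds: deviceid node to release; may be NULL or an ERR_PTR
 *
 * Drops one reference on @mirror_ds. The node is freed once the last
 * reference goes away.
 */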
void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	if (!IS_ERR_OR_NULL(mirror_ds))
		nfs4_put_deviceid_node(&mirror_ds->id_node);
}

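/**
 * nfs4_ff_layout_free_deviceid - free a mirror's deviceid node
 * @mirror_ds: deviceid node to free
 *
 * Releases the data server and the ds_versions array, then frees the
 * node itself after an RCU grace period.
 */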
void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
	nfs4_pnfs_ds_put(mirror_ds->ds);
	kfree(mirror_ds->ds_versions);
	kfree_rcu(mirror_ds, id_node.rcu);
}

/**
 * nfs4_ff_alloc_deviceid_node - decode opaque device data into a new node
 * @server: server that this deviceid belongs to
 * @pdev: device info carrying the XDR-encoded multipath addresses and versions
 * @gfp_flags: memory allocation flags
 *
 * Decode the opaque device data in @pdev and construct a new
 * nfs4_ff_layout_ds from it.
 *
 * Returns the new node on success, or NULL on failure.
 */
struct nfs4_ff_layout_ds *
nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
			    gfp_t gfp_flags)
{
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct folio *scratch;
	struct list_head dsaddrs;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_ff_layout_ds *new_ds = NULL;
	struct nfs4_ff_ds_version *ds_versions = NULL;
	struct net *net = server->nfs_client->cl_net;
	u32 mp_count;
	u32 version_count;
	__be32 *p;
	int i, ret = -ENOMEM;

	/* set up xdr stream */
	scratch = folio_alloc(gfp_flags, 0);
	if (!scratch)
		goto out_err;

	new_ds = kzalloc_obj(struct nfs4_ff_layout_ds, gfp_flags);
	if (!new_ds)
		goto out_scratch;

	nfs4_init_deviceid_node(&new_ds->id_node,
				server,
				&pdev->dev_id);
	INIT_LIST_HEAD(&dsaddrs);

	xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_folio(&stream, scratch);

	/* multipath count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	mp_count = be32_to_cpup(p);
	dprintk("%s: multipath ds count %d\n", __func__, mp_count);

	for (i = 0; i < mp_count; i++) {
		/* multipath ds */
		da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags);
		if (da)
			list_add_tail(&da->da_node, &dsaddrs);
	}
	if (list_empty(&dsaddrs)) {
		dprintk("%s: no suitable DS addresses found\n",
			__func__);
		ret = -ENOMEDIUM;
		goto out_err_drain_dsaddrs;
	}

	/* version count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	version_count = be32_to_cpup(p);

	if (version_count == 0) {
		ret = -EINVAL;
		goto out_err_drain_dsaddrs;
	}
	dprintk("%s: version count %d\n", __func__, version_count);

	ds_versions = kzalloc_objs(struct nfs4_ff_ds_version, version_count,
				   gfp_flags);
	if (!ds_versions)
		goto out_err_drain_dsaddrs;

	for (i = 0; i < version_count; i++) {
		/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
		 * tightly_coupled(4) */
		p = xdr_inline_decode(&stream, 20);
		if (unlikely(!p))
			goto out_err_drain_dsaddrs;
		ds_versions[i].version = be32_to_cpup(p++);
		ds_versions[i].minor_version = be32_to_cpup(p++);
		ds_versions[i].rsize = nfs_io_size(be32_to_cpup(p++),
						   server->nfs_client->cl_proto);
		ds_versions[i].wsize = nfs_io_size(be32_to_cpup(p++),
						   server->nfs_client->cl_proto);
		ds_versions[i].tightly_coupled = be32_to_cpup(p);

		if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
		if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;

		/*
		 * Check for a valid major/minor combination.
		 * Currently we support data servers that speak
		 * v3, v4.0, v4.1 and v4.2.
		 */
		if (!((ds_versions[i].version == 3 && ds_versions[i].minor_version == 0) ||
		      (ds_versions[i].version == 4 && ds_versions[i].minor_version < 3))) {
			dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
				i, ds_versions[i].version,
				ds_versions[i].minor_version);
			ret = -EPROTONOSUPPORT;
			goto out_err_drain_dsaddrs;
		}

		dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
			__func__, i, ds_versions[i].version,
			ds_versions[i].minor_version,
			ds_versions[i].rsize,
			ds_versions[i].wsize,
			ds_versions[i].tightly_coupled);
	}

	new_ds->ds_versions = ds_versions;
	new_ds->ds_versions_cnt = version_count;

	new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags);
	if (!new_ds->ds)
		goto out_err_drain_dsaddrs;

	/* If the DS was already in the cache, free the ds addrs */
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	folio_put(scratch);
	return new_ds;

out_err_drain_dsaddrs:
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds_versions);
out_scratch:
	folio_put(scratch);
out_err:
	kfree(new_ds);

	dprintk("%s ERROR: returning %d\n", __func__, ret);
	return NULL;
}

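/*
 * Extend the range covered by @err so that it also covers the range
 * [offset, offset + length).
 */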
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
			    u64 offset, u64 length)
{
	u64 end;

	end = max_t(u64, pnfs_end_offset(err->offset, err->length),
		    pnfs_end_offset(offset, length));
	err->offset = min_t(u64, err->offset, offset);
	err->length = end - err->offset;
}

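/*
 * Compare two I/O error records, ordering by opnum, status, stateid
 * and deviceid. Entries whose byte ranges overlap or are contiguous
 * compare as equal so that they can be merged.
 */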
static int
ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
		  const struct nfs4_ff_layout_ds_err *e2)
{
	int ret;

	if (e1->opnum != e2->opnum)
		return e1->opnum < e2->opnum ? -1 : 1;
	if (e1->status != e2->status)
		return e1->status < e2->status ? -1 : 1;
	ret = memcmp(e1->stateid.data, e2->stateid.data,
		     sizeof(e1->stateid.data));
	if (ret != 0)
		return ret;
	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
	if (ret != 0)
		return ret;
	if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
		return -1;
	if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
		return 1;
	/* If ranges overlap or are contiguous, they are the same */
	return 0;
}

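/*
 * Insert @dserr into the sorted error list, merging it with any
 * matching entry. Must be called with the inode->i_lock held.
 */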
static void
ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
			      struct nfs4_ff_layout_ds_err *dserr)
{
	struct nfs4_ff_layout_ds_err *err, *tmp;
	struct list_head *head = &flo->error_list;
	int match;

	/* Do insertion sort w/ merges */
	list_for_each_entry_safe(err, tmp, &flo->error_list, list) {
		match = ff_ds_error_match(err, dserr);
		if (match < 0)
			continue;
		if (match > 0) {
			/* Add entry "dserr" _before_ entry "err" */
			head = &err->list;
			break;
		}
		/* Entries match, so merge "err" into "dserr" */
		extend_ds_error(dserr, err->offset, err->length);
		list_replace(&err->list, &dserr->list);
		kfree(err);
		return;
	}

	list_add_tail(&dserr->list, head);
}

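/**
 * ff_layout_track_ds_error - record an I/O error against a data server
 * @flo: flexfile layout the error occurred on
 * @mirror: mirror that was being used for the I/O
 * @dss_id: DS stripe id within the mirror
 * @offset: starting byte offset of the failed I/O
 * @length: length of the failed I/O
 * @status: error status to record
 * @opnum: NFSv4 operation that failed
 * @gfp_flags: memory allocation flags
 *
 * The recorded errors are later reported back to the MDS when the
 * layout is returned. Returns 0 on success, or a negative errno.
 */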
int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
			     struct nfs4_ff_layout_mirror *mirror,
			     u32 dss_id, u64 offset, u64 length, int status,
			     enum nfs_opnum4 opnum, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds_err *dserr;

	if (status == 0)
		return 0;

	if (IS_ERR_OR_NULL(mirror->dss[dss_id].mirror_ds))
		return -EINVAL;

	dserr = kmalloc_obj(*dserr, gfp_flags);
	if (!dserr)
		return -ENOMEM;

	INIT_LIST_HEAD(&dserr->list);
	dserr->offset = offset;
	dserr->length = length;
	dserr->status = status;
	dserr->opnum = opnum;
	nfs4_stateid_copy(&dserr->stateid, &mirror->dss[dss_id].stateid);
	memcpy(&dserr->deviceid, &mirror->dss[dss_id].mirror_ds->id_node.deviceid,
	       NFS4_DEVICEID4_SIZE);

	spin_lock(&flo->generic_hdr.plh_inode->i_lock);
	ff_layout_add_ds_error_locked(flo, dserr);
	spin_unlock(&flo->generic_hdr.plh_inode->i_lock);
	return 0;
}

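/*
 * Look up the credential for the given iomode under RCU, retrying
 * until a stable reference is obtained. Returns NULL if no credential
 * has been set for this iomode.
 */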
static const struct cred *
ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode, u32 dss_id)
{
	const struct cred *cred, __rcu **pcred;

	if (iomode == IOMODE_READ)
		pcred = &mirror->dss[dss_id].ro_cred;
	else
		pcred = &mirror->dss[dss_id].rw_cred;

	rcu_read_lock();
	do {
		cred = rcu_dereference(*pcred);
		if (!cred)
			break;

		cred = get_cred_rcu(cred);
	} while (!cred);
	rcu_read_unlock();
	return cred;
}

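/*
 * Return the filehandle to use for I/O to the given DS stripe.
 */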
struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror, u32 dss_id)
{
	/* FIXME: For now assume there is only 1 version available for the DS */
	return &mirror->dss[dss_id].fh_versions[0];
}

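/*
 * Copy the stripe's stateid into @stateid for NFSv4 data servers.
 * NFSv3 data servers do not use stateids, so @stateid is left
 * untouched in that case.
 */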
void
nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror,
				 u32 dss_id,
				 nfs4_stateid *stateid)
{
	if (nfs4_ff_layout_ds_version(mirror, dss_id) == 4)
		nfs4_stateid_copy(stateid, &mirror->dss[dss_id].stateid);
}

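/*
 * Look up the deviceid node for the mirror's DS stripe on first use,
 * resolving races between concurrent callers with cmpxchg(). Returns
 * true if the mirror now has a usable deviceid node.
 */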
static bool
ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo,
			 struct nfs4_ff_layout_mirror *mirror,
			 u32 dss_id)
{
	if (mirror == NULL)
		goto outerr;
	if (mirror->dss[dss_id].mirror_ds == NULL) {
		struct nfs4_deviceid_node *node;
		struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);

		node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode),
					      &mirror->dss[dss_id].devid,
					      lo->plh_lc_cred, GFP_KERNEL);
		if (node)
			mirror_ds = FF_LAYOUT_MIRROR_DS(node);

		/* check for race with another call to this function */
		if (cmpxchg(&mirror->dss[dss_id].mirror_ds, NULL, mirror_ds) &&
		    mirror_ds != ERR_PTR(-ENODEV))
			nfs4_put_deviceid_node(node);
	}

	if (IS_ERR(mirror->dss[dss_id].mirror_ds))
		goto outerr;

	return true;
outerr:
	return false;
}

/**
 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
 * @lseg: the layout segment we're operating on
 * @mirror: layout mirror describing the DS to use
 * @dss_id: DS stripe id to select stripe to use
 * @fail_return: return layout on connect failure?
 *
 * Try to prepare a DS connection to accept an RPC call. This involves
 * selecting a mirror to use and connecting the client to it if it's not
 * already connected.
 *
 * Since we only need a single functioning mirror to satisfy a read, we don't
 * want to return the layout if there is one. For writes though, any down
 * mirror should result in a LAYOUTRETURN. @fail_return is how we distinguish
 * between the two cases.
 *
 * Returns a pointer to a connected DS object on success, or an ERR_PTR
 * on failure.
 */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
			  struct nfs4_ff_layout_mirror *mirror,
			  u32 dss_id,
			  bool fail_return)
{
	struct nfs4_pnfs_ds *ds;
	struct inode *ino = lseg->pls_layout->plh_inode;
	struct nfs_server *s = NFS_SERVER(ino);
	unsigned int max_payload;
	int status = -EAGAIN;

	if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror, dss_id))
		goto noconnect;

	ds = mirror->dss[dss_id].mirror_ds->ds;
	if (READ_ONCE(ds->ds_clp))
		goto out;
	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
	smp_rmb();

	/* FIXME: For now we assume the server sent only one version of NFS
	 * to use for the DS.
	 */
	status = nfs4_pnfs_ds_connect(s, ds, &mirror->dss[dss_id].mirror_ds->id_node,
				      dataserver_timeo, dataserver_retrans,
				      mirror->dss[dss_id].mirror_ds->ds_versions[0].version,
				      mirror->dss[dss_id].mirror_ds->ds_versions[0].minor_version);

	/* connect success, check rsize/wsize limit */
	if (!status) {
		/*
		 * ds_clp is put in destroy_ds().
		 * keep ds_clp even if DS is local, so that if local IO cannot
		 * proceed somehow, we can fall back to NFS whenever we want.
		 */
		nfs_local_probe_async(ds->ds_clp);
		max_payload =
			nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
				       NULL);
		if (mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize > max_payload)
			mirror->dss[dss_id].mirror_ds->ds_versions[0].rsize = max_payload;
		if (mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize > max_payload)
			mirror->dss[dss_id].mirror_ds->ds_versions[0].wsize = max_payload;
		goto out;
	}
noconnect:
	ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				 mirror, dss_id, lseg->pls_range.offset,
				 lseg->pls_range.length, NFS4ERR_NXIO,
				 OP_ILLEGAL, GFP_NOIO);
	ff_layout_send_layouterror(lseg);
	if (fail_return || !ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(ino, lseg);
	ds = ERR_PTR(status);
out:
	return ds;
}

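/**
 * ff_layout_get_ds_cred - get the credential for I/O to a data server
 * @mirror: layout mirror describing the DS to use
 * @range: layout range, used to select the iomode
 * @mdscred: fallback MDS credential
 * @dss_id: DS stripe id
 *
 * For loosely coupled data servers, use the credential supplied in the
 * layout; otherwise fall back to the MDS credential.
 */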
const struct cred *
ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
		      const struct pnfs_layout_range *range,
		      const struct cred *mdscred,
		      u32 dss_id)
{
	const struct cred *cred;

	if (mirror && !mirror->dss[dss_id].mirror_ds->ds_versions[0].tightly_coupled) {
		cred = ff_layout_get_mirror_cred(mirror, range->iomode, dss_id);
		if (!cred)
			cred = get_cred(mdscred);
	} else {
		cred = get_cred(mdscred);
	}
	return cred;
}


/**
 * nfs4_ff_find_or_create_ds_client - Find or create a DS rpc client
 * @mirror: pointer to the mirror
 * @ds_clp: nfs_client for the DS
 * @inode: pointer to inode
 * @dss_id: DS stripe id
 *
 * Find or create a DS rpc client with the MDS server rpc client auth flavor
 * in the nfs_client cl_ds_clients list.
 */
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror,
				 struct nfs_client *ds_clp, struct inode *inode,
				 u32 dss_id)
{
	switch (mirror->dss[dss_id].mirror_ds->ds_versions[0].version) {
	case 3:
		/* For NFSv3 DS, flavor is set when creating DS connections */
		return ds_clp->cl_rpcclient;
	case 4:
		return nfs4_find_or_create_ds_client(ds_clp, inode);
	default:
		BUG();
	}
}

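/* Free all I/O error entries on @head */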
void ff_layout_free_ds_ioerr(struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;

	while (!list_empty(head)) {
		err = list_first_entry(head,
				       struct nfs4_ff_layout_ds_err,
				       list);
		list_del(&err->list);
		kfree(err);
	}
}

/* called with inode i_lock held */
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;
	__be32 *p;

	list_for_each_entry(err, head, list) {
		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
		 * + status(4) + opnum(4)
		 */
		p = xdr_reserve_space(xdr,
				      28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
		if (unlikely(!p))
			return -ENOBUFS;
		p = xdr_encode_hyper(p, err->offset);
		p = xdr_encode_hyper(p, err->length);
		p = xdr_encode_opaque_fixed(p, &err->stateid,
					    NFS4_STATEID_SIZE);
		/* Encode 1 error */
		*p++ = cpu_to_be32(1);
		p = xdr_encode_opaque_fixed(p, &err->deviceid,
					    NFS4_DEVICEID4_SIZE);
		*p++ = cpu_to_be32(err->status);
		*p++ = cpu_to_be32(err->opnum);
		dprintk("%s: offset %llu length %llu status %d op %d\n",
			__func__, err->offset, err->length, err->status,
			err->opnum);
	}

	return 0;
}

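/*
 * Move up to @maxnum error entries that intersect @range from the
 * layout's error list onto @head. Returns the number of entries moved.
 */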
static
unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	struct inode *inode = lo->plh_inode;
	struct nfs4_ff_layout_ds_err *err, *n;
	unsigned int ret = 0;

	spin_lock(&inode->i_lock);
	list_for_each_entry_safe(err, n, &flo->error_list, list) {
		if (!pnfs_is_range_intersecting(err->offset,
				pnfs_end_offset(err->offset, err->length),
				range->offset,
				pnfs_end_offset(range->offset, range->length)))
			continue;
		if (!maxnum)
			break;
		list_move(&err->list, head);
		maxnum--;
		ret++;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	unsigned int ret;

	ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
	/* If we're over the max, discard all remaining entries */
	if (ret == maxnum) {
		LIST_HEAD(discard);
		do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
		ff_layout_free_ds_ioerr(&discard);
	}
	return ret;
}

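/*
 * A read can be satisfied by any single mirror, so the layout is
 * usable as long as at least one DS in some mirror is available
 * (or has not yet been looked up).
 */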
static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx, dss_id;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror)
			continue;
		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
			if (!mirror->dss[dss_id].mirror_ds)
				return true;
			if (IS_ERR(mirror->dss[dss_id].mirror_ds))
				continue;
			devid = &mirror->dss[dss_id].mirror_ds->id_node;
			if (!nfs4_test_deviceid_unavailable(devid))
				return true;
		}
	}

	return false;
}

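/*
 * A write must go to every mirror, so the layout is only usable if
 * all data servers in all mirrors are available.
 */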
static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx, dss_id;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror)
			return false;
		for (dss_id = 0; dss_id < mirror->dss_count; dss_id++) {
			if (IS_ERR(mirror->dss[dss_id].mirror_ds))
				return false;
			if (!mirror->dss[dss_id].mirror_ds)
				continue;
			devid = &mirror->dss[dss_id].mirror_ds->id_node;
			if (nfs4_test_deviceid_unavailable(devid))
				return false;
		}
	}

	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
}

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_READ)
		return ff_read_layout_has_available_ds(lseg);
	/* Note: RW layout needs all mirrors available */
	return ff_rw_layout_has_available_ds(lseg);
}

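/*
 * Return true if I/O should stay on the data servers rather than fall
 * back to the MDS: either fallback is disallowed by the layout, or a
 * suitable DS is still available.
 */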
bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
{
	return ff_layout_no_fallback_to_mds(lseg) ||
	       ff_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
{
	return lseg->pls_range.iomode == IOMODE_RW &&
	       ff_layout_no_read_on_rw(lseg);
}

module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
			"retries a request before it attempts further "
			"recovery action.");
module_param(dataserver_timeo, uint, 0644);
MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
			"NFSv4.1 client waits for a response from a "
			"data server before it retries an NFS request.");