// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common NFS I/O operations for the pnfs file based
 * layout drivers.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tom Haynes <loghyr@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/addr.h>
#include <linux/module.h>

#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

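/*
 * Drop the data server nfs_client reference held by this pageio header,
 * then hand release of the header back to the MDS rpc_release callback.
 */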
void pnfs_generic_rw_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
	struct nfs_writeverf *verf = data->res.verf;

	data->task.tk_status = 0;
	memset(&verf->verifier, 0, sizeof(verf->verifier));
	verf->committed = NFS_UNSTABLE;
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);

void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	/* Note this may cause RPC to be resent */
	wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);

void pnfs_generic_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	pnfs_put_lseg(data->lseg);
	nfs_put_client(data->ds_clp);
	nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);

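/*
 * If the bucket no longer holds any requests, detach its layout segment and
 * return it so the caller can drop the reference; otherwise return NULL.
 */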
static struct pnfs_layout_segment *
pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)
{
	if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {
		struct pnfs_layout_segment *freeme = bucket->lseg;
		bucket->lseg = NULL;
		return freeme;
	}
	return NULL;
}

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding nfsi->commit_mutex
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
				  struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *bucket = NULL;

	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
		goto out;
	cinfo->ds->nwritten--;
	if (list_is_singular(&req->wb_list))
		bucket = list_first_entry(&req->wb_list,
					  struct pnfs_commit_bucket, written);
out:
	nfs_request_remove_commit_list(req, cinfo);
	if (bucket)
		pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);

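/*
 * Allocate a commit array with @n buckets, each starting out with empty
 * written and committing lists and no layout segment attached.
 */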
struct pnfs_commit_array *
pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags)
{
	struct pnfs_commit_array *p;
	struct pnfs_commit_bucket *b;

	p = kmalloc(struct_size(p, buckets, n), gfp_flags);
	if (!p)
		return NULL;
	p->nbuckets = n;
	INIT_LIST_HEAD(&p->cinfo_list);
	INIT_LIST_HEAD(&p->lseg_list);
	p->lseg = NULL;
	for (b = &p->buckets[0]; n != 0; b++, n--) {
		INIT_LIST_HEAD(&b->written);
		INIT_LIST_HEAD(&b->committing);
		b->lseg = NULL;
		b->direct_verf.committed = NFS_INVALID_STABLE_HOW;
	}
	return p;
}
EXPORT_SYMBOL_GPL(pnfs_alloc_commit_array);

void
pnfs_free_commit_array(struct pnfs_commit_array *p)
{
	kfree_rcu(p, rcu);
}
EXPORT_SYMBOL_GPL(pnfs_free_commit_array);

static struct pnfs_commit_array *
pnfs_find_commit_array_by_lseg(struct pnfs_ds_commit_info *fl_cinfo,
			       struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (array->lseg == lseg)
			return array;
	}
	return NULL;
}

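/*
 * Add @new to @fl_cinfo->commits unless an array for @lseg already exists,
 * in which case the existing array is returned and @new is left untouched.
 */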
struct pnfs_commit_array *
pnfs_add_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
		      struct pnfs_commit_array *new,
		      struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	if (array)
		return array;
	new->lseg = lseg;
	refcount_set(&new->refcount, 1);
	list_add_rcu(&new->cinfo_list, &fl_cinfo->commits);
	list_add(&new->lseg_list, &lseg->pls_commits);
	return new;
}
EXPORT_SYMBOL_GPL(pnfs_add_commit_array);

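/*
 * Look up the commit array for @lseg, asking the layout driver to set one
 * up via ->setup_ds_info() if the first lookup finds nothing.
 */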
static struct pnfs_commit_array *
pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
			 struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	rcu_read_lock();
	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	if (!array) {
		rcu_read_unlock();
		fl_cinfo->ops->setup_ds_info(fl_cinfo, lseg);
		rcu_read_lock();
		array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	}
	rcu_read_unlock();
	return array;
}

static void
pnfs_release_commit_array_locked(struct pnfs_commit_array *array)
{
	list_del_rcu(&array->cinfo_list);
	list_del(&array->lseg_list);
	pnfs_free_commit_array(array);
}

static void
pnfs_put_commit_array_locked(struct pnfs_commit_array *array)
{
	if (refcount_dec_and_test(&array->refcount))
		pnfs_release_commit_array_locked(array);
}

static void
pnfs_put_commit_array(struct pnfs_commit_array *array, struct inode *inode)
{
	if (refcount_dec_and_lock(&array->refcount, &inode->i_lock)) {
		pnfs_release_commit_array_locked(array);
		spin_unlock(&inode->i_lock);
	}
}

static struct pnfs_commit_array *
pnfs_get_commit_array(struct pnfs_commit_array *array)
{
	if (refcount_inc_not_zero(&array->refcount))
		return array;
	return NULL;
}

static void
pnfs_remove_and_free_commit_array(struct pnfs_commit_array *array)
{
	array->lseg = NULL;
	list_del_init(&array->lseg_list);
	pnfs_put_commit_array_locked(array);
}

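/*
 * Detach and drop every commit array currently associated with @lseg.
 */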
void
pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo,
				   struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array, *tmp;

	list_for_each_entry_safe(array, tmp, &lseg->pls_commits, lseg_list)
		pnfs_remove_and_free_commit_array(array);
}
EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_release_lseg);

void
pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo)
{
	struct pnfs_commit_array *array, *tmp;

	list_for_each_entry_safe(array, tmp, &fl_cinfo->commits, cinfo_list)
		pnfs_remove_and_free_commit_array(array);
}
EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_destroy);

/*
 * Locks the nfs_page requests for commit and moves them to
 * @bucket->committing.
 */
static int
pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
				struct nfs_commit_info *cinfo,
				int max)
{
	struct list_head *src = &bucket->written;
	struct list_head *dst = &bucket->committing;
	int ret;

	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
	ret = nfs_scan_commit_list(src, dst, cinfo, max);
	if (ret) {
		cinfo->ds->nwritten -= ret;
		cinfo->ds->ncommitting += ret;
	}
	return ret;
}

static int pnfs_bucket_scan_array(struct nfs_commit_info *cinfo,
				  struct pnfs_commit_bucket *buckets,
				  unsigned int nbuckets,
				  int max)
{
	unsigned int i;
	int rv = 0, cnt;

	for (i = 0; i < nbuckets && max != 0; i++) {
		cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max);
		rv += cnt;
		max -= cnt;
	}
	return rv;
}

/* Move reqs from written to committing lists, returning count
 * of number moved.
 */
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_array *array;
	int rv = 0, cnt;

	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		cnt = pnfs_bucket_scan_array(cinfo, array->buckets,
					     array->nbuckets, max);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
		rv += cnt;
		max -= cnt;
		if (!max)
			break;
	}
	rcu_read_unlock();
	return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);

static unsigned int
pnfs_bucket_recover_commit_reqs(struct list_head *dst,
				struct pnfs_commit_bucket *buckets,
				unsigned int nbuckets,
				struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *b;
	struct pnfs_layout_segment *freeme;
	unsigned int nwritten, ret = 0;
	unsigned int i;

restart:
	for (i = 0, b = buckets; i < nbuckets; i++, b++) {
		nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
		if (!nwritten)
			continue;
		ret += nwritten;
		freeme = pnfs_free_bucket_lseg(b);
		if (freeme) {
			pnfs_put_lseg(freeme);
			goto restart;
		}
	}
	return ret;
}

/* Pull everything off the committing lists and dump into @dst. */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
				      struct nfs_commit_info *cinfo)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_array *array;
	unsigned int nwritten;

	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		nwritten = pnfs_bucket_recover_commit_reqs(dst,
							   array->buckets,
							   array->nbuckets,
							   cinfo);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
		fl_cinfo->nwritten -= nwritten;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);

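/*
 * Move the bucket's committing requests onto @head, adjust the outstanding
 * commit count, and return a referenced layout segment for those requests.
 */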
static struct pnfs_layout_segment *
pnfs_bucket_get_committing(struct list_head *head,
			   struct pnfs_commit_bucket *bucket,
			   struct nfs_commit_info *cinfo)
{
	struct pnfs_layout_segment *lseg;
	struct list_head *pos;

	list_for_each(pos, &bucket->committing)
		cinfo->ds->ncommitting--;
	list_splice_init(&bucket->committing, head);
	lseg = pnfs_free_bucket_lseg(bucket);
	if (!lseg)
		lseg = pnfs_get_lseg(bucket->lseg);
	return lseg;
}

static struct nfs_commit_data *
pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
			     struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data = nfs_commitdata_alloc();

	if (!data)
		return NULL;
	data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
	return data;
}

static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets,
				      unsigned int nbuckets,
				      struct nfs_commit_info *cinfo,
				      unsigned int idx)
{
	struct pnfs_commit_bucket *bucket;
	struct pnfs_layout_segment *freeme;
	LIST_HEAD(pages);

	for (bucket = buckets; idx < nbuckets; bucket++, idx++) {
		if (list_empty(&bucket->committing))
			continue;
		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
		freeme = pnfs_bucket_get_committing(&pages, bucket, cinfo);
		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
		nfs_retry_commit(&pages, freeme, cinfo, idx);
		pnfs_put_lseg(freeme);
	}
}

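/*
 * Allocate one nfs_commit_data per non-empty bucket and queue it on @list.
 * On allocation failure, requests in the remaining buckets are handed back
 * to pnfs_generic_retry_commit() so they get resent.
 */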
static unsigned int
pnfs_bucket_alloc_ds_commits(struct list_head *list,
			     struct pnfs_commit_bucket *buckets,
			     unsigned int nbuckets,
			     struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *bucket;
	struct nfs_commit_data *data;
	unsigned int i;
	unsigned int nreq = 0;

	for (i = 0, bucket = buckets; i < nbuckets; i++, bucket++) {
		if (list_empty(&bucket->committing))
			continue;
		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
		if (!list_empty(&bucket->committing)) {
			data = pnfs_bucket_fetch_commitdata(bucket, cinfo);
			if (!data)
				goto out_error;
			data->ds_commit_index = i;
			list_add_tail(&data->list, list);
			nreq++;
		}
		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	}
	return nreq;
out_error:
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	/* Clean up on error */
	pnfs_generic_retry_commit(buckets, nbuckets, cinfo, i);
	return nreq;
}

static unsigned int
pnfs_alloc_ds_commits_list(struct list_head *list,
			   struct pnfs_ds_commit_info *fl_cinfo,
			   struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_array *array;
	unsigned int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		ret += pnfs_bucket_alloc_ds_commits(list, array->buckets,
						    array->nbuckets, cinfo);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
	}
	rcu_read_unlock();
	return ret;
}

/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			     int how, struct nfs_commit_info *cinfo,
			     int (*initiate_commit)(struct nfs_commit_data *data,
						    int how))
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct nfs_commit_data *data, *tmp;
	LIST_HEAD(list);
	unsigned int nreq = 0;

	if (!list_empty(mds_pages)) {
		data = nfs_commitdata_alloc();
		if (!data) {
			nfs_retry_commit(mds_pages, NULL, cinfo, -1);
			return -ENOMEM;
		}
		data->ds_commit_index = -1;
		list_splice_init(mds_pages, &data->pages);
		list_add_tail(&data->list, &list);
		nreq++;
	}

	nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);
	if (nreq == 0)
		goto out;

	list_for_each_entry_safe(data, tmp, &list, list) {
		list_del(&data->list);
		if (data->ds_commit_index < 0) {
			nfs_init_commit(data, NULL, NULL, cinfo);
			nfs_initiate_commit(NFS_CLIENT(inode), data,
					    NFS_PROTO(data->inode),
					    data->mds_ops, how,
					    RPC_TASK_CRED_NOREF, NULL);
		} else {
			nfs_init_commit(data, NULL, data->lseg, cinfo);
			initiate_commit(data, how);
		}
	}
out:
	return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids, but should
 * never be shared between net namespaces.
 *
 * nfs4_pnfs_ds reference counting:
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */

/* Debug routines */
static void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk(KERN_WARNING "%s NULL device\n", __func__);
		return;
	}
	printk(KERN_WARNING "	ds %s\n"
		"	ref count %d\n"
		"	client %p\n"
		"	cl_exchange_flags %x\n",
		ds->ds_remotestr,
		refcount_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}

static bool
same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
{
	struct sockaddr_in *a, *b;
	struct sockaddr_in6 *a6, *b6;

	if (addr1->sa_family != addr2->sa_family)
		return false;

	switch (addr1->sa_family) {
	case AF_INET:
		a = (struct sockaddr_in *)addr1;
		b = (struct sockaddr_in *)addr2;

		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
		    a->sin_port == b->sin_port)
			return true;
		break;

	case AF_INET6:
		a6 = (struct sockaddr_in6 *)addr1;
		b6 = (struct sockaddr_in6 *)addr2;

		/* LINKLOCAL addresses must have matching scope_id */
		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
		    IPV6_ADDR_SCOPE_LINKLOCAL &&
		    a6->sin6_scope_id != b6->sin6_scope_id)
			return false;

		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
		    a6->sin6_port == b6->sin6_port)
			return true;
		break;

	default:
		dprintk("%s: unhandled address family: %u\n",
			__func__, addr1->sa_family);
		return false;
	}

	return false;
}

/*
 * Checks whether every address in 'dsaddrs1' also appears in 'dsaddrs2',
 * i.e. 'dsaddrs1' is a subset of 'dsaddrs2'. If it is, declare a match.
 */
static bool
_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
			       const struct list_head *dsaddrs2)
{
	struct nfs4_pnfs_ds_addr *da1, *da2;
	struct sockaddr *sa1, *sa2;
	bool match = false;

	list_for_each_entry(da1, dsaddrs1, da_node) {
		sa1 = (struct sockaddr *)&da1->da_addr;
		match = false;
		list_for_each_entry(da2, dsaddrs2, da_node) {
			sa2 = (struct sockaddr *)&da2->da_addr;
			match = same_sockaddr(sa1, sa2);
			if (match)
				break;
		}
		if (!match)
			break;
	}
	return match;
}

/*
 * Lookup DS by addresses. nn->nfs4_data_server_lock is held.
 */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(const struct nfs_net *nn, const struct list_head *dsaddrs)
{
	struct nfs4_pnfs_ds *ds;

	list_for_each_entry(ds, &nn->nfs4_data_server_cache, ds_node)
		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
			return ds;
	return NULL;
}

static struct nfs4_pnfs_ds_addr *nfs4_pnfs_ds_addr_alloc(gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = kzalloc(sizeof(*da), gfp_flags);
	if (da)
		INIT_LIST_HEAD(&da->da_node);
	return da;
}

static void nfs4_pnfs_ds_addr_free(struct nfs4_pnfs_ds_addr *da)
{
	kfree(da->da_remotestr);
	kfree(da->da_netid);
	kfree(da);
}

static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
	struct nfs4_pnfs_ds_addr *da;

	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	nfs_put_client(ds->ds_clp);

	while (!list_empty(&ds->ds_addrs)) {
		da = list_first_entry(&ds->ds_addrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		nfs4_pnfs_ds_addr_free(da);
	}

	kfree(ds->ds_remotestr);
	kfree(ds);
}

void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
	struct nfs_net *nn = net_generic(ds->ds_net, nfs_net_id);

	if (refcount_dec_and_lock(&ds->ds_count, &nn->nfs4_data_server_lock)) {
		list_del_init(&ds->ds_node);
		spin_unlock(&nn->nfs4_data_server_lock);
		destroy_ds(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);

/*
 * Create a string with a human readable address and port to avoid
 * complicated setup around many dprintks.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	char *remotestr;
	size_t len;
	char *p;

	len = 3;	/* '{', '}' and eol */
	list_for_each_entry(da, dsaddrs, da_node) {
		len += strlen(da->da_remotestr) + 1;	/* string plus comma */
	}

	remotestr = kzalloc(len, gfp_flags);
	if (!remotestr)
		return NULL;

	p = remotestr;
	*(p++) = '{';
	len--;
	list_for_each_entry(da, dsaddrs, da_node) {
		size_t ll = strlen(da->da_remotestr);

		if (ll > len)
			goto out_err;

		memcpy(p, da->da_remotestr, ll);
		p += ll;
		len -= ll;

		if (len < 1)
			goto out_err;
		(*p++) = ',';
		len--;
	}
	if (len < 2)
		goto out_err;
	*(p++) = '}';
	*p = '\0';
	return remotestr;
out_err:
	kfree(remotestr);
	return NULL;
}

/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's ok if it's NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nn->nfs4_data_server_lock);
	tmp_ds = _data_server_lookup_locked(nn, dsaddrs);
	if (tmp_ds == NULL) {
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		refcount_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_net = net;
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nn->nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		kfree(remotestr);
		kfree(ds);
		refcount_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			refcount_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nn->nfs4_data_server_lock);
out:
	return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);

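/* Wait for any in-progress connection attempt on this DS; killable sleep. */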
static int nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
	might_sleep();
	return wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING, TASK_KILLABLE);
}

static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
	smp_mb__before_atomic();
	clear_and_wake_up_bit(NFS4DS_CONNECTING, &ds->ds_state);
}

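/*
 * Pointer to nfs3_set_ds_client(), resolved at run time via symbol_request()
 * below so this code does not take a hard module dependency on NFSv3.
 */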
static struct nfs_client *(*get_v3_ds_connect)(
			struct nfs_server *mds_srv,
			const struct sockaddr_storage *ds_addr,
			int ds_addrlen,
			int ds_proto,
			unsigned int ds_timeo,
			unsigned int ds_retrans);

static bool load_v3_ds_connect(void)
{
	if (!get_v3_ds_connect) {
		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
		WARN_ON_ONCE(!get_v3_ds_connect);
	}

	return(get_v3_ds_connect != NULL);
}

void nfs4_pnfs_v3_ds_connect_unload(void)
{
	if (get_v3_ds_connect) {
		symbol_put(nfs3_set_ds_client);
		get_v3_ds_connect = NULL;
	}
}

static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
				    struct nfs4_pnfs_ds *ds,
				    unsigned int timeo,
				    unsigned int retrans)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs_client *mds_clp = mds_srv->nfs_client;
	enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
	struct nfs4_pnfs_ds_addr *da;
	unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
	int ds_proto;
	int status = 0;

	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

	if (!load_v3_ds_connect())
		return -EPROTONOSUPPORT;

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		if (!IS_ERR(clp)) {
			struct xprt_create xprt_args = {
				.ident = da->da_transport,
				.net = clp->cl_net,
				.dstaddr = (struct sockaddr *)&da->da_addr,
				.addrlen = da->da_addrlen,
				.servername = clp->cl_hostname,
				.connect_timeout = connect_timeout,
				.reconnect_timeout = connect_timeout,
				.xprtsec = clp->cl_xprtsec,
			};

			if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
			    clp->cl_proto == XPRT_TRANSPORT_TCP_TLS)
				xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;

			if (xprt_args.ident != clp->cl_proto)
				continue;
			if (xprt_args.dstaddr->sa_family !=
			    clp->cl_addr.ss_family)
				continue;
			/* Add this address as an alias */
			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
					  rpc_clnt_test_and_add_xprt, NULL);
			continue;
		}

		ds_proto = da->da_transport;
		if (ds_proto == XPRT_TRANSPORT_TCP &&
		    xprtsec_policy != RPC_XPRTSEC_NONE)
			ds_proto = XPRT_TRANSPORT_TCP_TLS;

		clp = get_v3_ds_connect(mds_srv, &da->da_addr, da->da_addrlen,
					ds_proto, timeo, retrans);
		if (IS_ERR(clp))
			continue;
		clp->cl_rpcclient->cl_softerr = 0;
		clp->cl_rpcclient->cl_softrtry = 0;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	WRITE_ONCE(ds->ds_clp, clp);
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}

static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
				    struct nfs4_pnfs_ds *ds,
				    unsigned int timeo,
				    unsigned int retrans,
				    u32 minor_version)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs_client *mds_clp = mds_srv->nfs_client;
	enum xprtsec_policies xprtsec_policy = mds_clp->cl_xprtsec.policy;
	struct nfs4_pnfs_ds_addr *da;
	int ds_proto;
	int status = 0;

	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		char servername[48];

		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
			struct xprt_create xprt_args = {
				.ident = da->da_transport,
				.net = clp->cl_net,
				.dstaddr = (struct sockaddr *)&da->da_addr,
				.addrlen = da->da_addrlen,
				.servername = clp->cl_hostname,
				.xprtsec = clp->cl_xprtsec,
			};
			struct nfs4_add_xprt_data xprtdata = {
				.clp = clp,
			};
			struct rpc_add_xprt_test rpcdata = {
				.add_xprt_test = clp->cl_mvops->session_trunk,
				.data = &xprtdata,
			};

			if (xprt_args.ident == XPRT_TRANSPORT_TCP &&
			    clp->cl_proto == XPRT_TRANSPORT_TCP_TLS) {
				struct sockaddr *addr =
					(struct sockaddr *)&da->da_addr;
				struct sockaddr_in *sin =
					(struct sockaddr_in *)&da->da_addr;
				struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)&da->da_addr;

				/* for NFS with TLS we need to supply the
				 * servername of the trunked transport, not
				 * the servername of the main transport stored
				 * in clp->cl_hostname, and set the protocol
				 * to indicate that TLS should be used.
				 */
				servername[0] = '\0';
				switch (addr->sa_family) {
				case AF_INET:
					snprintf(servername, sizeof(servername),
						 "%pI4", &sin->sin_addr.s_addr);
					break;
				case AF_INET6:
					snprintf(servername, sizeof(servername),
						 "%pI6", &sin6->sin6_addr);
					break;
				default:
					/* do not consider this address */
					continue;
				}
				xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
				xprt_args.servername = servername;
			}
			if (xprt_args.ident != clp->cl_proto)
				continue;
			if (xprt_args.dstaddr->sa_family !=
			    clp->cl_addr.ss_family)
				continue;

			/*
			 * Test this address for session trunking and
			 * add as an alias
			 */
			xprtdata.cred = nfs4_get_clid_cred(clp);
			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
					  rpc_clnt_setup_test_and_add_xprt,
					  &rpcdata);
			if (xprtdata.cred)
				put_cred(xprtdata.cred);
		} else {
			ds_proto = da->da_transport;
			if (ds_proto == XPRT_TRANSPORT_TCP &&
			    xprtsec_policy != RPC_XPRTSEC_NONE)
				ds_proto = XPRT_TRANSPORT_TCP_TLS;

			clp = nfs4_set_ds_client(mds_srv, &da->da_addr,
						 da->da_addrlen, ds_proto,
						 timeo, retrans, minor_version);
			if (IS_ERR(clp))
				continue;

			status = nfs4_init_ds_session(clp,
					mds_srv->nfs_client->cl_lease_time);
			if (status) {
				nfs_put_client(clp);
				clp = ERR_PTR(-EIO);
				continue;
			}
		}
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	WRITE_ONCE(ds->ds_clp, clp);
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}

/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable and return a -errno.
 */
int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
			 struct nfs4_deviceid_node *devid, unsigned int timeo,
			 unsigned int retrans, u32 version, u32 minor_version)
{
	int err;

	do {
		err = nfs4_wait_ds_connect(ds);
		if (err || ds->ds_clp)
			goto out;
		if (nfs4_test_deviceid_unavailable(devid)) {
			err = -ENODEV;
			goto out;
		}
	} while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);

	if (ds->ds_clp)
		goto connect_done;

	switch (version) {
	case 3:
		err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, retrans);
		break;
	case 4:
		err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo, retrans,
					       minor_version);
		break;
	default:
		dprintk("%s: unsupported DS version %d\n", __func__, version);
		err = -EPROTONOSUPPORT;
	}

connect_done:
	nfs4_clear_ds_conn_bit(ds);
out:
	/*
	 * At this point the ds->ds_clp should be ready, but it might have
	 * hit an error.
	 */
	if (!err) {
		if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
			WARN_ON_ONCE(ds->ds_clp ||
				     !nfs4_test_deviceid_unavailable(devid));
			err = -EINVAL;
		} else
			err = nfs_client_init_status(ds->ds_clp);
	}

	trace_pnfs_ds_connect(ds->ds_remotestr, err);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);

/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 */
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = NULL;
	char *buf, *portstr;
	__be16 port;
	ssize_t nlen, rlen;
	int tmp[2];
	char *netid;
	size_t len;
	char *startsep = "";
	char *endsep = "";


	/* r_netid */
	nlen = xdr_stream_decode_string_dup(xdr, &netid, XDR_MAX_NETOBJ,
					    gfp_flags);
	if (unlikely(nlen < 0))
		goto out_err;

	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
	/* port is ".ABC.DEF", 8 chars max */
	rlen = xdr_stream_decode_string_dup(xdr, &buf, INET6_ADDRSTRLEN +
					    IPV6_SCOPE_ID_LEN + 8, gfp_flags);
	if (unlikely(rlen < 0))
		goto out_free_netid;

	/* replace port '.' with '-' */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot in port\n",
			__func__);
		goto out_free_buf;
	}
	*portstr = '-';

	/* find '.' between address and port */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot between address and "
			"port\n", __func__);
		goto out_free_buf;
	}
	*portstr = '\0';

	da = nfs4_pnfs_ds_addr_alloc(gfp_flags);
	if (unlikely(!da))
		goto out_free_buf;

	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
		      sizeof(da->da_addr))) {
		dprintk("%s: error parsing address %s\n", __func__, buf);
		goto out_free_da;
	}

	portstr++;
	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	switch (da->da_addr.ss_family) {
	case AF_INET:
		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in);
		break;

	case AF_INET6:
		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in6);
		startsep = "[";
		endsep = "]";
		break;

	default:
		dprintk("%s: unsupported address family: %u\n",
			__func__, da->da_addr.ss_family);
		goto out_free_da;
	}

	da->da_transport = xprt_find_transport_ident(netid);
	if (da->da_transport < 0) {
		dprintk("%s: ERROR: unknown r_netid \"%s\"\n",
			__func__, netid);
		goto out_free_da;
	}

	da->da_netid = netid;

	/* save human readable address */
	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
	da->da_remotestr = kzalloc(len, gfp_flags);

	/* NULL is ok, only used for dprintk */
	if (da->da_remotestr)
		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
			 buf, endsep, ntohs(port));

	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
	kfree(buf);
	return da;

out_free_da:
	kfree(da);
out_free_buf:
	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
	kfree(buf);
out_free_netid:
	kfree(netid);
out_err:
	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);

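/*
 * Mark @req for commit to the data server bucket selected by @ds_commit_idx.
 * If no commit array or valid layout segment is available, the request is
 * rescheduled for writing instead.
 */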
void
pnfs_layout_mark_request_commit(struct nfs_page *req,
				struct pnfs_layout_segment *lseg,
				struct nfs_commit_info *cinfo,
				u32 ds_commit_idx)
{
	struct list_head *list;
	struct pnfs_commit_array *array;
	struct pnfs_commit_bucket *bucket;

	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	array = pnfs_lookup_commit_array(cinfo->ds, lseg);
	if (!array || !pnfs_is_valid_lseg(lseg))
		goto out_resched;
	bucket = &array->buckets[ds_commit_idx];
	list = &bucket->written;
	/* Non-empty buckets hold a reference on the lseg. That ref
	 * is normally transferred to the COMMIT call and released
	 * there. It could also be released if the last req is pulled
	 * off due to a rewrite, in which case it will be done in
	 * pnfs_generic_clear_request_commit
	 */
	if (!bucket->lseg)
		bucket->lseg = pnfs_get_lseg(lseg);
	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
	cinfo->ds->nwritten++;

	nfs_request_add_commit_list_locked(req, list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
	return;
out_resched:
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	cinfo->completion_ops->resched_write(cinfo, req);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);

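/*
 * Flush any outstanding commits for the inode and, unless this is a datasync,
 * follow up with a LAYOUTCOMMIT. Returns 0 immediately if no layoutcommit is
 * outstanding.
 */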
int
pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
{
	int ret;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		return ret;
	if (datasync)
		return 0;
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);