xref: /linux/fs/nfs/pnfs.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 /*
2  *  pNFS functions to call and manage layout drivers.
3  *
4  *  Copyright (c) 2002 [year of first publication]
5  *  The Regents of the University of Michigan
6  *  All Rights Reserved
7  *
8  *  Dean Hildebrand <dhildebz@umich.edu>
9  *
10  *  Permission is granted to use, copy, create derivative works, and
11  *  redistribute this software and such derivative works for any purpose,
12  *  so long as the name of the University of Michigan is not used in
13  *  any advertising or publicity pertaining to the use or distribution
14  *  of this software without specific, written prior authorization. If
15  *  the above copyright notice or any other identification of the
16  *  University of Michigan is included in any copy of any portion of
17  *  this software, then the disclaimer below must also be included.
18  *
19  *  This software is provided as is, without representation or warranty
20  *  of any kind either express or implied, including without limitation
21  *  the implied warranties of merchantability, fitness for a particular
22  *  purpose, or noninfringement.  The Regents of the University of
23  *  Michigan shall not be liable for any damages, including special,
24  *  indirect, incidental, or consequential damages, with respect to any
25  *  claim arising out of or in connection with the use of the software,
26  *  even if it has been or is hereafter advised of the possibility of
27  *  such damages.
28  */
29 
30 #include <linux/nfs_fs.h>
31 #include <linux/nfs_page.h>
32 #include <linux/module.h>
33 #include "internal.h"
34 #include "pnfs.h"
35 #include "iostat.h"
36 #include "nfs4trace.h"
37 
38 #define NFSDBG_FACILITY		NFSDBG_PNFS
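/* Wait two minutes (120 seconds in jiffies) before retrying a failed LAYOUTGET */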
39 #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
40 
41 /* Locking:
42  *
43  * pnfs_spinlock:
44  *      protects pnfs_modules_tbl.
45  */
46 static DEFINE_SPINLOCK(pnfs_spinlock);
47 
48 /*
49  * pnfs_modules_tbl holds all pnfs modules
50  */
51 static LIST_HEAD(pnfs_modules_tbl);
52 
53 /* Return the registered pnfs layout driver module matching the given id */
54 static struct pnfs_layoutdriver_type *
55 find_pnfs_driver_locked(u32 id)
56 {
57 	struct pnfs_layoutdriver_type *local;
58 
59 	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
60 		if (local->id == id)
61 			goto out;
62 	local = NULL;
63 out:
64 	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
65 	return local;
66 }
67 
68 static struct pnfs_layoutdriver_type *
69 find_pnfs_driver(u32 id)
70 {
71 	struct pnfs_layoutdriver_type *local;
72 
73 	spin_lock(&pnfs_spinlock);
74 	local = find_pnfs_driver_locked(id);
75 	if (local != NULL && !try_module_get(local->owner)) {
76 		dprintk("%s: Could not grab reference on module\n", __func__);
77 		local = NULL;
78 	}
79 	spin_unlock(&pnfs_spinlock);
80 	return local;
81 }
82 
83 void
84 unset_pnfs_layoutdriver(struct nfs_server *nfss)
85 {
86 	if (nfss->pnfs_curr_ld) {
87 		if (nfss->pnfs_curr_ld->clear_layoutdriver)
88 			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
89 		/* Decrement the MDS count. Purge the deviceid cache if zero */
90 		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
91 			nfs4_deviceid_purge_client(nfss->nfs_client);
92 		module_put(nfss->pnfs_curr_ld->owner);
93 	}
94 	nfss->pnfs_curr_ld = NULL;
95 }
96 
97 /*
98  * Try to set the server's pnfs module to the pnfs layout type specified by id.
99  * Currently only one pNFS layout driver per filesystem is supported.
100  *
101  * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
102  */
103 void
104 set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
105 		      u32 id)
106 {
107 	struct pnfs_layoutdriver_type *ld_type = NULL;
108 
109 	if (id == 0)
110 		goto out_no_driver;
111 	if (!(server->nfs_client->cl_exchange_flags &
112 		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
113 		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
114 			__func__, id, server->nfs_client->cl_exchange_flags);
115 		goto out_no_driver;
116 	}
117 	ld_type = find_pnfs_driver(id);
118 	if (!ld_type) {
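		/*
		 * Ask userspace to load the matching driver; e.g. layout
		 * type 1 (the files layout) is expected to resolve to the
		 * module alias "nfs-layouttype4-1", assuming the usual
		 * LAYOUT_NFSV4_1_MODULE_PREFIX of "nfs-layouttype4".
		 */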
119 		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
120 		ld_type = find_pnfs_driver(id);
121 		if (!ld_type) {
122 			dprintk("%s: No pNFS module found for %u.\n",
123 				__func__, id);
124 			goto out_no_driver;
125 		}
126 	}
127 	server->pnfs_curr_ld = ld_type;
128 	if (ld_type->set_layoutdriver
129 	    && ld_type->set_layoutdriver(server, mntfh)) {
130 		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
131 			"driver %u.\n", __func__, id);
132 		module_put(ld_type->owner);
133 		goto out_no_driver;
134 	}
135 	/* Bump the MDS count */
136 	atomic_inc(&server->nfs_client->cl_mds_count);
137 
138 	dprintk("%s: pNFS module for %u set\n", __func__, id);
139 	return;
140 
141 out_no_driver:
142 	dprintk("%s: Using NFSv4 I/O\n", __func__);
143 	server->pnfs_curr_ld = NULL;
144 }
145 
146 int
147 pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
148 {
149 	int status = -EINVAL;
150 	struct pnfs_layoutdriver_type *tmp;
151 
152 	if (ld_type->id == 0) {
153 		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
154 		return status;
155 	}
156 	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
157 		printk(KERN_ERR "NFS: %s Layout driver must provide "
158 		       "alloc_lseg and free_lseg.\n", __func__);
159 		return status;
160 	}
161 
162 	spin_lock(&pnfs_spinlock);
163 	tmp = find_pnfs_driver_locked(ld_type->id);
164 	if (!tmp) {
165 		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
166 		status = 0;
167 		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
168 			ld_type->name);
169 	} else {
170 		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
171 			__func__, ld_type->id);
172 	}
173 	spin_unlock(&pnfs_spinlock);
174 
175 	return status;
176 }
177 EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
178 
179 void
180 pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
181 {
182 	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
183 	spin_lock(&pnfs_spinlock);
184 	list_del(&ld_type->pnfs_tblid);
185 	spin_unlock(&pnfs_spinlock);
186 }
187 EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
188 
189 /*
190  * pNFS client layout cache
191  */
192 
193 /* Need to hold i_lock if caller does not already hold reference */
194 void
195 pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
196 {
197 	atomic_inc(&lo->plh_refcount);
198 }
199 
200 static struct pnfs_layout_hdr *
201 pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
202 {
203 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
204 	return ld->alloc_layout_hdr(ino, gfp_flags);
205 }
206 
207 static void
208 pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
209 {
210 	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
211 	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
212 
213 	if (!list_empty(&lo->plh_layouts)) {
214 		struct nfs_client *clp = server->nfs_client;
215 
216 		spin_lock(&clp->cl_lock);
217 		list_del_init(&lo->plh_layouts);
218 		spin_unlock(&clp->cl_lock);
219 	}
220 	put_rpccred(lo->plh_lc_cred);
221 	return ld->free_layout_hdr(lo);
222 }
223 
224 static void
225 pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
226 {
227 	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
228 	dprintk("%s: freeing layout cache %p\n", __func__, lo);
229 	nfsi->layout = NULL;
230 	/* Reset MDS Threshold I/O counters */
231 	nfsi->write_io = 0;
232 	nfsi->read_io = 0;
233 }
234 
235 void
236 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
237 {
238 	struct inode *inode = lo->plh_inode;
239 
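	/* Take i_lock only if this put drops the final reference */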
240 	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
241 		pnfs_detach_layout_hdr(lo);
242 		spin_unlock(&inode->i_lock);
243 		pnfs_free_layout_hdr(lo);
244 	}
245 }
246 
247 static int
248 pnfs_iomode_to_fail_bit(u32 iomode)
249 {
250 	return iomode == IOMODE_RW ?
251 		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
252 }
253 
254 static void
255 pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
256 {
257 	lo->plh_retry_timestamp = jiffies;
258 	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
259 		atomic_inc(&lo->plh_refcount);
260 }
261 
262 static void
263 pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
264 {
265 	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
266 		atomic_dec(&lo->plh_refcount);
267 }
268 
269 static void
270 pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
271 {
272 	struct inode *inode = lo->plh_inode;
273 	struct pnfs_layout_range range = {
274 		.iomode = iomode,
275 		.offset = 0,
276 		.length = NFS4_MAX_UINT64,
277 	};
278 	LIST_HEAD(head);
279 
280 	spin_lock(&inode->i_lock);
281 	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
282 	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
283 	spin_unlock(&inode->i_lock);
284 	pnfs_free_lseg_list(&head);
285 	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
286 			iomode == IOMODE_RW ?  "RW" : "READ");
287 }
288 
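/*
 * Report whether a recent LAYOUTGET for this iomode has failed. Once the
 * fail bit is older than PNFS_LAYOUTGET_RETRY_TIMEOUT it is cleared so
 * that layoutgets can be retried.
 */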
289 static bool
290 pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
291 {
292 	unsigned long start, end;
293 	int fail_bit = pnfs_iomode_to_fail_bit(iomode);
294 
295 	if (test_bit(fail_bit, &lo->plh_flags) == 0)
296 		return false;
297 	end = jiffies;
298 	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
299 	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
300 		/* It is time to retry the failed layoutgets */
301 		pnfs_layout_clear_fail_bit(lo, fail_bit);
302 		return false;
303 	}
304 	return true;
305 }
306 
307 static void
308 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
309 {
310 	INIT_LIST_HEAD(&lseg->pls_list);
311 	INIT_LIST_HEAD(&lseg->pls_lc_list);
312 	atomic_set(&lseg->pls_refcount, 1);
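	/* Make the refcount store visible before marking the lseg valid */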
313 	smp_mb();
314 	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
315 	lseg->pls_layout = lo;
316 }
317 
318 static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
319 {
320 	struct inode *ino = lseg->pls_layout->plh_inode;
321 
322 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
323 }
324 
325 static void
326 pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
327 		struct pnfs_layout_segment *lseg)
328 {
329 	struct inode *inode = lo->plh_inode;
330 
331 	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
332 	list_del_init(&lseg->pls_list);
333 	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
334 	atomic_dec(&lo->plh_refcount);
335 	if (list_empty(&lo->plh_segs))
336 		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
337 	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
338 }
339 
340 void
341 pnfs_put_lseg(struct pnfs_layout_segment *lseg)
342 {
343 	struct pnfs_layout_hdr *lo;
344 	struct inode *inode;
345 
346 	if (!lseg)
347 		return;
348 
349 	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
350 		atomic_read(&lseg->pls_refcount),
351 		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
352 	lo = lseg->pls_layout;
353 	inode = lo->plh_inode;
354 	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
355 		pnfs_get_layout_hdr(lo);
356 		pnfs_layout_remove_lseg(lo, lseg);
357 		spin_unlock(&inode->i_lock);
358 		pnfs_free_lseg(lseg);
359 		pnfs_put_layout_hdr(lo);
360 	}
361 }
362 EXPORT_SYMBOL_GPL(pnfs_put_lseg);
363 
364 static void pnfs_put_lseg_async_work(struct work_struct *work)
365 {
366 	struct pnfs_layout_segment *lseg;
367 
368 	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
369 
370 	pnfs_put_lseg(lseg);
371 }
372 
373 void
374 pnfs_put_lseg_async(struct pnfs_layout_segment *lseg)
375 {
376 	INIT_WORK(&lseg->pls_work, pnfs_put_lseg_async_work);
377 	schedule_work(&lseg->pls_work);
378 }
379 EXPORT_SYMBOL_GPL(pnfs_put_lseg_async);
380 
381 static u64
382 end_offset(u64 start, u64 len)
383 {
384 	u64 end;
385 
386 	end = start + len;
387 	return end >= start ? end : NFS4_MAX_UINT64;
388 }
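
/*
 * Example: end_offset(4096, NFS4_MAX_UINT64) would wrap a u64, so the
 * sum saturates and NFS4_MAX_UINT64 is returned instead.
 */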
389 
390 /*
391  * is l2 fully contained in l1?
392  *   start1                             end1
393  *   [----------------------------------)
394  *           start2           end2
395  *           [----------------)
396  */
397 static bool
398 pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
399 		 const struct pnfs_layout_range *l2)
400 {
401 	u64 start1 = l1->offset;
402 	u64 end1 = end_offset(start1, l1->length);
403 	u64 start2 = l2->offset;
404 	u64 end2 = end_offset(start2, l2->length);
405 
406 	return (start1 <= start2) && (end1 >= end2);
407 }
408 
409 /*
410  * do l1 and l2 intersect?
411  *   start1                             end1
412  *   [----------------------------------)
413  *                              start2           end2
414  *                              [----------------)
415  */
416 static bool
417 pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
418 		    const struct pnfs_layout_range *l2)
419 {
420 	u64 start1 = l1->offset;
421 	u64 end1 = end_offset(start1, l1->length);
422 	u64 start2 = l2->offset;
423 	u64 end2 = end_offset(start2, l2->length);
424 
425 	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
426 	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
427 }
428 
429 static bool
430 should_free_lseg(const struct pnfs_layout_range *lseg_range,
431 		 const struct pnfs_layout_range *recall_range)
432 {
433 	return (recall_range->iomode == IOMODE_ANY ||
434 		lseg_range->iomode == recall_range->iomode) &&
435 	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
436 }
437 
438 static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
439 		struct list_head *tmp_list)
440 {
441 	if (!atomic_dec_and_test(&lseg->pls_refcount))
442 		return false;
443 	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
444 	list_add(&lseg->pls_list, tmp_list);
445 	return true;
446 }
447 
448 /* Returns 1 if lseg is removed from list, 0 otherwise */
449 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
450 			     struct list_head *tmp_list)
451 {
452 	int rv = 0;
453 
454 	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
455 		/* Remove the reference keeping the lseg in the
456 		 * list.  It will now be removed when all
457 		 * outstanding io is finished.
458 		 */
459 		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
460 			atomic_read(&lseg->pls_refcount));
461 		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
462 			rv = 1;
463 	}
464 	return rv;
465 }
466 
467 /* Returns the number of matching invalid lsegs remaining in the list
468  * after the call.
469  */
470 int
471 pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
472 			    struct list_head *tmp_list,
473 			    struct pnfs_layout_range *recall_range)
474 {
475 	struct pnfs_layout_segment *lseg, *next;
476 	int invalid = 0, removed = 0;
477 
478 	dprintk("%s:Begin lo %p\n", __func__, lo);
479 
480 	if (list_empty(&lo->plh_segs))
481 		return 0;
482 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
483 		if (!recall_range ||
484 		    should_free_lseg(&lseg->pls_range, recall_range)) {
485 			dprintk("%s: freeing lseg %p iomode %d "
486 				"offset %llu length %llu\n", __func__,
487 				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
488 				lseg->pls_range.length);
489 			invalid++;
490 			removed += mark_lseg_invalid(lseg, tmp_list);
491 		}
492 	dprintk("%s:Return %i\n", __func__, invalid - removed);
493 	return invalid - removed;
494 }
495 
496 /* note free_me must contain lsegs from a single layout_hdr */
497 void
498 pnfs_free_lseg_list(struct list_head *free_me)
499 {
500 	struct pnfs_layout_segment *lseg, *tmp;
501 
502 	if (list_empty(free_me))
503 		return;
504 
505 	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
506 		list_del(&lseg->pls_list);
507 		pnfs_free_lseg(lseg);
508 	}
509 }
510 
511 void
512 pnfs_destroy_layout(struct nfs_inode *nfsi)
513 {
514 	struct pnfs_layout_hdr *lo;
515 	LIST_HEAD(tmp_list);
516 
517 	spin_lock(&nfsi->vfs_inode.i_lock);
518 	lo = nfsi->layout;
519 	if (lo) {
520 		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
521 		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
522 		pnfs_get_layout_hdr(lo);
523 		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
524 		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
525 		spin_unlock(&nfsi->vfs_inode.i_lock);
526 		pnfs_free_lseg_list(&tmp_list);
527 		pnfs_put_layout_hdr(lo);
528 	} else
529 		spin_unlock(&nfsi->vfs_inode.i_lock);
530 }
531 EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
532 
533 static bool
534 pnfs_layout_add_bulk_destroy_list(struct inode *inode,
535 		struct list_head *layout_list)
536 {
537 	struct pnfs_layout_hdr *lo;
538 	bool ret = false;
539 
540 	spin_lock(&inode->i_lock);
541 	lo = NFS_I(inode)->layout;
542 	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
543 		pnfs_get_layout_hdr(lo);
544 		list_add(&lo->plh_bulk_destroy, layout_list);
545 		ret = true;
546 	}
547 	spin_unlock(&inode->i_lock);
548 	return ret;
549 }
550 
551 /* Caller must hold rcu_read_lock and clp->cl_lock */
552 static int
553 pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
554 		struct nfs_server *server,
555 		struct list_head *layout_list)
556 {
557 	struct pnfs_layout_hdr *lo, *next;
558 	struct inode *inode;
559 
560 	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
561 		inode = igrab(lo->plh_inode);
562 		if (inode == NULL)
563 			continue;
564 		list_del_init(&lo->plh_layouts);
565 		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
566 			continue;
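		/*
		 * iput() may sleep, so drop the locks around it and ask
		 * the caller to restart the scan from the top.
		 */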
567 		rcu_read_unlock();
568 		spin_unlock(&clp->cl_lock);
569 		iput(inode);
570 		spin_lock(&clp->cl_lock);
571 		rcu_read_lock();
572 		return -EAGAIN;
573 	}
574 	return 0;
575 }
576 
577 static int
578 pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
579 		bool is_bulk_recall)
580 {
581 	struct pnfs_layout_hdr *lo;
582 	struct inode *inode;
583 	struct pnfs_layout_range range = {
584 		.iomode = IOMODE_ANY,
585 		.offset = 0,
586 		.length = NFS4_MAX_UINT64,
587 	};
588 	LIST_HEAD(lseg_list);
589 	int ret = 0;
590 
591 	while (!list_empty(layout_list)) {
592 		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
593 				plh_bulk_destroy);
594 		dprintk("%s freeing layout for inode %lu\n", __func__,
595 			lo->plh_inode->i_ino);
596 		inode = lo->plh_inode;
597 		spin_lock(&inode->i_lock);
598 		list_del_init(&lo->plh_bulk_destroy);
599 		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
600 		if (is_bulk_recall)
601 			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
602 		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
603 			ret = -EAGAIN;
604 		spin_unlock(&inode->i_lock);
605 		pnfs_free_lseg_list(&lseg_list);
606 		pnfs_put_layout_hdr(lo);
607 		iput(inode);
608 	}
609 	return ret;
610 }
611 
612 int
613 pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
614 		struct nfs_fsid *fsid,
615 		bool is_recall)
616 {
617 	struct nfs_server *server;
618 	LIST_HEAD(layout_list);
619 
620 	spin_lock(&clp->cl_lock);
621 	rcu_read_lock();
622 restart:
623 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
624 		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
625 			continue;
626 		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
627 				server,
628 				&layout_list) != 0)
629 			goto restart;
630 	}
631 	rcu_read_unlock();
632 	spin_unlock(&clp->cl_lock);
633 
634 	if (list_empty(&layout_list))
635 		return 0;
636 	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
637 }
638 
639 int
640 pnfs_destroy_layouts_byclid(struct nfs_client *clp,
641 		bool is_recall)
642 {
643 	struct nfs_server *server;
644 	LIST_HEAD(layout_list);
645 
646 	spin_lock(&clp->cl_lock);
647 	rcu_read_lock();
648 restart:
649 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
650 		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
651 					server,
652 					&layout_list) != 0)
653 			goto restart;
654 	}
655 	rcu_read_unlock();
656 	spin_unlock(&clp->cl_lock);
657 
658 	if (list_empty(&layout_list))
659 		return 0;
660 	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
661 }
662 
663 /*
664  *  Called by the state manager to remove all layouts established under an
665  * expired lease.
666  */
667 void
668 pnfs_destroy_all_layouts(struct nfs_client *clp)
669 {
670 	nfs4_deviceid_mark_client_invalid(clp);
671 	nfs4_deviceid_purge_client(clp);
672 
673 	pnfs_destroy_layouts_byclid(clp, false);
674 }
675 
676 /*
677  * Compare 2 layout stateid sequence ids, to see which is newer,
678  * taking into account wraparound issues.
679  */
680 static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
681 {
682 	return (s32)(s1 - s2) > 0;
683 }
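
/*
 * Worked example (hypothetical values): s1 = 1 and s2 = 0xffffffff give
 * a u32 difference of 2, which is positive as an s32, so s1 is treated
 * as newer even though the sequence id has wrapped.
 */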
684 
685 static void
686 pnfs_verify_layout_stateid(struct pnfs_layout_hdr *lo,
687 		const nfs4_stateid *new,
688 		struct list_head *free_me_list)
689 {
690 	if (nfs4_stateid_match_other(&lo->plh_stateid, new))
691 		return;
692 	/* Layout is new! Kill existing layout segments */
693 	pnfs_mark_matching_lsegs_invalid(lo, free_me_list, NULL);
694 }
695 
696 /* update lo->plh_stateid with new if it is more recent */
697 void
698 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
699 			bool update_barrier)
700 {
701 	u32 oldseq, newseq, new_barrier;
702 	int empty = list_empty(&lo->plh_segs);
703 
704 	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
705 	newseq = be32_to_cpu(new->seqid);
706 	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
707 		nfs4_stateid_copy(&lo->plh_stateid, new);
708 		if (update_barrier) {
709 			new_barrier = be32_to_cpu(new->seqid);
710 		} else {
711 			/* Because of wraparound, we want to keep the barrier
712 			 * "close" to the current seqids.
713 			 */
714 			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
715 		}
716 		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
717 			lo->plh_barrier = new_barrier;
718 	}
719 }
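
/*
 * Example of the barrier arithmetic above (hypothetical numbers): with
 * newseq = 7 and two LAYOUTGETs outstanding, the barrier becomes 5, so
 * replies carrying seqids at or below 5 will be treated as stale.
 */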
720 
721 static bool
722 pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
723 		const nfs4_stateid *stateid)
724 {
725 	u32 seqid = be32_to_cpu(stateid->seqid);
726 
727 	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
728 }
729 
730 /* lget is set to 1 if called from inside send_layoutget call chain */
731 static bool
732 pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
733 {
734 	return lo->plh_block_lgets ||
735 		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
736 		(list_empty(&lo->plh_segs) &&
737 		 (atomic_read(&lo->plh_outstanding) > lget));
738 }
739 
740 int
741 pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
742 			      struct nfs4_state *open_state)
743 {
744 	int status = 0;
745 
746 	dprintk("--> %s\n", __func__);
747 	spin_lock(&lo->plh_inode->i_lock);
748 	if (pnfs_layoutgets_blocked(lo, 1)) {
749 		status = -EAGAIN;
750 	} else if (!nfs4_valid_open_stateid(open_state)) {
751 		status = -EBADF;
752 	} else if (list_empty(&lo->plh_segs)) {
753 		int seq;
754 
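		/* Retry until we read a consistent copy of the open stateid */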
755 		do {
756 			seq = read_seqbegin(&open_state->seqlock);
757 			nfs4_stateid_copy(dst, &open_state->stateid);
758 		} while (read_seqretry(&open_state->seqlock, seq));
759 	} else
760 		nfs4_stateid_copy(dst, &lo->plh_stateid);
761 	spin_unlock(&lo->plh_inode->i_lock);
762 	dprintk("<-- %s\n", __func__);
763 	return status;
764 }
765 
766 /*
767  * Get layout from server.
768  *    For now, assume that whole-file layouts are requested:
769  *    arg->offset: 0
770  *    arg->length: all ones
771  */
772 static struct pnfs_layout_segment *
773 send_layoutget(struct pnfs_layout_hdr *lo,
774 	   struct nfs_open_context *ctx,
775 	   struct pnfs_layout_range *range,
776 	   gfp_t gfp_flags)
777 {
778 	struct inode *ino = lo->plh_inode;
779 	struct nfs_server *server = NFS_SERVER(ino);
780 	struct nfs4_layoutget *lgp;
781 	struct pnfs_layout_segment *lseg;
782 
783 	dprintk("--> %s\n", __func__);
784 
785 	lgp = kzalloc(sizeof(*lgp), gfp_flags);
786 	if (lgp == NULL)
787 		return NULL;
788 
789 	lgp->args.minlength = PAGE_CACHE_SIZE;
790 	if (lgp->args.minlength > range->length)
791 		lgp->args.minlength = range->length;
792 	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
793 	lgp->args.range = *range;
794 	lgp->args.type = server->pnfs_curr_ld->id;
795 	lgp->args.inode = ino;
796 	lgp->args.ctx = get_nfs_open_context(ctx);
797 	lgp->gfp_flags = gfp_flags;
798 	lgp->cred = lo->plh_lc_cred;
799 
800 	/* Synchronously retrieve layout information from server and
801 	 * store in lseg.
802 	 */
803 	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
804 	if (IS_ERR(lseg)) {
805 		switch (PTR_ERR(lseg)) {
806 		case -ENOMEM:
807 		case -ERESTARTSYS:
808 			break;
809 		default:
810 			/* remember that LAYOUTGET failed and suspend trying */
811 			pnfs_layout_io_set_failed(lo, range->iomode);
812 		}
813 		return NULL;
814 	}
815 
816 	return lseg;
817 }
818 
819 static void pnfs_clear_layoutcommit(struct inode *inode,
820 		struct list_head *head)
821 {
822 	struct nfs_inode *nfsi = NFS_I(inode);
823 	struct pnfs_layout_segment *lseg, *tmp;
824 
825 	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
826 		return;
827 	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
828 		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
829 			continue;
830 		pnfs_lseg_dec_and_remove_zero(lseg, head);
831 	}
832 }
833 
834 /*
835  * Initiates a LAYOUTRETURN(FILE) and removes the pnfs_layout_hdr
836  * when the layout segment list is empty.
837  *
838  * Note that a pnfs_layout_hdr can exist with an empty layout segment
839  * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
840  * deviceid is marked invalid.
841  */
842 int
843 _pnfs_return_layout(struct inode *ino)
844 {
845 	struct pnfs_layout_hdr *lo = NULL;
846 	struct nfs_inode *nfsi = NFS_I(ino);
847 	LIST_HEAD(tmp_list);
848 	struct nfs4_layoutreturn *lrp;
849 	nfs4_stateid stateid;
850 	int status = 0, empty;
851 
852 	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
853 
854 	spin_lock(&ino->i_lock);
855 	lo = nfsi->layout;
856 	if (!lo) {
857 		spin_unlock(&ino->i_lock);
858 		dprintk("NFS: %s no layout to return\n", __func__);
859 		goto out;
860 	}
861 	stateid = nfsi->layout->plh_stateid;
862 	/* Reference matched in nfs4_layoutreturn_release */
863 	pnfs_get_layout_hdr(lo);
864 	empty = list_empty(&lo->plh_segs);
865 	pnfs_clear_layoutcommit(ino, &tmp_list);
866 	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
867 	/* Don't send a LAYOUTRETURN if list was initially empty */
868 	if (empty) {
869 		spin_unlock(&ino->i_lock);
870 		pnfs_put_layout_hdr(lo);
871 		dprintk("NFS: %s no layout segments to return\n", __func__);
872 		goto out;
873 	}
874 	lo->plh_block_lgets++;
875 	spin_unlock(&ino->i_lock);
876 	pnfs_free_lseg_list(&tmp_list);
877 
878 	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
879 	if (unlikely(lrp == NULL)) {
880 		status = -ENOMEM;
881 		spin_lock(&ino->i_lock);
882 		lo->plh_block_lgets--;
883 		spin_unlock(&ino->i_lock);
884 		pnfs_put_layout_hdr(lo);
885 		goto out;
886 	}
887 
888 	lrp->args.stateid = stateid;
889 	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
890 	lrp->args.inode = ino;
891 	lrp->args.layout = lo;
892 	lrp->clp = NFS_SERVER(ino)->nfs_client;
893 	lrp->cred = lo->plh_lc_cred;
894 
895 	status = nfs4_proc_layoutreturn(lrp);
896 out:
897 	dprintk("<-- %s status: %d\n", __func__, status);
898 	return status;
899 }
900 EXPORT_SYMBOL_GPL(_pnfs_return_layout);
901 
902 int
903 pnfs_commit_and_return_layout(struct inode *inode)
904 {
905 	struct pnfs_layout_hdr *lo;
906 	int ret;
907 
908 	spin_lock(&inode->i_lock);
909 	lo = NFS_I(inode)->layout;
910 	if (lo == NULL) {
911 		spin_unlock(&inode->i_lock);
912 		return 0;
913 	}
914 	pnfs_get_layout_hdr(lo);
915 	/* Block new layoutgets and read/write to ds */
916 	lo->plh_block_lgets++;
917 	spin_unlock(&inode->i_lock);
918 	filemap_fdatawait(inode->i_mapping);
919 	ret = pnfs_layoutcommit_inode(inode, true);
920 	if (ret == 0)
921 		ret = _pnfs_return_layout(inode);
922 	spin_lock(&inode->i_lock);
923 	lo->plh_block_lgets--;
924 	spin_unlock(&inode->i_lock);
925 	pnfs_put_layout_hdr(lo);
926 	return ret;
927 }
928 
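/*
 * Return-on-close: invalidate any layout segments that were granted
 * with return_on_close set, so that the layout can be returned as part
 * of CLOSE processing. Returns true if any such segment was found.
 */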
929 bool pnfs_roc(struct inode *ino)
930 {
931 	struct pnfs_layout_hdr *lo;
932 	struct pnfs_layout_segment *lseg, *tmp;
933 	LIST_HEAD(tmp_list);
934 	bool found = false;
935 
936 	spin_lock(&ino->i_lock);
937 	lo = NFS_I(ino)->layout;
938 	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
939 	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
940 		goto out_nolayout;
941 	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
942 		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
943 			mark_lseg_invalid(lseg, &tmp_list);
944 			found = true;
945 		}
946 	if (!found)
947 		goto out_nolayout;
948 	lo->plh_block_lgets++;
949 	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
950 	spin_unlock(&ino->i_lock);
951 	pnfs_free_lseg_list(&tmp_list);
952 	return true;
953 
954 out_nolayout:
955 	spin_unlock(&ino->i_lock);
956 	return false;
957 }
958 
959 void pnfs_roc_release(struct inode *ino)
960 {
961 	struct pnfs_layout_hdr *lo;
962 
963 	spin_lock(&ino->i_lock);
964 	lo = NFS_I(ino)->layout;
965 	lo->plh_block_lgets--;
966 	if (atomic_dec_and_test(&lo->plh_refcount)) {
967 		pnfs_detach_layout_hdr(lo);
968 		spin_unlock(&ino->i_lock);
969 		pnfs_free_layout_hdr(lo);
970 	} else
971 		spin_unlock(&ino->i_lock);
972 }
973 
974 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
975 {
976 	struct pnfs_layout_hdr *lo;
977 
978 	spin_lock(&ino->i_lock);
979 	lo = NFS_I(ino)->layout;
980 	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
981 		lo->plh_barrier = barrier;
982 	spin_unlock(&ino->i_lock);
983 }
984 
985 bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
986 {
987 	struct nfs_inode *nfsi = NFS_I(ino);
988 	struct pnfs_layout_hdr *lo;
989 	struct pnfs_layout_segment *lseg;
990 	u32 current_seqid;
991 	bool found = false;
992 
993 	spin_lock(&ino->i_lock);
994 	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
995 		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
996 			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
997 			found = true;
998 			goto out;
999 		}
1000 	lo = nfsi->layout;
1001 	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
1002 
1003 	/* Since close does not return a layout stateid for use as
1004 	 * a barrier, we choose the worst-case barrier.
1005 	 */
1006 	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
1007 out:
1008 	spin_unlock(&ino->i_lock);
1009 	return found;
1010 }
1011 
1012 /*
1013  * Compare two layout segments for sorting into layout cache.
1014  * We want to preferentially return RW over RO layouts, so ensure those
1015  * are seen first.
1016  */
1017 static s64
1018 pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
1019 	   const struct pnfs_layout_range *l2)
1020 {
1021 	s64 d;
1022 
1023 	/* high offset > low offset */
1024 	d = l1->offset - l2->offset;
1025 	if (d)
1026 		return d;
1027 
1028 	/* short length > long length */
1029 	d = l2->length - l1->length;
1030 	if (d)
1031 		return d;
1032 
1033 	/* read > read/write */
1034 	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
1035 }
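
/*
 * The resulting cache order (illustrative): lower offsets first; for
 * equal offsets, longer segments first; for identical ranges, RW before
 * READ, so pnfs_find_lseg prefers RW segments.
 */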
1036 
1037 static void
1038 pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1039 		   struct pnfs_layout_segment *lseg)
1040 {
1041 	struct pnfs_layout_segment *lp;
1042 
1043 	dprintk("%s:Begin\n", __func__);
1044 
1045 	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
1046 		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
1047 			continue;
1048 		list_add_tail(&lseg->pls_list, &lp->pls_list);
1049 		dprintk("%s: inserted lseg %p "
1050 			"iomode %d offset %llu length %llu before "
1051 			"lp %p iomode %d offset %llu length %llu\n",
1052 			__func__, lseg, lseg->pls_range.iomode,
1053 			lseg->pls_range.offset, lseg->pls_range.length,
1054 			lp, lp->pls_range.iomode, lp->pls_range.offset,
1055 			lp->pls_range.length);
1056 		goto out;
1057 	}
1058 	list_add_tail(&lseg->pls_list, &lo->plh_segs);
1059 	dprintk("%s: inserted lseg %p "
1060 		"iomode %d offset %llu length %llu at tail\n",
1061 		__func__, lseg, lseg->pls_range.iomode,
1062 		lseg->pls_range.offset, lseg->pls_range.length);
1063 out:
1064 	pnfs_get_layout_hdr(lo);
1065 
1066 	dprintk("%s:Return\n", __func__);
1067 }
1068 
1069 static struct pnfs_layout_hdr *
1070 alloc_init_layout_hdr(struct inode *ino,
1071 		      struct nfs_open_context *ctx,
1072 		      gfp_t gfp_flags)
1073 {
1074 	struct pnfs_layout_hdr *lo;
1075 
1076 	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
1077 	if (!lo)
1078 		return NULL;
1079 	atomic_set(&lo->plh_refcount, 1);
1080 	INIT_LIST_HEAD(&lo->plh_layouts);
1081 	INIT_LIST_HEAD(&lo->plh_segs);
1082 	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1083 	lo->plh_inode = ino;
1084 	lo->plh_lc_cred = get_rpccred(ctx->cred);
1085 	return lo;
1086 }
1087 
1088 static struct pnfs_layout_hdr *
1089 pnfs_find_alloc_layout(struct inode *ino,
1090 		       struct nfs_open_context *ctx,
1091 		       gfp_t gfp_flags)
1092 {
1093 	struct nfs_inode *nfsi = NFS_I(ino);
1094 	struct pnfs_layout_hdr *new = NULL;
1095 
1096 	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1097 
1098 	if (nfsi->layout != NULL)
1099 		goto out_existing;
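	/*
	 * Drop i_lock for the (possibly sleeping) allocation, then
	 * recheck below in case another thread installed a header first.
	 */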
1100 	spin_unlock(&ino->i_lock);
1101 	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1102 	spin_lock(&ino->i_lock);
1103 
1104 	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
1105 		nfsi->layout = new;
1106 		return new;
1107 	} else if (new != NULL)
1108 		pnfs_free_layout_hdr(new);
1109 out_existing:
1110 	pnfs_get_layout_hdr(nfsi->layout);
1111 	return nfsi->layout;
1112 }
1113 
1114 /*
1115  * iomode matching rules:
1116  * iomode	lseg	match
1117  * -----	-----	-----
1118  * ANY		READ	true
1119  * ANY		RW	true
1120  * RW		READ	false
1121  * RW		RW	true
1122  * READ		READ	true
1123  * READ		RW	true
1124  */
1125 static bool
1126 pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
1127 		 const struct pnfs_layout_range *range)
1128 {
1129 	struct pnfs_layout_range range1;
1130 
1131 	if ((range->iomode == IOMODE_RW &&
1132 	     ls_range->iomode != IOMODE_RW) ||
1133 	    !pnfs_lseg_range_intersecting(ls_range, range))
1134 		return 0;
1135 
1136 	/* range1 covers only the first byte in the range */
1137 	range1 = *range;
1138 	range1.length = 1;
1139 	return pnfs_lseg_range_contained(ls_range, &range1);
1140 }
1141 
1142 /*
1143  * lookup range in layout
1144  */
1145 static struct pnfs_layout_segment *
1146 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1147 		struct pnfs_layout_range *range)
1148 {
1149 	struct pnfs_layout_segment *lseg, *ret = NULL;
1150 
1151 	dprintk("%s:Begin\n", __func__);
1152 
1153 	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1154 		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1155 		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
1156 			ret = pnfs_get_lseg(lseg);
1157 			break;
1158 		}
1159 		if (lseg->pls_range.offset > range->offset)
1160 			break;
1161 	}
1162 
1163 	dprintk("%s:Return lseg %p ref %d\n",
1164 		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
1165 	return ret;
1166 }
1167 
1168 /*
1169  * Use mdsthreshold hints set at each OPEN to determine if I/O should go
1170  * to the MDS or over pNFS
1171  *
1172  * The nfs_inode read_io and write_io fields are cumulative counters reset
1173  * when there are no layout segments. Note that in pnfs_update_layout iomode
1174  * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
1175  * WRITE request.
1176  *
1177  * A return of true means use MDS I/O.
1178  *
1179  * From RFC 5661:
1180  * If a file's size is smaller than the file size threshold, data accesses
1181  * SHOULD be sent to the metadata server.  If an I/O request has a length that
1182  * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
1183  * server.  If both file size and I/O size are provided, the client SHOULD
1184  * reach or exceed both thresholds before sending its read or write
1185  * requests to the data server.
1186  */
1187 static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1188 				     struct inode *ino, int iomode)
1189 {
1190 	struct nfs4_threshold *t = ctx->mdsthreshold;
1191 	struct nfs_inode *nfsi = NFS_I(ino);
1192 	loff_t fsize = i_size_read(ino);
1193 	bool size = false, size_set = false, io = false, io_set = false, ret = false;
1194 
1195 	if (t == NULL)
1196 		return ret;
1197 
1198 	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1199 		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1200 
1201 	switch (iomode) {
1202 	case IOMODE_READ:
1203 		if (t->bm & THRESHOLD_RD) {
1204 			dprintk("%s fsize %llu\n", __func__, fsize);
1205 			size_set = true;
1206 			if (fsize < t->rd_sz)
1207 				size = true;
1208 		}
1209 		if (t->bm & THRESHOLD_RD_IO) {
1210 			dprintk("%s nfsi->read_io %llu\n", __func__,
1211 				nfsi->read_io);
1212 			io_set = true;
1213 			if (nfsi->read_io < t->rd_io_sz)
1214 				io = true;
1215 		}
1216 		break;
1217 	case IOMODE_RW:
1218 		if (t->bm & THRESHOLD_WR) {
1219 			dprintk("%s fsize %llu\n", __func__, fsize);
1220 			size_set = true;
1221 			if (fsize < t->wr_sz)
1222 				size = true;
1223 		}
1224 		if (t->bm & THRESHOLD_WR_IO) {
1225 			dprintk("%s nfsi->write_io %llu\n", __func__,
1226 				nfsi->write_io);
1227 			io_set = true;
1228 			if (nfsi->write_io < t->wr_io_sz)
1229 				io = true;
1230 		}
1231 		break;
1232 	}
1233 	if (size_set && io_set) {
1234 		if (size && io)
1235 			ret = true;
1236 	} else if (size || io)
1237 		ret = true;
1238 
1239 	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1240 	return ret;
1241 }
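
/*
 * For example (hypothetical threshold values): with THRESHOLD_RD set,
 * rd_sz = 64k and a 4k file, a READ falls below the size threshold and
 * is steered to the MDS; with both THRESHOLD_RD and THRESHOLD_RD_IO
 * set, both thresholds must be undershot before MDS I/O is used.
 */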
1242 
1243 /*
1244  * Layout segment is retrieved from the server if not cached.
1245  * The appropriate layout segment is referenced and returned to the caller.
1246  */
1247 struct pnfs_layout_segment *
1248 pnfs_update_layout(struct inode *ino,
1249 		   struct nfs_open_context *ctx,
1250 		   loff_t pos,
1251 		   u64 count,
1252 		   enum pnfs_iomode iomode,
1253 		   gfp_t gfp_flags)
1254 {
1255 	struct pnfs_layout_range arg = {
1256 		.iomode = iomode,
1257 		.offset = pos,
1258 		.length = count,
1259 	};
1260 	unsigned pg_offset;
1261 	struct nfs_server *server = NFS_SERVER(ino);
1262 	struct nfs_client *clp = server->nfs_client;
1263 	struct pnfs_layout_hdr *lo;
1264 	struct pnfs_layout_segment *lseg = NULL;
1265 	bool first;
1266 
1267 	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
1268 		goto out;
1269 
1270 	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
1271 		goto out;
1272 
1273 	spin_lock(&ino->i_lock);
1274 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1275 	if (lo == NULL) {
1276 		spin_unlock(&ino->i_lock);
1277 		goto out;
1278 	}
1279 
1280 	/* Do we even need to bother with this? */
1281 	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1282 		dprintk("%s matches recall, use MDS\n", __func__);
1283 		goto out_unlock;
1284 	}
1285 
1286 	/* if LAYOUTGET already failed once we don't try again */
1287 	if (pnfs_layout_io_test_failed(lo, iomode))
1288 		goto out_unlock;
1289 
1290 	/* Check to see if the layout for the given range already exists */
1291 	lseg = pnfs_find_lseg(lo, &arg);
1292 	if (lseg)
1293 		goto out_unlock;
1294 
1295 	if (pnfs_layoutgets_blocked(lo, 0))
1296 		goto out_unlock;
1297 	atomic_inc(&lo->plh_outstanding);
1298 
1299 	first = list_empty(&lo->plh_layouts);
1300 	spin_unlock(&ino->i_lock);
1301 
1302 	if (first) {
1303 		/* The lo must be on the clp list if there is any
1304 		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
1305 		 */
1306 		spin_lock(&clp->cl_lock);
1307 		list_add_tail(&lo->plh_layouts, &server->layouts);
1308 		spin_unlock(&clp->cl_lock);
1309 	}
1310 
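	/* Expand the requested range outward to whole-page boundaries */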
1311 	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
1312 	if (pg_offset) {
1313 		arg.offset -= pg_offset;
1314 		arg.length += pg_offset;
1315 	}
1316 	if (arg.length != NFS4_MAX_UINT64)
1317 		arg.length = PAGE_CACHE_ALIGN(arg.length);
1318 
1319 	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1320 	atomic_dec(&lo->plh_outstanding);
1321 out_put_layout_hdr:
1322 	pnfs_put_layout_hdr(lo);
1323 out:
1324 	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1325 			"(%s, offset: %llu, length: %llu)\n",
1326 			__func__, ino->i_sb->s_id,
1327 			(unsigned long long)NFS_FILEID(ino),
1328 			lseg == NULL ? "not found" : "found",
1329 			iomode == IOMODE_RW ? "read/write" : "read-only",
1330 			(unsigned long long)pos,
1331 			(unsigned long long)count);
1332 	return lseg;
1333 out_unlock:
1334 	spin_unlock(&ino->i_lock);
1335 	goto out_put_layout_hdr;
1336 }
1337 EXPORT_SYMBOL_GPL(pnfs_update_layout);
1338 
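/*
 * Turn a successful LAYOUTGET reply into a layout segment and insert it
 * into the layout cache; if a recall or a blocking stateid arrived in
 * the meantime, the reply is forgotten instead.
 */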
1339 struct pnfs_layout_segment *
1340 pnfs_layout_process(struct nfs4_layoutget *lgp)
1341 {
1342 	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1343 	struct nfs4_layoutget_res *res = &lgp->res;
1344 	struct pnfs_layout_segment *lseg;
1345 	struct inode *ino = lo->plh_inode;
1346 	LIST_HEAD(free_me);
1347 	int status = 0;
1348 
1349 	/* Inject layout blob into I/O device driver */
1350 	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1351 	if (!lseg || IS_ERR(lseg)) {
1352 		if (!lseg)
1353 			status = -ENOMEM;
1354 		else
1355 			status = PTR_ERR(lseg);
1356 		dprintk("%s: Could not allocate layout: error %d\n",
1357 		       __func__, status);
1358 		goto out;
1359 	}
1360 
1361 	spin_lock(&ino->i_lock);
1362 	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1363 		dprintk("%s forget reply due to recall\n", __func__);
1364 		goto out_forget_reply;
1365 	}
1366 
1367 	if (pnfs_layoutgets_blocked(lo, 1) ||
1368 	    pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1369 		dprintk("%s forget reply due to state\n", __func__);
1370 		goto out_forget_reply;
1371 	}
1372 
1373 	/* Check that the new stateid matches the old stateid */
1374 	pnfs_verify_layout_stateid(lo, &res->stateid, &free_me);
1375 	/* Done processing layoutget. Set the layout stateid */
1376 	pnfs_set_layout_stateid(lo, &res->stateid, false);
1377 
1378 	init_lseg(lo, lseg);
1379 	lseg->pls_range = res->range;
1380 	pnfs_get_lseg(lseg);
1381 	pnfs_layout_insert_lseg(lo, lseg);
1382 
1383 	if (res->return_on_close) {
1384 		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1385 		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
1386 	}
1387 
1388 	spin_unlock(&ino->i_lock);
1389 	pnfs_free_lseg_list(&free_me);
1390 	return lseg;
1391 out:
1392 	return ERR_PTR(status);
1393 
1394 out_forget_reply:
1395 	spin_unlock(&ino->i_lock);
1396 	lseg->pls_layout = lo;
1397 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1398 	goto out;
1399 }
1400 
1401 void
1402 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1403 {
1404 	u64 rd_size = req->wb_bytes;
1405 
1406 	WARN_ON_ONCE(pgio->pg_lseg != NULL);
1407 
1408 	if (pgio->pg_dreq == NULL)
1409 		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
1410 	else
1411 		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
1412 
1413 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1414 					   req->wb_context,
1415 					   req_offset(req),
1416 					   rd_size,
1417 					   IOMODE_READ,
1418 					   GFP_KERNEL);
1419 	/* If no lseg, fall back to read through mds */
1420 	if (pgio->pg_lseg == NULL)
1421 		nfs_pageio_reset_read_mds(pgio);
1423 }
1424 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1425 
1426 void
1427 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
1428 			   struct nfs_page *req, u64 wb_size)
1429 {
1430 	WARN_ON_ONCE(pgio->pg_lseg != NULL);
1431 
1432 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1433 					   req->wb_context,
1434 					   req_offset(req),
1435 					   wb_size,
1436 					   IOMODE_RW,
1437 					   GFP_NOFS);
1438 	/* If no lseg, fall back to write through mds */
1439 	if (pgio->pg_lseg == NULL)
1440 		nfs_pageio_reset_write_mds(pgio);
1441 }
1442 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1443 
1444 /*
1445  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
1446  * of bytes (maximum @req->wb_bytes) that can be coalesced.
1447  */
1448 size_t
1449 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1450 		     struct nfs_page *req)
1451 {
1452 	unsigned int size;
1453 	u64 seg_end, req_start, seg_left;
1454 
1455 	size = nfs_generic_pg_test(pgio, prev, req);
1456 	if (!size)
1457 		return 0;
1458 
1459 	/*
1460 	 * 'size' contains the number of bytes left in the current page (up
1461 	 * to the original size asked for in @req->wb_bytes).
1462 	 *
1463 	 * Calculate how many bytes are left in the layout segment
1464 	 * and if there are less bytes than 'size', return that instead.
1465 	 *
1466 	 * Please also note that 'end_offset' is actually the offset of the
1467 	 * first byte that lies outside the pnfs_layout_range. FIXME?
1468 	 *
1469 	 */
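	/*
	 * Worked example (hypothetical numbers): a segment covering
	 * [0, 8192) with a request starting at offset 4096 gives
	 * seg_end = 8192 and seg_left = 4096, so a 'size' of 8192 is
	 * clamped down to 4096.
	 */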
1470 	if (pgio->pg_lseg) {
1471 		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
1472 				     pgio->pg_lseg->pls_range.length);
1473 		req_start = req_offset(req);
1474 		WARN_ON_ONCE(req_start > seg_end);
1475 		/* start of request is past the last byte of this segment */
1476 		if (req_start >= seg_end)
1477 			return 0;
1478 
1479 		/* adjust 'size' iff there are fewer bytes left in the
1480 		 * segment than what nfs_generic_pg_test returned */
1481 		seg_left = seg_end - req_start;
1482 		if (seg_left < size)
1483 			size = (unsigned int)seg_left;
1484 	}
1485 
1486 	return size;
1487 }
1488 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1489 
1490 int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
1491 {
1492 	struct nfs_pageio_descriptor pgio;
1493 
1494 	/* Resend all requests through the MDS */
1495 	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
1496 			      hdr->completion_ops);
1497 	return nfs_pageio_resend(&pgio, hdr);
1498 }
1499 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1500 
1501 static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
1502 {
1504 	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1505 	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1506 	    PNFS_LAYOUTRET_ON_ERROR) {
1507 		pnfs_return_layout(hdr->inode);
1508 	}
1509 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1510 		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
1511 }
1512 
1513 /*
1514  * Called by non-RPC-based layout drivers
1515  */
1516 void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
1517 {
1518 	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
1519 	if (!hdr->pnfs_error) {
1520 		pnfs_set_layoutcommit(hdr);
1521 		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
1522 	} else
1523 		pnfs_ld_handle_write_error(hdr);
1524 	hdr->mds_ops->rpc_release(hdr);
1525 }
1526 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1527 
1528 static void
1529 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1530 		struct nfs_pgio_header *hdr)
1531 {
1532 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1533 		list_splice_tail_init(&hdr->pages, &desc->pg_list);
1534 		nfs_pageio_reset_write_mds(desc);
1535 		desc->pg_recoalesce = 1;
1536 	}
1537 	nfs_pgio_data_destroy(hdr);
1538 }
1539 
1540 static enum pnfs_try_status
1541 pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
1542 			const struct rpc_call_ops *call_ops,
1543 			struct pnfs_layout_segment *lseg,
1544 			int how)
1545 {
1546 	struct inode *inode = hdr->inode;
1547 	enum pnfs_try_status trypnfs;
1548 	struct nfs_server *nfss = NFS_SERVER(inode);
1549 
1550 	hdr->mds_ops = call_ops;
1551 
1552 	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1553 		inode->i_ino, hdr->args.count, hdr->args.offset, how);
1554 	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
1555 	if (trypnfs != PNFS_NOT_ATTEMPTED)
1556 		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1557 	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1558 	return trypnfs;
1559 }
1560 
1561 static void
1562 pnfs_do_write(struct nfs_pageio_descriptor *desc,
1563 	      struct nfs_pgio_header *hdr, int how)
1564 {
1565 	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1566 	struct pnfs_layout_segment *lseg = desc->pg_lseg;
1567 	enum pnfs_try_status trypnfs;
1568 
1569 	desc->pg_lseg = NULL;
1570 	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
1571 	if (trypnfs == PNFS_NOT_ATTEMPTED)
1572 		pnfs_write_through_mds(desc, hdr);
1573 	pnfs_put_lseg(lseg);
1574 }
1575 
1576 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
1577 {
1578 	pnfs_put_lseg(hdr->lseg);
1579 	nfs_pgio_header_free(hdr);
1580 }
1581 EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
1582 
1583 int
1584 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1585 {
1586 	struct nfs_pgio_header *hdr;
1587 	int ret;
1588 
1589 	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
1590 	if (!hdr) {
1591 		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1592 		pnfs_put_lseg(desc->pg_lseg);
1593 		desc->pg_lseg = NULL;
1594 		return -ENOMEM;
1595 	}
1596 	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
1597 	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1598 	ret = nfs_generic_pgio(desc, hdr);
1599 	if (ret != 0) {
1600 		pnfs_put_lseg(desc->pg_lseg);
1601 		desc->pg_lseg = NULL;
1602 	} else
1603 		pnfs_do_write(desc, hdr, desc->pg_ioflags);
1604 	return ret;
1605 }
1606 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1607 
1608 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
1609 {
1610 	struct nfs_pageio_descriptor pgio;
1611 
1612 	/* Resend all requests through the MDS */
1613 	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
1614 	return nfs_pageio_resend(&pgio, hdr);
1615 }
1616 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
1617 
1618 static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
1619 {
1620 	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
1621 	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1622 	    PNFS_LAYOUTRET_ON_ERROR) {
1623 		pnfs_return_layout(hdr->inode);
1624 	}
1625 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1626 		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
1627 }
1628 
1629 /*
1630  * Called by non-RPC-based layout drivers
1631  */
1632 void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
1633 {
1634 	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
1635 	if (likely(!hdr->pnfs_error)) {
1636 		__nfs4_read_done_cb(hdr);
1637 		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
1638 	} else
1639 		pnfs_ld_handle_read_error(hdr);
1640 	hdr->mds_ops->rpc_release(hdr);
1641 }
1642 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1643 
1644 static void
1645 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1646 		struct nfs_pgio_header *hdr)
1647 {
1648 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1649 		list_splice_tail_init(&hdr->pages, &desc->pg_list);
1650 		nfs_pageio_reset_read_mds(desc);
1651 		desc->pg_recoalesce = 1;
1652 	}
1653 	nfs_pgio_data_destroy(hdr);
1654 }
1655 
1656 /*
1657  * Call the appropriate parallel I/O subsystem read function.
1658  */
1659 static enum pnfs_try_status
1660 pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
1661 		       const struct rpc_call_ops *call_ops,
1662 		       struct pnfs_layout_segment *lseg)
1663 {
1664 	struct inode *inode = hdr->inode;
1665 	struct nfs_server *nfss = NFS_SERVER(inode);
1666 	enum pnfs_try_status trypnfs;
1667 
1668 	hdr->mds_ops = call_ops;
1669 
1670 	dprintk("%s: Reading ino:%lu %u@%llu\n",
1671 		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);
1672 
1673 	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
1674 	if (trypnfs != PNFS_NOT_ATTEMPTED)
1675 		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
1676 	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1677 	return trypnfs;
1678 }
1679 
1680 static void
1681 pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
1682 {
1683 	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1684 	struct pnfs_layout_segment *lseg = desc->pg_lseg;
1685 	enum pnfs_try_status trypnfs;
1686 
1687 	desc->pg_lseg = NULL;
1688 	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
1689 	if (trypnfs == PNFS_NOT_ATTEMPTED)
1690 		pnfs_read_through_mds(desc, hdr);
1691 	pnfs_put_lseg(lseg);
1692 }
1693 
1694 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
1695 {
1696 	pnfs_put_lseg(hdr->lseg);
1697 	nfs_pgio_header_free(hdr);
1698 }
1699 EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
1700 
1701 int
1702 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
1703 {
1704 	struct nfs_pgio_header *hdr;
1705 	int ret;
1706 
1707 	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
1708 	if (!hdr) {
1709 		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1710 		ret = -ENOMEM;
1711 		pnfs_put_lseg(desc->pg_lseg);
1712 		desc->pg_lseg = NULL;
1713 		return ret;
1714 	}
1715 	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
1716 	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1717 	ret = nfs_generic_pgio(desc, hdr);
1718 	if (ret != 0) {
1719 		pnfs_put_lseg(desc->pg_lseg);
1720 		desc->pg_lseg = NULL;
1721 	} else
1722 		pnfs_do_read(desc, hdr);
1723 	return ret;
1724 }
1725 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
1726 
1727 static void pnfs_clear_layoutcommitting(struct inode *inode)
1728 {
1729 	unsigned long *bitlock = &NFS_I(inode)->flags;
1730 
1731 	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
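	/* Order the bit clear before the wake_up_bit() waitqueue check */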
1732 	smp_mb__after_atomic();
1733 	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
1734 }
1735 
1736 /*
1737  * There can be multiple RW segments.
1738  */
1739 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
1740 {
1741 	struct pnfs_layout_segment *lseg;
1742 
1743 	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
1744 		if (lseg->pls_range.iomode == IOMODE_RW &&
1745 		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1746 			list_add(&lseg->pls_lc_list, listp);
1747 	}
1748 }
1749 
1750 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
1751 {
1752 	struct pnfs_layout_segment *lseg, *tmp;
1753 
1754 	/* Matched by references in pnfs_set_layoutcommit */
1755 	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
1756 		list_del_init(&lseg->pls_lc_list);
1757 		pnfs_put_lseg(lseg);
1758 	}
1759 
1760 	pnfs_clear_layoutcommitting(inode);
1761 }
1762 
1763 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
1764 {
1765 	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
1766 }
1767 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
1768 
1769 void
1770 pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
1771 {
1772 	struct inode *inode = hdr->inode;
1773 	struct nfs_inode *nfsi = NFS_I(inode);
1774 	loff_t end_pos = hdr->mds_offset + hdr->res.count;
1775 	bool mark_as_dirty = false;
1776 
1777 	spin_lock(&inode->i_lock);
1778 	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1779 		mark_as_dirty = true;
1780 		dprintk("%s: Set layoutcommit for inode %lu ",
1781 			__func__, inode->i_ino);
1782 	}
1783 	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
1784 		/* references matched in nfs4_layoutcommit_release */
1785 		pnfs_get_lseg(hdr->lseg);
1786 	}
1787 	if (end_pos > nfsi->layout->plh_lwb)
1788 		nfsi->layout->plh_lwb = end_pos;
1789 	spin_unlock(&inode->i_lock);
1790 	dprintk("%s: lseg %p end_pos %llu\n",
1791 		__func__, hdr->lseg, nfsi->layout->plh_lwb);
1792 
1793 	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
1794 	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
1795 	if (mark_as_dirty)
1796 		mark_inode_dirty_sync(inode);
1797 }
1798 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1799 
1800 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
1801 {
1802 	struct nfs_server *nfss = NFS_SERVER(data->args.inode);
1803 
1804 	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
1805 		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
1806 	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
1807 }
1808 
1809 /*
1810  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
1811  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
1812  * data to disk to allow the server to recover the data if it crashes.
1813  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
1814  * is off and either a COMMIT is sent to a data server or WRITEs to a
1815  * data server return NFS_DATA_SYNC.
1816  */
1817 int
1818 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
1819 {
1820 	struct nfs4_layoutcommit_data *data;
1821 	struct nfs_inode *nfsi = NFS_I(inode);
1822 	loff_t end_pos;
1823 	int status;
1824 
1825 	if (!pnfs_layoutcommit_outstanding(inode))
1826 		return 0;
1827 
1828 	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
1829 
1830 	status = -EAGAIN;
1831 	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
1832 		if (!sync)
1833 			goto out;
1834 		status = wait_on_bit_lock_action(&nfsi->flags,
1835 				NFS_INO_LAYOUTCOMMITTING,
1836 				nfs_wait_bit_killable,
1837 				TASK_KILLABLE);
1838 		if (status)
1839 			goto out;
1840 	}
1841 
1842 	status = -ENOMEM;
1843 	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
1844 	data = kzalloc(sizeof(*data), GFP_NOFS);
1845 	if (!data)
1846 		goto clear_layoutcommitting;
1847 
1848 	status = 0;
1849 	spin_lock(&inode->i_lock);
1850 	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1851 		goto out_unlock;
1852 
1853 	INIT_LIST_HEAD(&data->lseg_list);
1854 	pnfs_list_write_lseg(inode, &data->lseg_list);
1855 
1856 	end_pos = nfsi->layout->plh_lwb;
1857 	nfsi->layout->plh_lwb = 0;
1858 
1859 	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
1860 	spin_unlock(&inode->i_lock);
1861 
1862 	data->args.inode = inode;
1863 	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
1864 	nfs_fattr_init(&data->fattr);
1865 	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
1866 	data->res.fattr = &data->fattr;
1867 	data->args.lastbytewritten = end_pos - 1;
1868 	data->res.server = NFS_SERVER(inode);
1869 
1870 	status = nfs4_proc_layoutcommit(data, sync);
1871 out:
1872 	if (status)
1873 		mark_inode_dirty_sync(inode);
1874 	dprintk("<-- %s status %d\n", __func__, status);
1875 	return status;
1876 out_unlock:
1877 	spin_unlock(&inode->i_lock);
1878 	kfree(data);
1879 clear_layoutcommitting:
1880 	pnfs_clear_layoutcommitting(inode);
1881 	goto out;
1882 }
1883 
1884 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
1885 {
1886 	struct nfs4_threshold *thp;
1887 
1888 	thp = kzalloc(sizeof(*thp), GFP_NOFS);
1889 	if (!thp) {
1890 		dprintk("%s mdsthreshold allocation failed\n", __func__);
1891 		return NULL;
1892 	}
1893 	return thp;
1894 }
1895