// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>
#include <linux/filelock.h>

#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

struct nfs_page_iter_page {
	const struct nfs_page *req;
	size_t count;
};

static void nfs_page_iter_page_init(struct nfs_page_iter_page *i,
				    const struct nfs_page *req)
{
	i->req = req;
	i->count = 0;
}

static void nfs_page_iter_page_advance(struct nfs_page_iter_page *i, size_t sz)
{
	const struct nfs_page *req = i->req;
	size_t tmp = i->count + sz;

	i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
}

static struct page *nfs_page_iter_page_get(struct nfs_page_iter_page *i)
{
	const struct nfs_page *req = i->req;
	struct page *page;

	if (i->count != req->wb_bytes) {
		size_t base = i->count + req->wb_pgbase;
		size_t len = PAGE_SIZE - offset_in_page(base);

		page = nfs_page_to_page(req, base);
		nfs_page_iter_page_advance(i, len);
		return page;
	}
	return NULL;
}
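
/*
 * Example (illustrative sketch, not called from this file): walking every
 * page that backs a request, one page-aligned step at a time. The
 * consume_page() callee is hypothetical.
 *
 *	struct nfs_page_iter_page i;
 *	struct page *page;
 *
 *	nfs_page_iter_page_init(&i, req);
 *	while ((page = nfs_page_iter_page_get(&i)) != NULL)
 *		consume_page(page);
 */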

static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_get_mirror)
		return desc->pg_ops->pg_get_mirror(desc, idx);
	return &desc->pg_mirrors[0];
}

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

static u32
nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_set_mirror)
		return desc->pg_ops->pg_set_mirror(desc, idx);
	return desc->pg_mirror_idx;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = nfs_req_openctx(hdr->req)->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	trace_nfs_pgio_error(hdr, error, pos);
	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}

static inline struct nfs_page *nfs_page_alloc(void)
{
	struct nfs_page *p =
		kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for I/O to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * Returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}
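
/*
 * Example (sketch, modelled on how unlock paths drain outstanding I/O on
 * a lock context before proceeding): the surrounding caller context is
 * hypothetical; a fatal signal surfaces as -ERESTARTSYS.
 *
 *	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
 *
 *	if (!IS_ERR(l_ctx)) {
 *		int status = nfs_iocounter_wait(l_ctx);
 *
 *		nfs_put_lock_context(l_ctx);
 *		if (status < 0)
 *			return status;
 *	}
 */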

/**
 * nfs_async_iocounter_wait - wait on an rpc_waitqueue for I/O to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_group_lock_head - page lock the head of the page group
 * @req: any member of the page group
 *
 * On success, returns the locked head request, taking an extra
 * reference when it differs from @req.
 */
struct nfs_page *
nfs_page_group_lock_head(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	while (!nfs_lock_request(head)) {
		int ret = nfs_wait_on_request(head);

		if (ret < 0)
			return ERR_PTR(ret);
	}
	if (head != req)
		kref_get(&head->wb_kref);
	return head;
}

/*
 * nfs_unroll_locks -  unlock all newly locked subrequests
 * @head: head request of page group, must be holding head lock
 * @req: request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests; it
 * relinquishes all of the subrequest locks grabbed so far, stopping
 * short of @req.
 */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}

/*
 * nfs_page_group_lock_subreq -  try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
	int ret;

	if (!kref_get_unless_zero(&subreq->wb_kref))
		return 0;
	while (!nfs_lock_request(subreq)) {
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			ret = nfs_page_group_lock(head);
		if (ret < 0) {
			nfs_unroll_locks(head, subreq);
			nfs_release_request(subreq);
			return ret;
		}
	}
	return 0;
}

/*
 * nfs_page_group_lock_subrequests -  try to lock the subrequests
 * @head: head request of page group
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request locked.
 */
int nfs_page_group_lock_subrequests(struct nfs_page *head)
{
	struct nfs_page *subreq;
	int ret;

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		return ret;
	/* lock each request in the page group */
	for (subreq = head->wb_this_page; subreq != head;
			subreq = subreq->wb_this_page) {
		ret = nfs_page_group_lock_subreq(head, subreq);
		if (ret < 0)
			return ret;
	}
	nfs_page_group_unlock(head);
	return 0;
}

/*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
 *
 * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_set_headlock(struct nfs_page *req)
{
	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 * @req: request that is to be unlocked
 */
void
nfs_page_clear_headlock(struct nfs_page *req)
{
	clear_bit_unlock(PG_HEADLOCK, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req: request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	int ret;

	ret = nfs_page_set_headlock(req);
	if (ret || req->wb_head == req)
		return ret;
	return nfs_page_set_headlock(req->wb_head);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req: request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	if (req != req->wb_head)
		nfs_page_clear_headlock(req->wb_head);
	nfs_page_clear_headlock(req);
}
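
/*
 * Example (sketch): the pattern used throughout this file for walking a
 * page group safely. The group lock must bracket any traversal of the
 * wb_this_page ring; the inspect() step is hypothetical.
 *
 *	struct nfs_page *tmp;
 *
 *	nfs_page_group_lock(req);
 *	for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
 *		inspect(tmp);
 *	nfs_page_group_unlock(req);
 */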

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}
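
/*
 * Example (sketch of a typical use, modelled on the write path): each
 * subrequest sets the sync bit as it finishes, and only the last one,
 * for which this returns true, performs the group-wide action. The
 * group_wide_completion() callee is hypothetical.
 *
 *	if (nfs_page_group_sync_on_bit(req, PG_WB_END))
 *		group_wide_completion(req);
 */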

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;

	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = nfs_page_to_inode(req);
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
					unsigned int pgbase, pgoff_t index,
					unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;
	struct nfs_open_context *ctx = l_ctx->open_context;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->wb_lock_context = l_ctx;
	refcount_inc(&l_ctx->count);
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. */
	req->wb_pgbase = pgbase;
	req->wb_index = index;
	req->wb_offset = offset;
	req->wb_bytes = count;
	kref_init(&req->wb_kref);
	req->wb_nio = 0;
	return req;
}

static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
{
	if (folio != NULL) {
		req->wb_folio = folio;
		folio_get(folio);
		set_bit(PG_FOLIO, &req->wb_flags);
	}
}

static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
{
	if (page != NULL) {
		req->wb_page = page;
		get_page(page);
	}
}

/**
 * nfs_page_create_from_page - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to read or write
 * @pgbase: starting offset within the page for the I/O
 * @offset: file offset for the I/O
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
					   struct page *page,
					   unsigned int pgbase, loff_t offset,
					   unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = nfs_page_create(l_ctx, pgbase, offset >> PAGE_SHIFT,
			      offset_in_page(offset), count);
	if (!IS_ERR(ret)) {
		nfs_page_assign_page(ret, page);
		nfs_page_group_init(ret, NULL);
	}
	nfs_put_lock_context(l_ctx);
	return ret;
}
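
/*
 * Usage sketch (illustrative): create a request covering @count bytes of
 * a locked page and drop it again. Everything around the two calls is
 * hypothetical caller context.
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_page_create_from_page(ctx, page, 0, offset, count);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	nfs_release_request(req);
 */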

/**
 * nfs_page_create_from_folio - Create an NFS read/write request.
 * @ctx: open context to use
 * @folio: folio to read or write
 * @offset: starting offset within the folio for the I/O
 * @count: number of bytes to read/write
 *
 * The folio must be locked by the caller. This makes sure we never
 * create two different requests for the same folio.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
					    struct folio *folio,
					    unsigned int offset,
					    unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = nfs_page_create(l_ctx, offset, folio_index(folio), offset, count);
	if (!IS_ERR(ret)) {
		nfs_page_assign_folio(ret, folio);
		nfs_page_group_init(ret, NULL);
	}
	nfs_put_lock_context(l_ctx);
	return ret;
}

static struct nfs_page *
nfs_create_subreq(struct nfs_page *req,
		  unsigned int pgbase,
		  unsigned int offset,
		  unsigned int count)
{
	struct nfs_page *last;
	struct nfs_page *ret;
	struct folio *folio = nfs_page_to_folio(req);
	struct page *page = nfs_page_to_page(req, pgbase);

	ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
			      offset, count);
	if (!IS_ERR(ret)) {
		if (folio)
			nfs_page_assign_folio(ret, folio);
		else
			nfs_page_assign_page(ret, page);
		/* find the last request */
		for (last = req->wb_head;
		     last->wb_this_page != req->wb_head;
		     last = last->wb_this_page)
			;

		nfs_lock_request(ret);
		nfs_page_group_init(ret, last);
		ret->wb_nio = req->wb_nio;
	}
	return ret;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	clear_bit_unlock(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	struct page *page = req->wb_page;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	struct nfs_open_context *ctx;

	if (folio != NULL) {
		folio_put(folio);
		req->wb_folio = NULL;
		clear_bit(PG_FOLIO, &req->wb_flags);
	} else if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			ctx = l_ctx->open_context;
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
}

/**
 * nfs_free_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible (TASK_UNINTERRUPTIBLE), so the user is
 * responsible for holding a count on the request for its duration.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
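
/*
 * Worked example of the slab limit above (assuming 4 KiB pages and 8-byte
 * pointers): one PAGE_SIZE allocation holds 4096 / 8 = 512 page pointers,
 * so coalescing stops once pg_count + wb_bytes would span more than 512
 * pages (about 2 MiB), regardless of pg_bsize.
 */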

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 * @hdr: A header that has had nfs_generic_pgio called
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @pgbase: base offset of the data within the first page
 * @count: Number of bytes to read or write
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, unsigned int pgbase,
			      unsigned int count, int how,
			      struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = pgbase;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		fallthrough;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = 0;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;

	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};

	if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;

	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	desc->pg_maxretrans = 0;
}
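
/*
 * Lifecycle sketch (illustrative): how a read or write path might drive a
 * descriptor. The compl_ops and rw_ops values here are hypothetical,
 * caller-supplied tables; nfs_pgio_rw_ops is defined at the bottom of
 * this file.
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, &nfs_pgio_rw_ops,
 *			compl_ops, rw_ops, NFS_SERVER(inode)->rsize, 0);
 *	...	add requests with nfs_pageio_add_request()	...
 *	nfs_pageio_complete(&pgio);
 */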

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page		*req;
	struct page		**pages,
				*last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	unsigned int pg_base = offset_in_page(mirror->pg_base);
	gfp_t gfp_flags = nfs_io_gfp_mask();

	pagecount = nfs_page_array_len(pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		struct nfs_page_iter_page i;
		struct page *page;

		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (req->wb_pgbase == 0)
			last_page = NULL;

		nfs_page_iter_page_init(&i, req);
		while ((page = nfs_page_iter_page_get(&i)) != NULL) {
			if (last_page != page) {
				pageused++;
				if (pageused > pagecount)
					goto full;
				*pages++ = last_page = page;
			}
		}
	}
full:
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, pg_base, mirror->pg_count, desc->pg_ioflags,
			  &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;
	unsigned short task_flags = 0;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0) {
		if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
			task_flags = RPC_TASK_MOVEABLE;
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags,
					RPC_TASK_CRED_NOREF | task_flags);
	}
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
		unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		mirror_count = 1;
	}
	pgio->pg_mirror_count = mirror_count;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

static bool nfs_page_is_contiguous(const struct nfs_page *prev,
				   const struct nfs_page *req)
{
	size_t prev_end = prev->wb_pgbase + prev->wb_bytes;

	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	if (req->wb_pgbase == 0)
		return prev_end == nfs_page_max_length(prev);
	if (req->wb_pgbase == prev_end) {
		struct folio *folio = nfs_page_to_folio(req);

		if (folio)
			return folio == nfs_page_to_folio(prev);
		return req->wb_page == prev->wb_page;
	}
	return false;
}

/**
 * nfs_coalesce_size - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Returns size of the request that can be coalesced
 */
static unsigned int nfs_coalesce_size(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
			return 0;
		flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return 0;
		if (!nfs_page_is_contiguous(prev, req))
			return 0;
	}
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request 'req' was successfully coalesced into the existing list
 * of pages 'desc', it returns the size of req.
 */
static unsigned int
nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;
	unsigned int size;

	if (list_empty(&mirror->pg_list)) {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
		mirror->pg_count = 0;
		mirror->pg_recoalesce = 0;
	} else
		prev = nfs_list_entry(mirror->pg_list.prev);

	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
			desc->pg_error = -ETIMEDOUT;
		else
			desc->pg_error = -EIO;
		return 0;
	}

	size = nfs_coalesce_size(prev, req, desc);
	if (size < req->wb_bytes)
		return size;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return req->wb_bytes;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);

		if (error < 0)
			desc->pg_error = error;
		if (list_empty(&mirror->pg_list))
			mirror->pg_bytes_written += mirror->pg_count;
	}
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group. If so, it will submit @req as the last one, to ensure
 * the pointer to @req is still valid in case of failure.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int size, subreq_size;

	nfs_page_group_lock(req);

	subreq = req;
	subreq_size = subreq->wb_bytes;
	for (;;) {
		size = nfs_pageio_do_add_request(desc, subreq);
		if (size == subreq_size) {
			/* We successfully submitted a request */
			if (subreq == req)
				break;
			req->wb_pgbase += size;
			req->wb_bytes -= size;
			req->wb_offset += size;
			subreq_size = req->wb_bytes;
			subreq = req;
			continue;
		}
		if (WARN_ON_ONCE(subreq != req)) {
			nfs_page_group_unlock(req);
			nfs_pageio_cleanup_request(desc, subreq);
			subreq = req;
			subreq_size = req->wb_bytes;
			nfs_page_group_lock(req);
		}
		if (!size) {
			/* Can't coalesce any more, so do I/O */
			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}
		subreq = nfs_create_subreq(req, req->wb_pgbase,
				req->wb_offset, size);
		if (IS_ERR(subreq))
			goto err_ptr;
		subreq_size = size;
	}

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
				desc->pg_error);
	}
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	/* Create the mirror instances first, and fire them off */
	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
		nfs_page_group_lock(req);

		dupreq = nfs_create_subreq(req,
				pgbase, offset, bytes);

		nfs_page_group_unlock(req);
		if (IS_ERR(dupreq)) {
			desc->pg_error = PTR_ERR(dupreq);
			goto out_failed;
		}

		nfs_pgio_set_current_mirror(desc, midx);
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	nfs_pgio_set_current_mirror(desc, 0);
	if (!nfs_pageio_add_request_mirror(desc, req))
		goto out_failed;

	return 1;

out_cleanup_subreq:
	nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}
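
/*
 * Example caller pattern (sketch, mirroring nfs_pageio_resend() below):
 * feed a list of requests into the descriptor, stop on the first failure,
 * and pick the error out of pg_error once I/O has been pushed out.
 *
 *	while (!list_empty(&pages)) {
 *		struct nfs_page *req = nfs_list_entry(pages.next);
 *
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;
 *	}
 *	nfs_pageio_complete(&pgio);
 *	if (pgio.pg_error < 0)
 *		return pgio.pg_error;
 */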

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: index of the mirror to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror;
	u32 restore_idx;

	restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
	mirror = nfs_pgio_current_mirror(desc);

	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	nfs_pgio_set_current_mirror(desc, restore_idx);
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move requests from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;

		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	struct folio *folio;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			folio = nfs_page_to_folio(prev);
			if (folio) {
				if (index == folio_next_index(folio))
					continue;
			} else if (index == prev->wb_index + 1)
				continue;
			nfs_pageio_complete(desc);
			break;
		}
	}
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_complete(pgio);
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};