xref: /linux/fs/netfs/read_collect.c (revision 796a4049640b54cb1daf9e7fe543292c5ca02c74)
// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest result collection, assessment and
 * retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	netfs_reset_iter(subreq);
	WARN_ON_ONCE(subreq->len - subreq->transferred != iov_iter_count(&subreq->io_iter));
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
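	/* If the data we did get already reaches the end of the file, note
	 * that we hit EOF so the zeroed tail can be treated as valid.
	 */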
	if (subreq->start + subreq->transferred >= subreq->rreq->i_size)
		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
}

/*
 * Flush, mark and unlock a folio that's now completely read.  If we want to
 * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
 * dirty and let writeback handle it.
 */
static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
				    struct netfs_io_request *rreq,
				    struct folio_queue *folioq,
				    int slot)
{
	struct netfs_folio *finfo;
	struct folio *folio = folioq_folio(folioq, slot);

	flush_dcache_folio(folio);
	folio_mark_uptodate(folio);

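	/* In the common case (no PG_private_2), the read has filled in the
	 * gaps around any partially modified (streaming write) folio, so the
	 * netfs_folio record is no longer needed; restore the plain group or
	 * detach the private data.  If the data should also go to the cache,
	 * attach the copy-to-cache group and mark the folio dirty so that
	 * writeback stores it.
	 */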
	if (!test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
		finfo = netfs_folio_info(folio);
		if (finfo) {
			trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
			if (finfo->netfs_group)
				folio_change_private(folio, finfo->netfs_group);
			else
				folio_detach_private(folio);
			kfree(finfo);
		}

		if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
				folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
				folio_mark_dirty(folio);
			}
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_read_done);
		}
	} else {
		// TODO: Use of PG_private_2 is deprecated.
		if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
			netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
	}

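	/* Unlock the folio unless unlocking is suppressed for this request or
	 * this is the folio the caller asked us to keep locked
	 * (rreq->no_unlock_folio with NETFS_RREQ_NO_UNLOCK_FOLIO set).
	 */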
	if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
		if (folio->index == rreq->no_unlock_folio &&
		    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
			_debug("no unlock");
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
			folio_unlock(folio);
		}
	}

	folioq_clear(folioq, slot);
}

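/* Illustration of the donation scheme used below (not taken from the code
 * itself): if a 16KiB folio is spanned by two 8KiB subrequests A and B,
 * neither can unlock the folio on its own.  Whichever completes first donates
 * its span to its neighbour through prev_donated/next_donated and drops out
 * of the list; when the survivor completes, it owns the whole folio and can
 * consume and unlock it.
 */
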
/*
 * Unlock any folios that are now completely read.  Returns true if the
 * subrequest is removed from the list.
 */
static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async)
{
	struct netfs_io_subrequest *prev, *next;
	struct netfs_io_request *rreq = subreq->rreq;
	struct folio_queue *folioq = subreq->curr_folioq;
	size_t avail, prev_donated, next_donated, fsize, part, excess;
	loff_t fpos, start;
	loff_t fend;
	int slot = subreq->curr_folioq_slot;

	if (WARN(subreq->transferred > subreq->len,
		 "Subreq overread: R%x[%x] %zu > %zu",
		 rreq->debug_id, subreq->debug_index,
		 subreq->transferred, subreq->len))
		subreq->transferred = subreq->len;

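	/* Work through the folios in the rolling buffer, consuming and
	 * unlocking each one that is now completely read.
	 */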
next_folio:
	fsize = PAGE_SIZE << subreq->curr_folio_order;
	fpos = round_down(subreq->start + subreq->consumed, fsize);
	fend = fpos + fsize;

	if (WARN_ON_ONCE(!folioq) ||
	    WARN_ON_ONCE(!folioq_folio(folioq, slot)) ||
	    WARN_ON_ONCE(folioq_folio(folioq, slot)->index != fpos / PAGE_SIZE)) {
		pr_err("R=%08x[%x] s=%llx-%llx ctl=%zx/%zx/%zx sl=%u\n",
		       rreq->debug_id, subreq->debug_index,
		       subreq->start, subreq->start + subreq->transferred - 1,
		       subreq->consumed, subreq->transferred, subreq->len,
		       slot);
		if (folioq) {
			struct folio *folio = folioq_folio(folioq, slot);

			pr_err("folioq: orders=%02x%02x%02x%02x\n",
			       folioq->orders[0], folioq->orders[1],
			       folioq->orders[2], folioq->orders[3]);
			if (folio)
				pr_err("folio: %llx-%llx ix=%llx o=%u qo=%u\n",
				       fpos, fend - 1, folio_pos(folio), folio_order(folio),
				       folioq_folio_order(folioq, slot));
		}
	}

donation_changed:
	/* Try to consume the current folio if we've hit or passed the end of
	 * it.  There's a possibility that this subreq doesn't start at the
	 * beginning of the folio, in which case we need to donate to/from the
	 * preceding subreq.
	 *
	 * We also need to include any potential donation back from the
	 * following subreq.
	 */
	prev_donated = READ_ONCE(subreq->prev_donated);
	next_donated = READ_ONCE(subreq->next_donated);
	if (prev_donated || next_donated) {
		spin_lock_bh(&rreq->lock);
		prev_donated = subreq->prev_donated;
		next_donated = subreq->next_donated;
		subreq->start -= prev_donated;
		subreq->len += prev_donated;
		subreq->transferred += prev_donated;
		prev_donated = subreq->prev_donated = 0;
		if (subreq->transferred == subreq->len) {
			subreq->len += next_donated;
			subreq->transferred += next_donated;
			next_donated = subreq->next_donated = 0;
		}
		trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
		spin_unlock_bh(&rreq->lock);
	}

	avail = subreq->transferred;
	if (avail == subreq->len)
		avail += next_donated;
	start = subreq->start;
	if (subreq->consumed == 0) {
		start -= prev_donated;
		avail += prev_donated;
	} else {
		start += subreq->consumed;
		avail -= subreq->consumed;
	}
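	/* start..start+avail now delimits the data available to be consumed
	 * from this subrequest, including any donations; part is that amount
	 * capped at one folio's worth for the progress trace.
	 */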
	part = umin(avail, fsize);

	trace_netfs_progress(subreq, start, avail, part);

	if (start + avail >= fend) {
		if (fpos == start) {
			/* Flush, unlock and mark for caching any folio we've just read. */
			subreq->consumed = fend - subreq->start;
			netfs_unlock_read_folio(subreq, rreq, folioq, slot);
			folioq_mark2(folioq, slot);
			if (subreq->consumed >= subreq->len)
				goto remove_subreq;
		} else if (fpos < start) {
			excess = fend - subreq->start;

			spin_lock_bh(&rreq->lock);
			/* If we complete first on a folio split with the
			 * preceding subreq, donate to that subreq - otherwise
			 * we get the responsibility.
			 */
			if (subreq->prev_donated != prev_donated) {
				spin_unlock_bh(&rreq->lock);
				goto donation_changed;
			}

			if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
				spin_unlock_bh(&rreq->lock);
				pr_err("Can't donate prior to front\n");
				goto bad;
			}

			prev = list_prev_entry(subreq, rreq_link);
			WRITE_ONCE(prev->next_donated, prev->next_donated + excess);
			subreq->start += excess;
			subreq->len -= excess;
			subreq->transferred -= excess;
			trace_netfs_donate(rreq, subreq, prev, excess,
					   netfs_trace_donate_tail_to_prev);
			trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);

			if (subreq->consumed >= subreq->len)
				goto remove_subreq_locked;
			spin_unlock_bh(&rreq->lock);
		} else {
			pr_err("fpos > start\n");
			goto bad;
		}

		/* Advance the rolling buffer to the next folio. */
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			slot = 0;
			folioq = folioq->next;
			subreq->curr_folioq = folioq;
		}
		subreq->curr_folioq_slot = slot;
		if (folioq && folioq_folio(folioq, slot))
			subreq->curr_folio_order = folioq->orders[slot];
		if (!was_async)
			cond_resched();
		goto next_folio;
	}

	/* Deal with partial progress. */
	if (subreq->transferred < subreq->len)
		return false;

	/* Donate the remaining downloaded data to one of the neighbouring
	 * subrequests.  Note that we may race with them doing the same thing.
	 */
	spin_lock_bh(&rreq->lock);

	if (subreq->prev_donated != prev_donated ||
	    subreq->next_donated != next_donated) {
		spin_unlock_bh(&rreq->lock);
		cond_resched();
		goto donation_changed;
	}

	/* Deal with the trickiest case: that this subreq is in the middle of a
	 * folio, not touching either edge, but finishes first.  In such a
	 * case, we donate to the previous subreq, if there is one, so that the
	 * donation is only handled when that completes - and remove this
	 * subreq from the list.
	 *
	 * If the previous subreq finished first, we will have acquired their
	 * donation and should be able to unlock folios and/or donate nextwards.
	 */
	if (!subreq->consumed &&
	    !prev_donated &&
	    !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
		prev = list_prev_entry(subreq, rreq_link);
		WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
		subreq->start += subreq->len;
		subreq->len = 0;
		subreq->transferred = 0;
		trace_netfs_donate(rreq, subreq, prev, subreq->len,
				   netfs_trace_donate_to_prev);
		trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
		goto remove_subreq_locked;
	}

	/* If we can't donate down the chain, donate up the chain instead. */
	excess = subreq->len - subreq->consumed + next_donated;

	if (!subreq->consumed)
		excess += prev_donated;

	if (list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
		rreq->prev_donated = excess;
		trace_netfs_donate(rreq, subreq, NULL, excess,
				   netfs_trace_donate_to_deferred_next);
	} else {
		next = list_next_entry(subreq, rreq_link);
		WRITE_ONCE(next->prev_donated, excess);
		trace_netfs_donate(rreq, subreq, next, excess,
				   netfs_trace_donate_to_next);
	}
	trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_next);
	subreq->len = subreq->consumed;
	subreq->transferred = subreq->consumed;
	goto remove_subreq_locked;

remove_subreq:
	spin_lock_bh(&rreq->lock);
remove_subreq_locked:
	subreq->consumed = subreq->len;
	list_del(&subreq->rreq_link);
	spin_unlock_bh(&rreq->lock);
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
	return true;

bad:
	/* Errr... prev and next both donated to us, but insufficient to finish
	 * the folio.
	 */
	printk("R=%08x[%x] s=%llx-%llx %zx/%zx/%zx\n",
	       rreq->debug_id, subreq->debug_index,
	       subreq->start, subreq->start + subreq->transferred - 1,
	       subreq->consumed, subreq->transferred, subreq->len);
	printk("folio: %llx-%llx\n", fpos, fend - 1);
	printk("donated: prev=%zx next=%zx\n", prev_donated, next_donated);
	printk("s=%llx av=%zx part=%zx\n", start, avail, part);
	BUG();
}

/*
 * Do page flushing and suchlike after DIO.
 */
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	unsigned int i;

	/* Collect unbuffered reads and direct reads, adding up the transfer
	 * sizes until we find the first short or failed subrequest.
	 */
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		rreq->transferred += subreq->transferred;

		if (subreq->transferred < subreq->len ||
		    test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
			rreq->error = subreq->error;
			break;
		}
	}

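	/* For a direct read, flush the destination pages that we read into and
	 * mark them dirty (see the TODO below as to whether the latter is
	 * actually required).
	 */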
	if (rreq->origin == NETFS_DIO_READ) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			flush_dcache_page(rreq->direct_bv[i].bv_page);
			// TODO: cifs marks pages in the destination buffer
			// dirty under some circumstances after a read.  Do we
			// need to do that too?
			set_page_dirty(rreq->direct_bv[i].bv_page);
		}
	}

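	/* If the read was driven by a kiocb, advance its file position by the
	 * amount transferred and complete it with either the error or the
	 * byte count.
	 */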
	if (rreq->iocb) {
		rreq->iocb->ki_pos += rreq->transferred;
		if (rreq->iocb->ki_complete)
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
	if (rreq->origin == NETFS_DIO_READ)
		inode_dio_end(rreq->inode);
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we're in normal kernel thread context at this point, possibly
 * running on a workqueue.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

	//netfs_rreq_is_still_valid(rreq);

	if (test_and_clear_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags)) {
		netfs_retry_reads(rreq);
		return;
	}

	if (rreq->origin == NETFS_DIO_READ ||
	    rreq->origin == NETFS_READ_GAPS)
		netfs_rreq_assess_dio(rreq);
	task_io_account_read(rreq->transferred);

	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, false);
	netfs_unlock_abandoned_read_pages(rreq);
	if (unlikely(test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)))
		netfs_pgpriv2_write_to_the_cache(rreq);
}

void netfs_read_termination_worker(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	netfs_see_request(rreq, netfs_rreq_trace_see_work);
	netfs_rreq_assess(rreq);
	netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async)
{
	if (!was_async)
		return netfs_rreq_assess(rreq);
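	/* In an async context we can't do the assessment here, so punt it to
	 * the unbound workqueue, taking a ref for the work item and dropping
	 * that ref again if the work could not be queued.
	 */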
	if (!work_pending(&rreq->work)) {
		netfs_get_request(rreq, netfs_rreq_trace_get_work);
		if (!queue_work(system_unbound_wq, &rreq->work))
			netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq);
	}
}

/**
 * netfs_read_subreq_progress - Note progress of a read operation.
 * @subreq: The read subrequest that has made progress.
 * @was_async: True if we're in an asynchronous context.
 *
 * This tells the read side of netfs lib that a contributory I/O operation has
 * made some progress and that it may be possible to unlock some folios.
 *
 * Before calling, the filesystem should update subreq->transferred to track
 * the amount of data copied into the output buffer.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
				bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_progress);

	if (subreq->transferred > subreq->consumed &&
	    (rreq->origin == NETFS_READAHEAD ||
	     rreq->origin == NETFS_READPAGE ||
	     rreq->origin == NETFS_READ_FOR_WRITE)) {
		netfs_consume_read_data(subreq, was_async);
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
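
/*
 * Minimal sketch (not from the kernel itself) of how a filesystem might
 * report incremental progress; the myfs_* name is hypothetical, only the
 * netfs call and the subrequest field are real:
 *
 *	static void myfs_rx_chunk(struct netfs_io_subrequest *subreq,
 *				  size_t bytes, bool was_async)
 *	{
 *		subreq->transferred += bytes;
 *		netfs_read_subreq_progress(subreq, was_async);
 *	}
 */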

/**
 * netfs_read_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @error: Error code indicating type of completion.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates the outcome of the operation through @error, supplying
 * 0 to indicate a successful or retryable transfer (if NETFS_SREQ_NEED_RETRY
 * is set) or a negative error code.  The helper will look after reissuing I/O
 * operations as appropriate and writing downloaded data to the cache.
 *
 * Before calling, the filesystem should update subreq->transferred to track
 * the amount of data copied into the output buffer.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
				  int error, bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (rreq->origin != NETFS_DIO_READ) {
		/* Collect buffered reads.
		 *
		 * If the read completed validly short, then we can clear the
		 * tail before going on to unlock the folios.
		 */
		if (error == 0 && subreq->transferred < subreq->len &&
		    (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags) ||
		     test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags))) {
			netfs_clear_unread(subreq);
			subreq->transferred = subreq->len;
			trace_netfs_sreq(subreq, netfs_sreq_trace_clear);
		}
		if (subreq->transferred > subreq->consumed &&
		    (rreq->origin == NETFS_READAHEAD ||
		     rreq->origin == NETFS_READPAGE ||
		     rreq->origin == NETFS_READ_FOR_WRITE)) {
			netfs_consume_read_data(subreq, was_async);
			__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
		}
		rreq->transferred += subreq->transferred;
	}

	/* Deal with retry requests, short reads and errors.  If we retry
	 * but don't make progress, we abandon the attempt.
	 */
	if (!error && subreq->transferred < subreq->len) {
		if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
		} else {
			trace_netfs_sreq(subreq, netfs_sreq_trace_short);
			if (subreq->transferred > subreq->consumed) {
				__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
				__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
				set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
			} else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
				__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
				set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
			} else {
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				error = -ENODATA;
			}
		}
	}

	subreq->error = error;
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	if (unlikely(error < 0)) {
		trace_netfs_failure(rreq, subreq, error, netfs_fail_read);
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			netfs_stat(&netfs_n_rh_read_failed);
		} else {
			netfs_stat(&netfs_n_rh_download_failed);
			set_bit(NETFS_RREQ_FAILED, &rreq->flags);
			rreq->error = subreq->error;
		}
	}

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_read_subreq_terminated);
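
/*
 * Minimal sketch (not from the kernel itself) of a completion handler that a
 * filesystem's transport layer might drive; struct myfs_op and its fields are
 * hypothetical, only the netfs call and the subrequest field are real:
 *
 *	static void myfs_read_done(struct myfs_op *op, ssize_t result,
 *				   bool was_async)
 *	{
 *		struct netfs_io_subrequest *subreq = op->subreq;
 *
 *		if (result > 0)
 *			subreq->transferred += result;
 *		netfs_read_subreq_terminated(subreq, result < 0 ? result : 0,
 *					     was_async);
 *	}
 */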
547