// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"

static void netfs_reissue_read(struct netfs_io_request *rreq,
			       struct netfs_io_subrequest *subreq)
{
	struct iov_iter *io_iter = &subreq->io_iter;

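	/* If the buffer is a folio queue, resync the subreq's folio walking
	 * state with the iterator's current position before reissuing.
	 */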
	if (iov_iter_is_folioq(io_iter)) {
		subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
		subreq->curr_folioq_slot = io_iter->folioq_slot;
		subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
	}

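	/* Count the subreq as outstanding again, flag it as in progress, take
	 * a ref for the resubmission and hand it back to the filesystem to
	 * issue.
	 */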
	atomic_inc(&rreq->nr_outstanding);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
	subreq->rreq->netfs_ops->issue_read(subreq);
}

/*
 * Go through the list of failed/short reads, retrying all retryable ones.  We
 * need to switch failed cache reads to network downloads.
 */
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream0 = &rreq->io_streams[0];
	LIST_HEAD(sublist);
	LIST_HEAD(queue);

	_enter("R=%x", rreq->debug_id);

	if (list_empty(&rreq->subrequests))
		return;

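	/* Give the filesystem a chance to prepare for the retry if it
	 * provides the optional retry_request op.
	 */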
	if (rreq->netfs_ops->retry_request)
		rreq->netfs_ops->retry_request(rreq, NULL);

	/* If there's no renegotiation to do, just resend each retryable subreq
	 * up to the first permanently failed one.
	 */
	if (!rreq->netfs_ops->prepare_read &&
	    !test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				netfs_reset_iter(subreq);
				netfs_reissue_read(rreq, subreq);
			}
		}
		return;
	}

	/* Okay, we need to renegotiate all the download requests and flip any
	 * failed cache reads over to being download requests and negotiate
	 * those also.  All fully successful subreqs have been removed from the
	 * list and any spare data from those has been donated.
	 *
	 * What we do is decant the list and rebuild it one subreq at a time so
	 * that we don't end up with donations jumping over a gap we're busy
	 * populating with smaller subrequests.  In the event that the subreq
	 * we just launched finishes before we insert the next subreq, it'll
	 * fill in rreq->prev_donated instead.
	 *
	 * Note: Alternatively, we could split the tail subrequest right before
	 * we reissue it and fix up the donations under lock.
	 */
	list_splice_init(&rreq->subrequests, &queue);

	do {
		struct netfs_io_subrequest *from;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part, deferred_next_donated = 0;
		bool boundary = false;

		/* Go through the subreqs and find the next span of contiguous
		 * buffer that we then rejig (cifs, for example, needs the
		 * rsize renegotiating) and reissue.
		 */
		from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
		list_move_tail(&from->rreq_link, &sublist);
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		_debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
		       rreq->debug_id, from->debug_index,
		       from->start, from->consumed, from->transferred, from->len);

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			goto abandon;

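		/* Roll immediately following subreqs into the span whilst
		 * they're contiguous, untransferred and flagged for retry,
		 * stopping after any that marks a boundary.
		 */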
		deferred_next_donated = from->next_donated;
		while ((subreq = list_first_entry_or_null(
				&queue, struct netfs_io_subrequest, rreq_link))) {
			if (subreq->start != start + len ||
			    subreq->transferred > 0 ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			list_move_tail(&subreq->rreq_link, &sublist);
			len += subreq->len;
			deferred_next_donated = subreq->next_donated;
			if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
				break;
		}

		_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			list_del(&subreq->rreq_link);

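			/* Reuse this subreq for the front of the remaining
			 * span, switching any failed cache read over to a
			 * download from the server.  The start and length are
			 * backed up over any data this subreq had already
			 * transferred.
			 */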
			subreq->source	= NETFS_DOWNLOAD_FROM_SERVER;
			subreq->start	= start - subreq->transferred;
			subreq->len	= len   + subreq->transferred;
			stream0->sreq_max_len = subreq->len;

			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);

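			/* Requeue the subreq on the request under lock and
			 * absorb any donation that a previously completed
			 * subreq left in rreq->prev_donated.
			 */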
			spin_lock_bh(&rreq->lock);
			list_add_tail(&subreq->rreq_link, &rreq->subrequests);
			subreq->prev_donated += rreq->prev_donated;
			rreq->prev_donated = 0;
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			spin_unlock_bh(&rreq->lock);

			BUG_ON(!len);

			/* Renegotiate max_len (rsize) */
			if (rreq->netfs_ops->prepare_read(subreq) < 0) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
			}

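			/* Carve off as much of the span as the renegotiated
			 * rsize (and segment limit, if any) allows and point
			 * this subreq's iterator at that slice of the buffer.
			 */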
			part = umin(len, stream0->sreq_max_len);
			if (unlikely(rreq->io_streams[0].sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
			subreq->len = subreq->transferred + part;
			subreq->io_iter = source;
			iov_iter_truncate(&subreq->io_iter, part);
			iov_iter_advance(&source, part);
			len -= part;
			start += part;
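			/* Only the subreq that finishes off the span inherits
			 * the boundary flag and the deferred donation.
			 */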
			if (!len) {
				if (boundary)
					__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = deferred_next_donated;
			} else {
				__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
				subreq->next_donated = 0;
			}

			netfs_reissue_read(rreq, subreq);
			if (!len)
				break;

			/* If we ran out of subrequests, allocate another. */
			if (list_empty(&sublist)) {
				subreq = netfs_alloc_subrequest(rreq);
				if (!subreq)
					goto abandon;
				subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
				subreq->start = start;

				/* We get two refs, but need just one. */
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
				trace_netfs_sreq(subreq, netfs_sreq_trace_split);
				list_add_tail(&subreq->rreq_link, &sublist);
			}
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess.
		 */
		while ((subreq = list_first_entry_or_null(
				&sublist, struct netfs_io_subrequest, rreq_link))) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
		}

	} while (!list_empty(&queue));

	return;

	/* If we hit an unretryable failure or run out of memory, fail all of
	 * the remaining subrequests.
	 */
abandon:
	list_splice_init(&sublist, &queue);
	list_for_each_entry(subreq, &queue, rreq_link) {
		if (!subreq->error)
			subreq->error = -ENOMEM;
		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		__clear_bit(NETFS_SREQ_RETRYING, &subreq->flags);
	}
	spin_lock_bh(&rreq->lock);
	list_splice_tail_init(&queue, &rreq->subrequests);
	spin_unlock_bh(&rreq->lock);
}

/*
 * Retry reads.
 */
void netfs_retry_reads(struct netfs_io_request *rreq)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

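	/* Pin the request by counting ourselves as outstanding whilst
	 * resubmitting so that it can't be deemed complete under us.
	 */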
	atomic_inc(&rreq->nr_outstanding);

	netfs_retry_read_subrequests(rreq);

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, false);
}

/*
 * Unlock any of the pages that haven't been unlocked yet due to abandoned
 * subrequests.
 */
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

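	/* Walk the request's folio queue and unlock any folio whose slot
	 * doesn't have its second mark set - i.e. one that an abandoned
	 * subrequest left locked.
	 */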
	for (p = rreq->buffer; p; p = p->next) {
		for (int slot = 0; slot < folioq_count(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (folio && !folioq_is_marked2(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_abandon);
				folio_unlock(folio);
			}
		}
	}
}