// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Perform retries on the streams that need it.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

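	/* Give the filesystem a chance to adjust the failed subrequests
	 * before we reissue them; it may also mark the whole stream as
	 * failed, in which case there is nothing to retry.
	 */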
	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				struct iov_iter source;

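				/* Reissue from a copy of the subrequest's
				 * iterator as issuing the write will advance
				 * the iterator it's given.
				 */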
				netfs_reset_iter(subreq);
				source = subreq->io_iter;
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

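	/* Otherwise reissue span by span, renegotiating the I/O sizes as we
	 * go.  @next tracks our position in the subrequest list across
	 * iterations of the outer loop.
	 */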
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len = from->len - from->transferred;

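		/* Stop at the first hard failure or at a subrequest that
		 * doesn't need retrying; everything from here on is left
		 * as-is.
		 */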
		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

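		/* @source now spans the whole run of data to be retried; each
		 * reissued subrequest below consumes its part from the front.
		 */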
91
92 /* Work through the sublist. */
93 subreq = from;
94 list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
95 if (!len)
96 break;
97
98 subreq->start = start;
99 subreq->len = len;
100 __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
101 subreq->retry_count++;
102 trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
103
104 /* Renegotiate max_len (wsize) */
105 stream->sreq_max_len = len;
106 stream->prepare_write(subreq);
107
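			/* ->prepare_write() may have shrunk
			 * stream->sreq_max_len (cifs, for instance,
			 * renegotiates the wsize with the server), so clamp
			 * this subreq accordingly.
			 */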
			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = part;
			subreq->transferred = 0;
			len -= part;
			start += part;
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after the last one (@to).
		 */
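		/* Each new subrequest starts with a retry count of 1 as it
		 * carries data that has already failed at least once.
		 */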
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source = to->source;
			subreq->start = start;
			subreq->stream_nr = to->stream_nr;
			subreq->retry_count = 1;

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			trace_netfs_sreq(subreq, netfs_sreq_trace_split);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len = len;
			stream->sreq_max_segs = INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
			if (!len)
				break;

		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}

/*
 * Perform retries on the streams that need it.  If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
 */
void netfs_retry_writes(struct netfs_io_request *wreq)
{
	struct netfs_io_stream *stream;
	int s;

	netfs_stat(&netfs_n_wh_retry_write_req);

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	set_bit(NETFS_RREQ_RETRYING, &wreq->flags);
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->active)
			netfs_wait_for_in_progress_stream(wreq, stream);
	}
	clear_bit(NETFS_RREQ_RETRYING, &wreq->flags);

	// TODO: Enc: Fetch changed partial pages
	// TODO: Enc: Reencrypt content if needed.
	// TODO: Enc: Wind back transferred point.
	// TODO: Enc: Mark cache pages for retry.

	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
}