// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Perform retries on the subrequests of a write stream that need it.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

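	/* Give the filesystem a chance to adjust an upload stream before we
	 * retry it; it may, for instance, mark the stream as failed, in
	 * which case we give up on it below.
	 */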
	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
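				/* The iterator may have been advanced by a
				 * partial transfer, so rewind it by the amount
				 * consumed (len - count) to make it cover the
				 * whole subrequest again: e.g. with len 64KiB
				 * and 16KiB left unconsumed, revert by 48KiB
				 * so that count == len once more.
				 */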
				struct iov_iter source = subreq->io_iter;

				iov_iter_revert(&source, subreq->len - source.count);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

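	/* Renegotiation is needed: walk the stream, coalescing contiguous
	 * runs of retryable subrequests and repacking each run to fit the
	 * sizes renegotiated through ->prepare_write().
	 */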
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len   = from->len   - from->transferred;

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

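		/* Extend the span over following subrequests that are
		 * contiguous with it and also flagged for retry, stopping
		 * short of any subrequest that marks a content boundary
		 * (e.g. three contiguous failed 16KiB subreqs coalesce into
		 * one 48KiB span).
		 */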
		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;

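			/* Tentatively cover the whole of the remaining span;
			 * ->prepare_write() may then shrink sreq_max_len.
			 */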
			subreq->start	= start;
			subreq->len	= len;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			subreq->retry_count++;
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (wsize) */
			stream->sreq_max_len = len;
			stream->prepare_write(subreq);

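			/* Clip this subrequest to the renegotiated size and,
			 * if the backend caps the number of segments, to
			 * what the iterator permits.
			 */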
			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = part;
			subreq->transferred = 0;
			len -= part;
			start += part;
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source		= to->source;
			subreq->start		= start;
			subreq->debug_index	= atomic_inc_return(&wreq->subreq_counter);
			subreq->stream_nr	= to->stream_nr;
			subreq->retry_count	= 1;

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

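			/* Splice the new subrequest in after the last one
			 * issued and advance 'to' so that it tracks the tail
			 * of the rebuilt span.
			 */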
			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

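			/* Set provisional limits, then let the stream source
			 * and ->prepare_write() renegotiate them.
			 */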
			stream->sreq_max_len	= len;
			stream->sreq_max_segs	= INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
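			/* Reinstate on the final subrequest any boundary mark
			 * carried over from the original tail of the span.
			 */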
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
			if (!len)
				break;

		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}

/*
 * Perform retries on the streams that need it.  If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
 */
void netfs_retry_writes(struct netfs_io_request *wreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	netfs_stat(&netfs_n_wh_retry_write_req);

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (!stream->active)
			continue;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
				    TASK_UNINTERRUPTIBLE);
		}
	}

	// TODO: Enc: Fetch changed partial pages
	// TODO: Enc: Reencrypt content if needed.
	// TODO: Enc: Wind back transferred point.
	// TODO: Enc: Mark cache pages for retry.

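	/* Now retry each stream that was flagged as needing it. */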
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
}