// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Perform retries on the streams that need it.
 *
 * If the filesystem supplies no ->prepare_write() op, each subrequest marked
 * NETFS_SREQ_NEED_RETRY is simply reissued as-is.  Otherwise, runs of
 * contiguous retryable data are coalesced and recarved into new subrequests
 * (the write size may have been renegotiated downwards by prepare_write(),
 * e.g. for cifs) before being reissued.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

	/* Give the filesystem a chance to adjust the failed subrequests
	 * before we walk them (it may mark the whole stream failed).
	 */
	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			/* Stop at the first hard-failed subrequest. */
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				struct iov_iter source;

				/* Reissue from a rewound private copy of the
				 * subreq's iterator.
				 */
				netfs_reset_iter(subreq);
				source = subreq->io_iter;
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len = from->len - from->transferred;

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

		/* Extend the span over following subreqs whose untransferred
		 * data is contiguous, stopping at a boundary or at the first
		 * subreq that doesn't need retrying.
		 */
		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use.  Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;

			/* Provisionally offer the whole remaining span;
			 * prepare_write() may shrink sreq_max_len below.
			 */
			subreq->start = start;
			subreq->len = len;
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			/* Renegotiate max_len (wsize) */
			stream->sreq_max_len = len;
			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			if (unlikely(stream->sreq_max_segs))
				part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
			subreq->len = part;
			subreq->transferred = 0;
			len -= part;
			start += part;
			/* If the last subreq of the span carried a boundary
			 * but won't carry the final part, remember to move
			 * the boundary to whatever subreq ends up last.
			 */
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source = to->source;
			subreq->start = start;
			subreq->stream_nr = to->stream_nr;
			/* New subreqs are born as retries of the span. */
			subreq->retry_count = 1;

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			trace_netfs_sreq(subreq, netfs_sreq_trace_split);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len = len;
			stream->sreq_max_segs = INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
			/* Reinstate a boundary clipped above on whichever
			 * subreq carries the final part of the span.
			 */
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
			if (!len)
				break;

		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}

/*
 * Perform retries on the streams that need it.  If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
198 */ 199 void netfs_retry_writes(struct netfs_io_request *wreq) 200 { 201 struct netfs_io_stream *stream; 202 int s; 203 204 netfs_stat(&netfs_n_wh_retry_write_req); 205 206 /* Wait for all outstanding I/O to quiesce before performing retries as 207 * we may need to renegotiate the I/O sizes. 208 */ 209 set_bit(NETFS_RREQ_RETRYING, &wreq->flags); 210 for (s = 0; s < NR_IO_STREAMS; s++) { 211 stream = &wreq->io_streams[s]; 212 if (stream->active) 213 netfs_wait_for_in_progress_stream(wreq, stream); 214 } 215 clear_bit(NETFS_RREQ_RETRYING, &wreq->flags); 216 217 // TODO: Enc: Fetch changed partial pages 218 // TODO: Enc: Reencrypt content if needed. 219 // TODO: Enc: Wind back transferred point. 220 // TODO: Enc: Mark cache pages for retry. 221 222 for (s = 0; s < NR_IO_STREAMS; s++) { 223 stream = &wreq->io_streams[s]; 224 if (stream->need_retry) { 225 stream->need_retry = false; 226 netfs_retry_write_stream(wreq, stream); 227 } 228 } 229 } 230