// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write subrequest result collection, assessment
 * and retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include "internal.h"

/* Notes made in the collector */
#define HIT_PENDING		0x01	/* A front op was still pending */
#define NEED_REASSESS		0x02	/* Need to loop round and reassess */
#define MADE_PROGRESS		0x04	/* Made progress cleaning up a stream or the folio set */
#define BUFFERED		0x08	/* The pagecache needs cleaning up */
#define NEED_RETRY		0x10	/* A front op requests retrying */
#define SAW_FAILURE		0x20	/* One or more streams hit a permanent failure */

/*
 * Successful completion of write of a folio to the server and/or cache. Note
 * that we are not allowed to lock the folio here on pain of deadlocking with
 * truncate.
 */
int netfs_folio_written_back(struct folio *folio)
{
	enum netfs_folio_trace why = netfs_folio_trace_clear;
	struct netfs_inode *ictx = netfs_inode(folio->mapping->host);
	struct netfs_folio *finfo;
	struct netfs_group *group = NULL;
	int gcount = 0;

	if ((finfo = netfs_folio_info(folio))) {
		/* Streaming writes cannot be redirtied whilst under writeback,
		 * so discard the streaming record.
		 */
		unsigned long long fend;

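		/* The streamed region has now hit the server and/or the cache,
		 * so advance the inode's zero point (the offset beyond which
		 * the file is assumed to hold no data) to cover what was
		 * written.
		 */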
		fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len;
		if (fend > ictx->zero_point)
			ictx->zero_point = fend;

		folio_detach_private(folio);
		group = finfo->netfs_group;
		gcount++;
		kfree(finfo);
		why = netfs_folio_trace_clear_s;
		goto end_wb;
	}

	if ((group = netfs_folio_group(folio))) {
		if (group == NETFS_FOLIO_COPY_TO_CACHE) {
			why = netfs_folio_trace_clear_cc;
			folio_detach_private(folio);
			goto end_wb;
		}

		/* Need to detach the group pointer if the page didn't get
		 * redirtied. If it has been redirtied, then it must be within
		 * the same group.
		 */
		why = netfs_folio_trace_redirtied;
		if (!folio_test_dirty(folio)) {
			folio_detach_private(folio);
			gcount++;
			why = netfs_folio_trace_clear_g;
		}
	}

end_wb:
	trace_netfs_folio(folio, why);
	folio_end_writeback(folio);
	return gcount;
}

/*
 * Unlock any folios we've finished with.
 */
static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
					  unsigned int *notes)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;

	if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
		if (netfs_pgpriv2_unlock_copied_folios(wreq))
			*notes |= MADE_PROGRESS;
		return;
	}

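	/* If the head slot has run off the end of this folio_queue segment,
	 * drop that segment and move on to the next one in the chain.
	 */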
	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

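	/* Work along the folio queue from the collection point, ending
	 * writeback on each folio that has been entirely collected and
	 * releasing emptied queue segments as we go.
	 */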
	for (;;) {
		struct folio *folio;
		struct netfs_folio *finfo;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_writeback(folio),
			      "R=%08x: folio %lx is not under writeback\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		finfo = netfs_folio_info(folio);
		flen = finfo ? finfo->dirty_offset + finfo->dirty_len : fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		wreq->nr_group_rel += netfs_folio_written_back(folio);
		wreq->cleaned_to = fpos + fsize;
		*notes |= MADE_PROGRESS;

		/* Clean up the head folioq. If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
}

/*
 * Perform retries on the streams that need it.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				struct iov_iter source = subreq->io_iter;

				iov_iter_revert(&source, subreq->len - source.count);
				__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

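	/* Otherwise, work along the stream gathering runs of contiguous
	 * subrequests that need retrying so that each run can be recut to the
	 * renegotiated size before being reissued.
	 */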
	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len = from->len - from->transferred;

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use. Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;
			/* Renegotiate max_len (wsize) */
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
			stream->prepare_write(subreq);

			part = min(len, stream->sreq_max_len);
			subreq->len = part;
			subreq->start = start;
			subreq->transferred = 0;
			len -= part;
			start += part;
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source = to->source;
			subreq->start = start;
			subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
			subreq->stream_nr = to->stream_nr;
			__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len = len;
			stream->sreq_max_segs = INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
			if (!len)
				break;

		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}

/*
 * Perform retries on the streams that need it. If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
 */
static void netfs_retry_writes(struct netfs_io_request *wreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (!stream->active)
			continue;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
				    TASK_UNINTERRUPTIBLE);
		}
	}

	// TODO: Enc: Fetch changed partial pages
	// TODO: Enc: Reencrypt content if needed.
	// TODO: Enc: Wind back transferred point.
	// TODO: Enc: Mark cache pages for retry.

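	/* Now reissue or renegotiate the subrequests on each stream that was
	 * marked as needing a retry.
	 */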
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
}

/*
 * Collect and assess the results of various write subrequests. We may need to
 * retry some of the results - or even do an RMW cycle for content crypto.
 *
 * Note that we have a number of parallel, overlapping lists of subrequests,
 * one to the server and one to the local cache for example, which may not be
 * the same size or starting position and may not even correspond in boundary
 * alignment.
 */
static void netfs_collect_write_results(struct netfs_io_request *wreq)
{
	struct netfs_io_subrequest *front, *remove;
	struct netfs_io_stream *stream;
	unsigned long long collected_to, issued_to;
	unsigned int notes;
	int s;

	_enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
	trace_netfs_collect(wreq);
	trace_netfs_rreq(wreq, netfs_rreq_trace_collect);

reassess_streams:
	issued_to = atomic64_read(&wreq->issued_to);
	smp_rmb();
	collected_to = ULLONG_MAX;
	if (wreq->origin == NETFS_WRITEBACK ||
	    wreq->origin == NETFS_WRITETHROUGH ||
	    wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
		notes = BUFFERED;
	else
		notes = 0;

	/* Remove completed subrequests from the front of the streams and
	 * advance the completion point on each stream. We stop when we hit
	 * something that's in progress. The issuer thread may be adding stuff
	 * to the tail whilst we're doing this.
	 */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		/* Read active flag before list pointers */
		if (!smp_load_acquire(&stream->active))
			continue;

		front = stream->front;
		while (front) {
			trace_netfs_collect_sreq(wreq, front);
			//_debug("sreq [%x] %llx %zx/%zx",
			//       front->debug_index, front->start, front->transferred, front->len);

			if (stream->collected_to < front->start) {
				trace_netfs_collect_gap(wreq, stream, issued_to, 'F');
				stream->collected_to = front->start;
			}

			/* Stall if the front is still undergoing I/O. */
			if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
				notes |= HIT_PENDING;
				break;
			}
			smp_rmb(); /* Read counters after I-P flag. */

			if (stream->failed) {
				stream->collected_to = front->start + front->len;
				notes |= MADE_PROGRESS | SAW_FAILURE;
				goto cancel;
			}
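			/* Absorb any progress the subrequest made into the
			 * stream's collection point and transferred count.
			 */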
			if (front->start + front->transferred > stream->collected_to) {
				stream->collected_to = front->start + front->transferred;
				stream->transferred = stream->collected_to - wreq->start;
				notes |= MADE_PROGRESS;
			}
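			/* A permanent failure stops collection on this stream;
			 * note the error and, for an upload, report it against
			 * the mapping.
			 */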
			if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
				stream->failed = true;
				stream->error = front->error;
				if (stream->source == NETFS_UPLOAD_TO_SERVER)
					mapping_set_error(wreq->mapping, front->error);
				notes |= NEED_REASSESS | SAW_FAILURE;
				break;
			}
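			/* A short transfer means the remainder of the
			 * subrequest needs to be retried.
			 */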
			if (front->transferred < front->len) {
				stream->need_retry = true;
				notes |= NEED_RETRY | MADE_PROGRESS;
				break;
			}

		cancel:
			/* Remove if completely consumed. */
			spin_lock_bh(&wreq->lock);

			remove = front;
			list_del_init(&front->rreq_link);
			front = list_first_entry_or_null(&stream->subrequests,
							 struct netfs_io_subrequest, rreq_link);
			stream->front = front;
			spin_unlock_bh(&wreq->lock);
			netfs_put_subrequest(remove, false,
					     notes & SAW_FAILURE ?
					     netfs_sreq_trace_put_cancel :
					     netfs_sreq_trace_put_done);
		}

		/* If we have an empty stream, we need to jump it forward
		 * otherwise the collection point will never advance.
		 */
		if (!front && issued_to > stream->collected_to) {
			trace_netfs_collect_gap(wreq, stream, issued_to, 'E');
			stream->collected_to = issued_to;
		}

		if (stream->collected_to < collected_to)
			collected_to = stream->collected_to;
	}

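	/* The request's collection point is the lowest point reached across
	 * all of the active streams.
	 */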
	if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
		wreq->collected_to = collected_to;

	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->active)
			trace_netfs_collect_stream(wreq, stream);
	}

	trace_netfs_collect_state(wreq, wreq->collected_to, notes);

	/* Unlock any folios that we have now finished with. */
	if (notes & BUFFERED) {
		if (wreq->cleaned_to < wreq->collected_to)
			netfs_writeback_unlock_folios(wreq, &notes);
	} else {
		wreq->cleaned_to = wreq->collected_to;
	}

	// TODO: Discard encryption buffers

	if (notes & NEED_RETRY)
		goto need_retry;
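	/* If we made some progress, the writer may have been paused waiting
	 * for us to free things up; clear the pause and wake it.
	 */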
	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
		trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
		clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
		wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
	}

	if (notes & NEED_REASSESS) {
		//cond_resched();
		goto reassess_streams;
	}
	if (notes & MADE_PROGRESS) {
		//cond_resched();
		goto reassess_streams;
	}

out:
	netfs_put_group_many(wreq->group, wreq->nr_group_rel);
	wreq->nr_group_rel = 0;
	_leave(" = %x", notes);
	return;

need_retry:
	/* Okay... We're going to have to retry one or both streams. Note
	 * that any partially completed op will have had any wholly transferred
	 * folios removed from it.
	 */
	_debug("retry");
	netfs_retry_writes(wreq);
	goto out;
}

/*
 * Perform the collection of subrequests, folios and encryption buffers.
 */
void netfs_write_collection_worker(struct work_struct *work)
{
	struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	size_t transferred;
	int s;

	_enter("R=%x", wreq->debug_id);

	netfs_see_request(wreq, netfs_rreq_trace_see_work);
	if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
		netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
		return;
	}

	netfs_collect_write_results(wreq);

	/* We're done when the app thread has finished posting subreqs and all
	 * the queues in all the streams are empty.
	 */
	if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) {
		netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
		return;
	}
	smp_rmb(); /* Read ALL_QUEUED before lists. */

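	/* The amount transferred is the smallest amount completed across all
	 * of the active streams.
	 */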
	transferred = LONG_MAX;
	for (s = 0; s < NR_IO_STREAMS; s++) {
		struct netfs_io_stream *stream = &wreq->io_streams[s];
		if (!stream->active)
			continue;
		if (!list_empty(&stream->subrequests)) {
			netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
			return;
		}
		if (stream->transferred < transferred)
			transferred = stream->transferred;
	}

	/* Okay, declare that all I/O is complete. */
	wreq->transferred = transferred;
	trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);

	if (wreq->io_streams[1].active &&
	    wreq->io_streams[1].failed) {
		/* Cache write failure doesn't prevent writeback completion
		 * unless we're in disconnected mode.
		 */
		ictx->ops->invalidate_cache(wreq);
	}

	if (wreq->cleanup)
		wreq->cleanup(wreq);

	if (wreq->origin == NETFS_DIO_WRITE &&
	    wreq->mapping->nrpages) {
		/* mmap may have got underfoot and we may now have folios
		 * locally covering the region we just wrote. Attempt to
		 * discard the folios, but leave in place any modified locally.
		 * ->write_iter() is prevented from interfering by the DIO
		 * counter.
		 */
		pgoff_t first = wreq->start >> PAGE_SHIFT;
		pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(wreq->mapping, first, last);
	}

	if (wreq->origin == NETFS_DIO_WRITE)
		inode_dio_end(wreq->inode);

	_debug("finished");
	trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
	wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (wreq->iocb) {
		size_t written = min(wreq->transferred, wreq->len);
		wreq->iocb->ki_pos += written;
		if (wreq->iocb->ki_complete)
			wreq->iocb->ki_complete(
				wreq->iocb, wreq->error ? wreq->error : written);
		wreq->iocb = VFS_PTR_POISON;
	}

	netfs_clear_subrequests(wreq, false);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete);
}

/*
 * Wake the collection work item.
 */
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
{
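	/* Pin the request whilst the work item is queued; if someone else
	 * beat us to queueing it, drop the ref again.
	 */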
	if (!work_pending(&wreq->work)) {
		netfs_get_request(wreq, netfs_rreq_trace_get_work);
		if (!queue_work(system_unbound_wq, &wreq->work))
			netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq);
	}
}

/**
 * netfs_write_subrequest_terminated - Note the termination of a write operation.
 * @_op: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the library that a contributory write I/O operation has
 * terminated, one way or another, and that it should collect the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred or a
 * negative error code. The library will look after reissuing I/O operations
 * as appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 *
 * When this is called, ownership of the subrequest is transferred back to the
 * library, along with a ref.
 *
 * Note that %_op is a void* so that the function can be passed to
 * kiocb::term_func without the need for a casting wrapper.
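 *
 * For example, a filesystem's write-completion handler might finish with a
 * call along these lines (illustrative only; err and written are the
 * handler's own local results):
 *
 *	netfs_write_subrequest_terminated(subreq, err < 0 ? err : written, false);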
 */
void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_io_subrequest *subreq = _op;
	struct netfs_io_request *wreq = subreq->rreq;
	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];

	_enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);

	switch (subreq->source) {
	case NETFS_UPLOAD_TO_SERVER:
		netfs_stat(&netfs_n_wh_upload_done);
		break;
	case NETFS_WRITE_TO_CACHE:
		netfs_stat(&netfs_n_wh_write_done);
		break;
	case NETFS_INVALID_WRITE:
		break;
	default:
		BUG();
	}

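	/* An error flags the subrequest for retry (-EAGAIN) or as a permanent
	 * failure, and pauses further issuance whilst the collector sorts
	 * things out.
	 */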
	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		if (subreq->error == -EAGAIN)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		else
			set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write);

		switch (subreq->source) {
		case NETFS_WRITE_TO_CACHE:
			netfs_stat(&netfs_n_wh_write_failed);
			break;
		case NETFS_UPLOAD_TO_SERVER:
			netfs_stat(&netfs_n_wh_upload_failed);
			break;
		default:
			break;
		}
		trace_netfs_rreq(wreq, netfs_rreq_trace_set_pause);
		set_bit(NETFS_RREQ_PAUSE, &wreq->flags);
	} else {
		if (WARN(transferred_or_error > subreq->len - subreq->transferred,
			 "Subreq excess write: R=%x[%x] %zd > %zu - %zu",
			 wreq->debug_id, subreq->debug_index,
			 transferred_or_error, subreq->len, subreq->transferred))
			transferred_or_error = subreq->len - subreq->transferred;

		subreq->error = 0;
		subreq->transferred += transferred_or_error;

		if (subreq->transferred < subreq->len)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);

	/* If we are at the head of the queue, wake up the collector,
	 * transferring a ref to it if we were the ones to do so.
	 */
	if (list_is_first(&subreq->rreq_link, &stream->subrequests))
		netfs_wake_write_collector(wreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_write_subrequest_terminated);