Lines matching +full:write +full:-back ("write-back") in fs/netfs/write_issue.c

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Network filesystem high-level (buffered) writeback.
11 *                +---+---+-----+-----+---+----------+
12 *      Folios:   |   |   |     |     |   |          |
13 *                +---+---+-----+-----+---+----------+
14 *
15 *                  +------+------+     +----+----+
16 *      Upload:     |......|......|.....|....|....|
17 *   (Stream 0)     +------+------+     +----+----+
18 *
19 *                +------+------+------+------+------+
20 *       Cache:   |......|......|......|......|......|
21 *   (Stream 1)   +------+------+------+------+------+
30 * may be glued together into single vectored write RPCs (i.e. contiguous folio data queued to a stream can share one write subrequest).
59 /* Kill folio from streaming write. */ in netfs_kill_dirty_pages()
60 group = finfo->netfs_group; in netfs_kill_dirty_pages()
65 /* Kill copy-to-cache folio */ in netfs_kill_dirty_pages()
88 * Create a write request and set it up appropriately for the origin type.
106 _enter("R=%x", wreq->debug_id); in netfs_create_write_req()
108 ictx = netfs_inode(wreq->inode); in netfs_create_write_req()
110 fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx)); in netfs_create_write_req()
111 if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0) in netfs_create_write_req()
114 wreq->cleaned_to = wreq->start; in netfs_create_write_req()
116 wreq->io_streams[0].stream_nr = 0; in netfs_create_write_req()
117 wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER; in netfs_create_write_req()
118 wreq->io_streams[0].prepare_write = ictx->ops->prepare_write; in netfs_create_write_req()
119 wreq->io_streams[0].issue_write = ictx->ops->issue_write; in netfs_create_write_req()
120 wreq->io_streams[0].collected_to = start; in netfs_create_write_req()
121 wreq->io_streams[0].transferred = LONG_MAX; in netfs_create_write_req()
123 wreq->io_streams[1].stream_nr = 1; in netfs_create_write_req()
124 wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE; in netfs_create_write_req()
125 wreq->io_streams[1].collected_to = start; in netfs_create_write_req()
126 wreq->io_streams[1].transferred = LONG_MAX; in netfs_create_write_req()
127 if (fscache_resources_valid(&wreq->cache_resources)) { in netfs_create_write_req()
128 wreq->io_streams[1].avail = true; in netfs_create_write_req()
129 wreq->io_streams[1].active = true; in netfs_create_write_req()
130 wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq; in netfs_create_write_req()
131 wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write; in netfs_create_write_req()
136 wreq->error = -ENOMEM; in netfs_create_write_req()
138 return ERR_PTR(-ENOMEM); in netfs_create_write_req()
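
The upload stream's hooks (lines 118-119) come from the filesystem's netfs_request_ops. As an illustration only — "myfs" and myfs_send_write_rpc() are hypothetical, not netfs API — an issue_write implementation sends the span described by subreq->io_iter and reports the result with the same helper the core uses at line 236:

	/* Illustrative sketch; myfs and myfs_send_write_rpc() are hypothetical. */
	static void myfs_issue_write(struct netfs_io_subrequest *subreq)
	{
		ssize_t ret;

		/* subreq->io_iter describes subreq->len bytes of source data
		 * at file position subreq->start; it may span several folios
		 * that the core glued into one vectored RPC.
		 */
		ret = myfs_send_write_rpc(subreq->rreq->inode, &subreq->io_iter,
					  subreq->start, subreq->len);

		/* Pass bytes-written or a -ve error to the write collector. */
		netfs_write_subrequest_terminated(subreq, ret, false);
	}
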
142 * netfs_prepare_write_failed - Note write preparation failed
143 * @subreq: The subrequest to mark
145 * Mark a subrequest to note that preparation for write failed.
149 __set_bit(NETFS_SREQ_FAILED, &subreq->flags); in netfs_prepare_write_failed()
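
A prepare_write hook typically tightens the stream's subrequest limits and, if it cannot obtain the resources it needs, marks the subrequest failed with the helper above. A minimal sketch (myfs_have_credits() is hypothetical; the 256 KiB cap is an arbitrary example):

	/* Illustrative sketch; myfs_have_credits() is hypothetical. */
	static void myfs_prepare_write(struct netfs_io_subrequest *subreq)
	{
		struct netfs_io_stream *stream =
			&subreq->rreq->io_streams[subreq->stream_nr];

		/* Tighten the caps the core preset at lines 183-184. */
		stream->sreq_max_len = umin(stream->sreq_max_len, 256 * 1024);

		/* No transport credits: fail now rather than at issue time. */
		if (!myfs_have_credits(subreq->rreq))
			netfs_prepare_write_failed(subreq);
	}
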
155 * Prepare a write subrequest.  We need to allocate a new subrequest
156 * if we don't already have one.
163 struct iov_iter *wreq_iter = &wreq->buffer.iter; in netfs_prepare_write()
165 /* Make sure we don't point the iterator at a used-up folio_queue in netfs_prepare_write()
166  * struct; in that case, extend the rolling buffer to gain a fresh slot. in netfs_prepare_write()
169 if (iov_iter_is_folioq(wreq_iter) && in netfs_prepare_write()
170     wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) in netfs_prepare_write()
171 rolling_buffer_make_space(&wreq->buffer); in netfs_prepare_write()
174 subreq->source = stream->source; in netfs_prepare_write()
175 subreq->start = start; in netfs_prepare_write()
176 subreq->stream_nr = stream->stream_nr; in netfs_prepare_write()
177 subreq->io_iter = *wreq_iter; in netfs_prepare_write()
179 _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index); in netfs_prepare_write()
183 stream->sreq_max_len = UINT_MAX; in netfs_prepare_write()
184 stream->sreq_max_segs = INT_MAX; in netfs_prepare_write()
185 switch (stream->source) { in netfs_prepare_write()
188 stream->sreq_max_len = wreq->wsize; in netfs_prepare_write()
198 if (stream->prepare_write) in netfs_prepare_write()
199 stream->prepare_write(subreq); in netfs_prepare_write()
201 __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); in netfs_prepare_write()
207 spin_lock(&wreq->lock); in netfs_prepare_write()
208 list_add_tail(&subreq->rreq_link, &stream->subrequests); in netfs_prepare_write()
209 if (list_is_first(&subreq->rreq_link, &stream->subrequests)) { in netfs_prepare_write()
210 stream->front = subreq; in netfs_prepare_write()
211 if (!stream->active) { in netfs_prepare_write()
212 stream->collected_to = stream->front->start; in netfs_prepare_write()
213 /* Write list pointers before active flag */ in netfs_prepare_write()
214 smp_store_release(&stream->active, true); in netfs_prepare_write()
218 spin_unlock(&wreq->lock); in netfs_prepare_write()
220 stream->construct = subreq; in netfs_prepare_write()
231 struct netfs_io_request *wreq = subreq->rreq; in netfs_do_issue_write()
233 _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len); in netfs_do_issue_write()
235 if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) in netfs_do_issue_write()
236 return netfs_write_subrequest_terminated(subreq, subreq->error, false); in netfs_do_issue_write()
239 stream->issue_write(subreq); in netfs_do_issue_write()
246 size_t size = subreq->len - subreq->transferred; in netfs_reissue_write()
249 subreq->io_iter = *source; in netfs_reissue_write()
251 iov_iter_truncate(&subreq->io_iter, size); in netfs_reissue_write()
253 subreq->retry_count++; in netfs_reissue_write()
254 __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); in netfs_reissue_write()
255 __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); in netfs_reissue_write()
263 struct netfs_io_subrequest *subreq = stream->construct; in netfs_issue_write()
267 stream->construct = NULL; in netfs_issue_write()
268 subreq->io_iter.count = subreq->len; in netfs_issue_write()
273 * Add data to the write subrequest, dispatching each as we fill it up or if it
274 * is discontiguous with the previous one.  We only fill one part at a time so
275 * that we can avoid overrunning the credits obtained (cifs) and to allow for
276 * content-crypto preparation with network writes.
282 struct netfs_io_subrequest *subreq = stream->construct; in netfs_advance_write()
285 if (!stream->avail) { in netfs_advance_write()
286 _leave("no write"); in netfs_advance_write()
290 _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0); in netfs_advance_write()
292 if (subreq && start != subreq->start + subreq->len) { in netfs_advance_write()
297 if (!stream->construct) in netfs_advance_write()
299 subreq = stream->construct; in netfs_advance_write()
301 part = umin(stream->sreq_max_len - subreq->len, len); in netfs_advance_write()
302 _debug("part %zx/%zx %zx/%zx", subreq->len, stream->sreq_max_len, part, len); in netfs_advance_write()
303 subreq->len += part; in netfs_advance_write()
304 subreq->nr_segs++; in netfs_advance_write()
305 stream->submit_extendable_to -= part; in netfs_advance_write()
307 if (subreq->len >= stream->sreq_max_len || in netfs_advance_write()
308 subreq->nr_segs >= stream->sreq_max_segs || in netfs_advance_write()
318 * Write some of a pending folio's data back to the server.
324 struct netfs_io_stream *upload = &wreq->io_streams[0]; in netfs_write_folio()
325 struct netfs_io_stream *cache = &wreq->io_streams[1]; in netfs_write_folio()
337 if (rolling_buffer_make_space(&wreq->buffer) < 0) in netfs_write_folio()
338 return -ENOMEM; in netfs_write_folio()
344 i_size = i_size_read(wreq->inode); in netfs_write_folio()
351 wreq->nr_group_rel += netfs_folio_written_back(folio); in netfs_write_folio()
352 netfs_put_group_many(wreq->group, wreq->nr_group_rel); in netfs_write_folio()
353 wreq->nr_group_rel = 0; in netfs_write_folio()
357 if (fpos + fsize > wreq->i_size) in netfs_write_folio()
358 wreq->i_size = i_size; in netfs_write_folio()
363 foff = finfo->dirty_offset; in netfs_write_folio()
364 flen = foff + finfo->dirty_len; in netfs_write_folio()
368 if (wreq->origin == NETFS_WRITETHROUGH) { in netfs_write_folio()
370 if (flen > i_size - fpos) in netfs_write_folio()
371 flen = i_size - fpos; in netfs_write_folio()
372 } else if (flen > i_size - fpos) { in netfs_write_folio()
373 flen = i_size - fpos; in netfs_write_folio()
377 } else if (flen == i_size - fpos) { in netfs_write_folio()
380 flen -= foff; in netfs_write_folio()
387 * (1) Intervening non-dirty pages from random-access writes, multiple in netfs_write_folio()
388 *     flushers writing back different parts simultaneously and manual in netfs_write_folio()
389 *     syncs. in netfs_write_folio()
391 * (2) Partially-written pages from write-streaming. in netfs_write_folio()
393 * (3) Pages that belong to a different write-back group (eg. Ceph in netfs_write_folio()
394 *     snaps). in netfs_write_folio()
396 * (4) Actually-clean pages that were marked for write to the cache in netfs_write_folio()
397 *     when they were read (copy-to-cache), which appear as a special in netfs_write_folio()
398 *     write-back group. in netfs_write_folio()
402 } else if (fgroup != wreq->group) { in netfs_write_folio()
403 /* We can't write this page to the server yet. */ in netfs_write_folio()
418 * from write-through, then the page has already been put into the wb in netfs_write_folio()
421 if (wreq->origin == NETFS_WRITEBACK) in netfs_write_folio()
426 if (!cache->avail) { in netfs_write_folio()
433 } else if (!upload->avail && !cache->avail) { in netfs_write_folio()
437 } else if (!upload->construct) { in netfs_write_folio()
444 rolling_buffer_append(&wreq->buffer, folio, 0); in netfs_write_folio()
446 /* Move the submission point forward to allow for write-streaming data in netfs_write_folio()
447  * not starting at the front of the page.  We don't do write-streaming in netfs_write_folio()
448  * with the cache as the cache requires DIO alignment. in netfs_write_folio()
454 stream = &wreq->io_streams[s]; in netfs_write_folio()
455 stream->submit_off = foff; in netfs_write_folio()
456 stream->submit_len = flen; in netfs_write_folio()
457 if (!stream->avail || in netfs_write_folio()
458 (stream->source == NETFS_WRITE_TO_CACHE && streamw) || in netfs_write_folio()
459 (stream->source == NETFS_UPLOAD_TO_SERVER && in netfs_write_folio()
461 stream->submit_off = UINT_MAX; in netfs_write_folio()
462 stream->submit_len = 0; in netfs_write_folio()
467 * could end up with thousands of subrequests if the wsize is small - in netfs_write_folio()
468 * but we might need to wait during the creation of subrequests for in netfs_write_folio()
469 * network resources (eg. SMB credits). in netfs_write_folio()
474 int choose_s = -1; in netfs_write_folio()
476 /* Always add to the lowest-submitted stream first. */ in netfs_write_folio()
478 stream = &wreq->io_streams[s]; in netfs_write_folio()
479 if (stream->submit_len > 0 && in netfs_write_folio()
480 stream->submit_off < lowest_off) { in netfs_write_folio()
481 lowest_off = stream->submit_off; in netfs_write_folio()
488 stream = &wreq->io_streams[choose_s]; in netfs_write_folio()
491 if (stream->submit_off > iter_off) { in netfs_write_folio()
492 rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off); in netfs_write_folio()
493 iter_off = stream->submit_off; in netfs_write_folio()
496 atomic64_set(&wreq->issued_to, fpos + stream->submit_off); in netfs_write_folio()
497 stream->submit_extendable_to = fsize - stream->submit_off; in netfs_write_folio()
498 part = netfs_advance_write(wreq, stream, fpos + stream->submit_off, in netfs_write_folio()
499 stream->submit_len, to_eof); in netfs_write_folio()
500 stream->submit_off += part; in netfs_write_folio()
501 if (part > stream->submit_len) in netfs_write_folio()
502 stream->submit_len = 0; in netfs_write_folio()
504 stream->submit_len -= part; in netfs_write_folio()
510 rolling_buffer_advance(&wreq->buffer, fsize - iter_off); in netfs_write_folio()
511 atomic64_set(&wreq->issued_to, fpos + fsize); in netfs_write_folio()
514 kdebug("R=%x: No submit", wreq->debug_id); in netfs_write_folio()
518 netfs_issue_write(wreq, &wreq->io_streams[s]); in netfs_write_folio()
531 smp_wmb(); /* Write subreq lists before ALL_QUEUED. */ in netfs_end_issue_write()
532 set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); in netfs_end_issue_write()
535 struct netfs_io_stream *stream = &wreq->io_streams[s]; in netfs_end_issue_write()
537 if (!stream->active) in netfs_end_issue_write()
539 if (!list_empty(&stream->subrequests)) in netfs_end_issue_write()
549 * Write some of the pending data back to the server
554 struct netfs_inode *ictx = netfs_inode(mapping->host); in netfs_writepages()
559 if (!mutex_trylock(&ictx->wb_lock)) { in netfs_writepages()
560 if (wbc->sync_mode == WB_SYNC_NONE) { in netfs_writepages()
565 mutex_lock(&ictx->wb_lock); in netfs_writepages()
583 _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to)); in netfs_writepages()
586 WARN_ON_ONCE(wreq && folio_pos(folio) < atomic64_read(&wreq->issued_to)); in netfs_writepages()
589 unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) { in netfs_writepages()
590 set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); in netfs_writepages()
591 wreq->netfs_ops->begin_writeback(wreq); in netfs_writepages()
601 mutex_unlock(&ictx->wb_lock); in netfs_writepages()
610 mutex_unlock(&ictx->wb_lock); in netfs_writepages()
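
netfs_writepages() is designed to be plugged directly into an address_space_operations table alongside the other netfs helpers; roughly like the following sketch for a hypothetical myfs:

	/* Sketch: typical aops wiring for a netfs-backed file. */
	static const struct address_space_operations myfs_file_aops = {
		.read_folio		= netfs_read_folio,
		.readahead		= netfs_readahead,
		.dirty_folio		= netfs_dirty_folio,
		.writepages		= netfs_writepages,
		.release_folio		= netfs_release_folio,
		.invalidate_folio	= netfs_invalidate_folio,
		.migrate_folio		= filemap_migrate_folio,
	};
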
617 * Begin a write operation for writing through the pagecache.
622 struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp)); in netfs_begin_writethrough()
624 mutex_lock(&ictx->wb_lock); in netfs_begin_writethrough()
626 wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, in netfs_begin_writethrough()
627 iocb->ki_pos, NETFS_WRITETHROUGH); in netfs_begin_writethrough()
629 mutex_unlock(&ictx->wb_lock); in netfs_begin_writethrough()
633 wreq->io_streams[0].avail = true; in netfs_begin_writethrough()
639 * Advance the state of the write operation used when writing through the
640 * pagecache.
649 wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end); in netfs_advance_writethrough()
658 if (wreq->len == 0) in netfs_advance_writethrough()
665 wreq->len += copied; in netfs_advance_writethrough()
674 * End a write operation used when writing through the pagecache.
679 struct netfs_inode *ictx = netfs_inode(wreq->inode); in netfs_end_writethrough()
682 _enter("R=%x", wreq->debug_id); in netfs_end_writethrough()
689 mutex_unlock(&ictx->wb_lock); in netfs_end_writethrough()
691 if (wreq->iocb) { in netfs_end_writethrough()
692 ret = -EIOCBQUEUED; in netfs_end_writethrough()
694 wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE); in netfs_end_writethrough()
695 ret = wreq->error; in netfs_end_writethrough()
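
These writethrough entry points are not called by filesystems directly; they are driven from the netfs buffered-write path. The cycle looks roughly like the sketch below (signatures as used by the callers in fs/netfs/buffered_write.c; copying, locking and most error handling elided):

	/* Sketch of one write-through cycle (simplified). */
	static ssize_t writethrough_cycle_sketch(struct kiocb *iocb,
						 struct iov_iter *from,
						 struct writeback_control *wbc,
						 struct folio *folio,
						 size_t copied, bool to_page_end)
	{
		struct folio *writethrough_cache = NULL;
		struct netfs_io_request *wreq;

		wreq = netfs_begin_writethrough(iocb, iov_iter_count(from));
		if (IS_ERR(wreq))
			return PTR_ERR(wreq);

		/* Called once per folio filled from the user buffer. */
		netfs_advance_writethrough(wreq, wbc, folio, copied,
					   to_page_end, &writethrough_cache);

		/* Flush anything still queued; waits unless the iocb is async. */
		return netfs_end_writethrough(wreq, wbc, iocb);
	}
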
702 * Write data to the server without going through the pagecache and without
703 * writing it to the local cache.
707 struct netfs_io_stream *upload = &wreq->io_streams[0]; in netfs_unbuffered_write()
709 loff_t start = wreq->start; in netfs_unbuffered_write()
714 if (wreq->origin == NETFS_DIO_WRITE) in netfs_unbuffered_write()
715 inode_dio_begin(wreq->inode); in netfs_unbuffered_write()
723 len -= part; in netfs_unbuffered_write()
724 rolling_buffer_advance(&wreq->buffer, part); in netfs_unbuffered_write()
725 if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) { in netfs_unbuffered_write()
727 wait_event(wreq->waitq, !test_bit(NETFS_RREQ_PAUSE, &wreq->flags)); in netfs_unbuffered_write()
729 if (test_bit(NETFS_RREQ_FAILED, &wreq->flags)) in netfs_unbuffered_write()
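
netfs_unbuffered_write() itself is internal; filesystems reach it through the exported direct-I/O entry point. A sketch of the usual dispatch (myfs is hypothetical; both netfs helpers are real exports):

	/* Sketch: routing O_DIRECT writes to the netfs unbuffered path. */
	static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_write_iter(iocb, from);
		return netfs_file_write_iter(iocb, from);
	}
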
739 * Write some of a pending folio's data back to the server and/or the cache.
744 struct netfs_io_stream *upload = &wreq->io_streams[0]; in netfs_write_folio_single()
745 struct netfs_io_stream *cache = &wreq->io_streams[1]; in netfs_write_folio_single()
756 if (flen > wreq->i_size - fpos) { in netfs_write_folio_single()
757 flen = wreq->i_size - fpos; in netfs_write_folio_single()
760 } else if (flen == wreq->i_size - fpos) { in netfs_write_folio_single()
766 if (!upload->avail && !cache->avail) { in netfs_write_folio_single()
771 if (!upload->construct) in netfs_write_folio_single()
778 rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK); in netfs_write_folio_single()
780 /* Move the submission point forward to allow for write-streaming data in netfs_write_folio_single()
781  * not starting at the front of the page.  We don't do write-streaming in netfs_write_folio_single()
782  * with the cache as the cache requires DIO alignment. in netfs_write_folio_single()
788 stream = &wreq->io_streams[s]; in netfs_write_folio_single()
789 stream->submit_off = 0; in netfs_write_folio_single()
790 stream->submit_len = flen; in netfs_write_folio_single()
791 if (!stream->avail) { in netfs_write_folio_single()
792 stream->submit_off = UINT_MAX; in netfs_write_folio_single()
793 stream->submit_len = 0; in netfs_write_folio_single()
798 * could end up with thousands of subrequests if the wsize is small - in netfs_write_folio_single()
799 * but we might need to wait during the creation of subrequests for in netfs_write_folio_single()
800 * network resources (eg. SMB credits). in netfs_write_folio_single()
805 int choose_s = -1; in netfs_write_folio_single()
807 /* Always add to the lowest-submitted stream first. */ in netfs_write_folio_single()
809 stream = &wreq->io_streams[s]; in netfs_write_folio_single()
810 if (stream->submit_len > 0 && in netfs_write_folio_single()
811 stream->submit_off < lowest_off) { in netfs_write_folio_single()
812 lowest_off = stream->submit_off; in netfs_write_folio_single()
819 stream = &wreq->io_streams[choose_s]; in netfs_write_folio_single()
822 if (stream->submit_off > iter_off) { in netfs_write_folio_single()
823 rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off); in netfs_write_folio_single()
824 iter_off = stream->submit_off; in netfs_write_folio_single()
827 atomic64_set(&wreq->issued_to, fpos + stream->submit_off); in netfs_write_folio_single()
828 stream->submit_extendable_to = fsize - stream->submit_off; in netfs_write_folio_single()
829 part = netfs_advance_write(wreq, stream, fpos + stream->submit_off, in netfs_write_folio_single()
830 stream->submit_len, to_eof); in netfs_write_folio_single()
831 stream->submit_off += part; in netfs_write_folio_single()
832 if (part > stream->submit_len) in netfs_write_folio_single()
833 stream->submit_len = 0; in netfs_write_folio_single()
835 stream->submit_len -= part; in netfs_write_folio_single()
840 wreq->buffer.iter.iov_offset = 0; in netfs_write_folio_single()
842 rolling_buffer_advance(&wreq->buffer, fsize - iter_off); in netfs_write_folio_single()
843 atomic64_set(&wreq->issued_to, fpos + fsize); in netfs_write_folio_single()
846 kdebug("R=%x: No submit", wreq->debug_id); in netfs_write_folio_single()
852 * netfs_writeback_single - Write back a monolithic payload
853 * @mapping: The mapping to write from
854 * @wbc: Hints from the VM
855 * @iter: Data to write, must be ITER_FOLIOQ.
857 * Write a monolithic, non-pagecache object back to the server and/or
858 * the cache.
865 struct netfs_inode *ictx = netfs_inode(mapping->host); in netfs_writeback_single()
871 return -EIO; in netfs_writeback_single()
873 if (!mutex_trylock(&ictx->wb_lock)) { in netfs_writeback_single()
874 if (wbc->sync_mode == WB_SYNC_NONE) { in netfs_writeback_single()
879 mutex_lock(&ictx->wb_lock); in netfs_writeback_single()
891 if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags)) in netfs_writeback_single()
892 wreq->netfs_ops->begin_writeback(wreq); in netfs_writeback_single()
894 for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) { in netfs_writeback_single()
899 _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to)); in netfs_writeback_single()
904 size -= part; in netfs_writeback_single()
912 netfs_issue_write(wreq, &wreq->io_streams[s]); in netfs_writeback_single()
913 smp_wmb(); /* Write lists before ALL_QUEUED. */ in netfs_writeback_single()
914 set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); in netfs_writeback_single()
916 mutex_unlock(&ictx->wb_lock); in netfs_writeback_single()
923 mutex_unlock(&ictx->wb_lock); in netfs_writeback_single()
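
Because @iter must be ITER_FOLIOQ, a caller first wraps its folio_queue payload in a source iterator. A sketch of the expected usage (the myfs_flush_object() wrapper is hypothetical; iov_iter_folio_queue() is the standard constructor):

	/* Sketch: flushing a monolithic, folio_queue-backed object. */
	static ssize_t myfs_flush_object(struct address_space *mapping,
					 struct writeback_control *wbc,
					 struct folio_queue *payload, size_t size)
	{
		struct iov_iter iter;

		/* Describe the payload as an ITER_FOLIOQ source of @size bytes. */
		iov_iter_folio_queue(&iter, ITER_SOURCE, payload, 0, 0, size);
		return netfs_writeback_single(mapping, wbc, &iter);
	}
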