// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level (buffered) writeback.
 *
 * [Diagram excerpted: a run of dirty pagecache folios is overlaid by two
 *  parallel sequences of write subrequests - an upload stream (Stream 0)
 *  going to the server and a cache stream (Stream 1) going to the local
 *  cache - whose boundaries need not line up with the folio boundaries.]
 */

/* In netfs_kill_dirty_pages(): */
		group = finfo->netfs_group;
		/* Kill copy-to-cache folio */

/*
 * Create a write request and set it up appropriately for the origin type.
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin)
{
	struct netfs_io_request *wreq;
	struct netfs_inode *ictx;
	bool is_cacheable = (origin == NETFS_WRITEBACK ||
			     origin == NETFS_WRITEBACK_SINGLE ||
			     origin == NETFS_WRITETHROUGH ||
			     origin == NETFS_PGPRIV2_COPY_TO_CACHE);

	wreq = netfs_alloc_request(mapping, file, start, 0, origin);
	if (IS_ERR(wreq))
		return wreq;

	_enter("R=%x", wreq->debug_id);

	ictx = netfs_inode(wreq->inode);
	if (is_cacheable && netfs_is_cache_enabled(ictx))
		fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
	if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0)
		goto nomem;

	wreq->cleaned_to = wreq->start;

	wreq->io_streams[0].stream_nr = 0;
	wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;
	wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
	wreq->io_streams[0].issue_write = ictx->ops->issue_write;
	wreq->io_streams[0].collected_to = start;
	wreq->io_streams[0].transferred = 0;

	wreq->io_streams[1].stream_nr = 1;
	wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
	wreq->io_streams[1].collected_to = start;
	wreq->io_streams[1].transferred = 0;
	if (fscache_resources_valid(&wreq->cache_resources)) {
		wreq->io_streams[1].avail = true;
		wreq->io_streams[1].active = true;
		wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
		wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write;
	}

	return wreq;

nomem:
	/* ... */
	return ERR_PTR(-ENOMEM);
}
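
/*
 * Illustrative sketch (not from this file): the upload stream set up above
 * takes its prepare_write/issue_write hooks from the filesystem's
 * netfs_request_ops, so a netfs-based filesystem would supply something
 * like the following, where the myfs_* names are hypothetical placeholders:
 *
 *	static const struct netfs_request_ops myfs_request_ops = {
 *		...
 *		.begin_writeback	= myfs_begin_writeback,
 *		.prepare_write		= myfs_prepare_write,
 *		.issue_write		= myfs_issue_write,
 *	};
 *
 * The cache stream's hooks, by contrast, come from the cache_resources ops
 * once fscache_begin_write_operation() has attached a cache object to the
 * request.
 */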

/* netfs_prepare_write_failed - Note write preparation failed */
	__set_bit(NETFS_SREQ_FAILED, &subreq->flags);

static void netfs_prepare_write(struct netfs_io_request *wreq,
				struct netfs_io_stream *stream,
				loff_t start)
{
	struct netfs_io_subrequest *subreq;
	struct iov_iter *wreq_iter = &wreq->buffer.iter;

	/* Make sure we don't point the iterator at a used-up folio_queue
	 * ... */
	if (iov_iter_is_folioq(wreq_iter) &&
	    wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq))
		rolling_buffer_make_space(&wreq->buffer);

	subreq = netfs_alloc_subrequest(wreq);
	subreq->source = stream->source;
	subreq->start = start;
	subreq->stream_nr = stream->stream_nr;
	subreq->io_iter = *wreq_iter;

	_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);

	/* ... */
	stream->sreq_max_len = UINT_MAX;
	stream->sreq_max_segs = INT_MAX;
	switch (stream->source) {
	case NETFS_UPLOAD_TO_SERVER:
		/* ... */
		stream->sreq_max_len = wreq->wsize;
		break;
	/* ... other cases elided ... */
	}

	if (stream->prepare_write)
		stream->prepare_write(subreq);

	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

	/* ... */
	spin_lock(&wreq->lock);
	list_add_tail(&subreq->rreq_link, &stream->subrequests);
	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
		stream->front = subreq;
		if (!stream->active) {
			stream->collected_to = stream->front->start;
			/* ... */
			smp_store_release(&stream->active, true);
		}
	}

	spin_unlock(&wreq->lock);

	stream->construct = subreq;
}
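
/*
 * Illustrative sketch (assumption, not from this file): a filesystem's
 * ->prepare_write() hook, invoked just above, typically clamps the limits
 * that netfs_prepare_write() initialised (sreq_max_len to UINT_MAX or
 * wreq->wsize, sreq_max_segs to INT_MAX) to what one RPC can carry.
 * Hypothetical example, with myfs_prepare_write() and MYFS_MAX_RPC_PAYLOAD
 * standing in for a real filesystem's code and protocol limit:
 *
 *	static void myfs_prepare_write(struct netfs_io_subrequest *subreq)
 *	{
 *		struct netfs_io_stream *stream =
 *			&subreq->rreq->io_streams[subreq->stream_nr];
 *
 *		stream->sreq_max_len = umin(stream->sreq_max_len,
 *					    MYFS_MAX_RPC_PAYLOAD);
 *	}
 */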

/* In netfs_do_issue_write(): */
	struct netfs_io_request *wreq = subreq->rreq;

	_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);

	if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
		return netfs_write_subrequest_terminated(subreq, subreq->error);
	/* ... */
	stream->issue_write(subreq);

/* In netfs_reissue_write(): */
	size_t size = subreq->len - subreq->transferred;

	subreq->io_iter = *source;
	/* ... */
	iov_iter_truncate(&subreq->io_iter, size);

	subreq->retry_count++;
	__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
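
/*
 * The reissue path above reuses the existing subrequest rather than
 * allocating a new one: the iterator is rebuilt from the caller-supplied
 * source and truncated to the bytes not yet transferred, the retry counter
 * is bumped, and the progress/in-progress flags are reset so that the
 * write can be sent again.
 */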

/* In netfs_issue_write(): */
	struct netfs_io_subrequest *subreq = stream->construct;

	stream->construct = NULL;
	subreq->io_iter.count = subreq->len;
	netfs_do_issue_write(stream, subreq);

/* ... content-crypto preparation with network writes. */

/* In netfs_advance_write(): */
	struct netfs_io_subrequest *subreq = stream->construct;
	size_t part;

	if (!stream->avail) {
		/* ... */
	}

	_enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);

	if (subreq && start != subreq->start + subreq->len) {
		/* ... issue the current subrequest on discontiguity ... */
	}

	if (!stream->construct)
		netfs_prepare_write(wreq, stream, start);
	subreq = stream->construct;

	part = umin(stream->sreq_max_len - subreq->len, len);
	_debug("part %zx/%zx %zx/%zx", subreq->len, stream->sreq_max_len, part, len);
	subreq->len += part;
	subreq->nr_segs++;
	stream->submit_extendable_to -= part;

	if (subreq->len >= stream->sreq_max_len ||
	    subreq->nr_segs >= stream->sreq_max_segs ||
	    /* ... */)
		netfs_issue_write(wreq, stream);

	return part;
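
/*
 * Worked example (illustrative numbers, not from the source): with
 * stream->sreq_max_len clamped to 64KiB and a 160KiB contiguous dirty span
 * starting at file position 0, successive netfs_advance_write() calls fill
 * and dispatch a 64KiB subrequest at 0x0, another at 0x10000, and leave a
 * final 32KiB subrequest under construction at 0x20000 until more
 * contiguous data arrives or netfs_issue_write() is called to flush it.
 */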

static int netfs_write_folio(struct netfs_io_request *wreq,
			     struct writeback_control *wbc,
			     struct folio *folio)
{
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	/* ... */

	if (rolling_buffer_make_space(&wreq->buffer) < 0)
		return -ENOMEM;

	/* ... */
	i_size = i_size_read(wreq->inode);

	/* ... */
		wreq->nr_group_rel += netfs_folio_written_back(folio);
		netfs_put_group_many(wreq->group, wreq->nr_group_rel);
		wreq->nr_group_rel = 0;
	/* ... */

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	/* ... */
		foff = finfo->dirty_offset;
		flen = foff + finfo->dirty_len;
	/* ... */

	if (wreq->origin == NETFS_WRITETHROUGH) {
		/* ... */
		if (flen > i_size - fpos)
			flen = i_size - fpos;
	} else if (flen > i_size - fpos) {
		flen = i_size - fpos;
		/* ... */
	} else if (flen == i_size - fpos) {
		/* ... */
	}
	flen -= foff;

	/* ...
	 *
	 * (1) Intervening non-dirty pages from random-access writes, multiple
	 *     ...
	 *
	 * (2) Partially-written pages from write-streaming.
	 *
	 * (3) Pages that belong to a different write-back group (eg. Ceph
	 *     ...
	 *
	 * (4) Actually-clean pages that were marked for write to the cache
	 *     ... write-back group.
	 */
	if (/* ... */) {
		/* ... */
	} else if (fgroup != wreq->group) {
		/* ... */
	}

	/* ...
	 * from write-through, then the page has already been put into the wb
	 * ...
	 */
	if (wreq->origin == NETFS_WRITEBACK)
		/* ... */

	if (/* ... */) {
		if (!cache->avail) {
			/* ... */
		}
		/* ... */
	} else if (!upload->avail && !cache->avail) {
		/* ... */
	} else if (!upload->construct) {
		/* ... */
	}

	rolling_buffer_append(&wreq->buffer, folio, 0);

	/* Move the submission point forward to allow for write-streaming data
	 * not starting at the front of the page.  We don't do write-streaming
	 * ...
	 */
	for (int s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		stream->submit_off = foff;
		stream->submit_len = flen;
		if (!stream->avail ||
		    (stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
		    (stream->source == NETFS_UPLOAD_TO_SERVER &&
		     /* ... */)) {
			stream->submit_off = UINT_MAX;
			stream->submit_len = 0;
		}
	}

	/* ...
	 * could end up with thousands of subrequests if the wsize is small -
	 * ...
	 */
	for (;;) {
		/* ... */
		int choose_s = -1;

		/* Always add to the lowest-submitted stream first. */
		for (int s = 0; s < NR_IO_STREAMS; s++) {
			stream = &wreq->io_streams[s];
			if (stream->submit_len > 0 &&
			    stream->submit_off < lowest_off) {
				lowest_off = stream->submit_off;
				choose_s = s;
			}
		}
		/* ... */
		stream = &wreq->io_streams[choose_s];

		/* ... */
		if (stream->submit_off > iter_off) {
			rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
			iter_off = stream->submit_off;
		}

		atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
		stream->submit_extendable_to = fsize - stream->submit_off;
		part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
					   stream->submit_len, to_eof);
		stream->submit_off += part;
		if (part > stream->submit_len)
			stream->submit_len = 0;
		else
			stream->submit_len -= part;
		/* ... */
	}

	/* ... */
	rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	/* ... */
		kdebug("R=%x: No submit", wreq->debug_id);

	/* ... */
		netfs_issue_write(wreq, &wreq->io_streams[s]);

	/* ... */
	return 0;
}
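
/*
 * The per-folio submission above works in two phases: first each stream is
 * given a submission window (submit_off/submit_len) covering the dirty part
 * of the folio, with streams that cannot participate opted out by setting
 * submit_off to UINT_MAX; then the loop repeatedly picks the stream whose
 * window starts lowest in the folio, advances the rolling buffer to that
 * offset and feeds as much as netfs_advance_write() will accept, so the
 * upload and cache streams can each cover their own byte range of the same
 * folio.
 */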

/* In netfs_end_issue_write(): */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	for (int s = 0; s < NR_IO_STREAMS; s++) {
		struct netfs_io_stream *stream = &wreq->io_streams[s];

		if (!stream->active)
			continue;
		if (!list_empty(&stream->subrequests))
			/* ... */
	}

int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct netfs_inode *ictx = netfs_inode(mapping->host);
	/* ... */

	if (!mutex_trylock(&ictx->wb_lock)) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			/* ... */
		}
		mutex_lock(&ictx->wb_lock);
	}

	/* ... */
	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);

	/* ... iterate over the dirty folios ... */
		_debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
		/* ... */
		WARN_ON_ONCE(wreq && folio_pos(folio) < atomic64_read(&wreq->issued_to));

		if (/* ... */
		    unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
			set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
			wreq->netfs_ops->begin_writeback(wreq);
		}
	/* ... */

	mutex_unlock(&ictx->wb_lock);
	/* ... (error path) ... */
	mutex_unlock(&ictx->wb_lock);
	/* ... */
}
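
/*
 * Illustrative sketch (assumption, not from this file): a filesystem using
 * the netfs library points its address_space_operations at the helper above
 * rather than implementing ->writepages itself, e.g.:
 *
 *	const struct address_space_operations myfs_aops = {
 *		...
 *		.writepages	= netfs_writepages,
 *		...
 *	};
 *
 * Its netfs_request_ops ->begin_writeback() hook, called above the first
 * time data is queued for upload, can then prime per-request state (such as
 * credentials) before any subrequests are issued.
 */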

/* In netfs_begin_writethrough(): */
	struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));

	mutex_lock(&ictx->wb_lock);

	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp,
				      iocb->ki_pos, NETFS_WRITETHROUGH);
	if (IS_ERR(wreq)) {
		mutex_unlock(&ictx->wb_lock);
		return wreq;
	}
	/* ... */
	wreq->io_streams[0].avail = true;

/* In netfs_advance_writethrough(): */
	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
	       wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end);
	/* ... */
	if (wreq->len == 0)
		/* ... */
	/* ... */
	wreq->len += copied;

/* In netfs_end_writethrough(): */
	struct netfs_inode *ictx = netfs_inode(wreq->inode);

	_enter("R=%x", wreq->debug_id);

	/* ... */
	mutex_unlock(&ictx->wb_lock);

	if (wreq->iocb)
		ret = -EIOCBQUEUED;
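
/*
 * Assumed usage (summary, not verbatim from the callers): the buffered
 * write path brackets a write-through write with netfs_begin_writethrough(),
 * calls netfs_advance_writethrough() for each chunk copied into the
 * pagecache so the data is issued as it arrives, and finishes with
 * netfs_end_writethrough(), which returns -EIOCBQUEUED when the write is
 * being completed asynchronously against an iocb.
 */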

/* In netfs_unbuffered_write(): */
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	/* ... */
	loff_t start = wreq->start;

	if (wreq->origin == NETFS_DIO_WRITE)
		inode_dio_begin(wreq->inode);

	/* ... write loop ... */
		len -= part;
		rolling_buffer_advance(&wreq->buffer, part);
		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
			/* ... */
		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
			/* ... */
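
/*
 * The loop above drives a direct (unbuffered) write entirely through the
 * upload stream: each pass carves the next chunk off the remaining length,
 * advances the rolling buffer past the bytes just handed to
 * netfs_advance_write(), and checks the request's PAUSE and FAILED flags
 * between chunks.
 */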

/* In netfs_write_folio_single(): */
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	/* ... */

	if (flen > wreq->i_size - fpos) {
		flen = wreq->i_size - fpos;
		/* ... */
	} else if (flen == wreq->i_size - fpos) {
		/* ... */
	}

	if (!upload->avail && !cache->avail) {
		/* ... */
	}
	/* ... */
	if (!upload->construct)
		/* ... */

	rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK);

	/* Move the submission point forward to allow for write-streaming data
	 * not starting at the front of the page.  We don't do write-streaming
	 * ...
	 */
	for (int s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		stream->submit_off = 0;
		stream->submit_len = flen;
		if (!stream->avail) {
			stream->submit_off = UINT_MAX;
			stream->submit_len = 0;
		}
	}

	/* ...
	 * could end up with thousands of subrequests if the wsize is small -
	 * ...
	 */
	for (;;) {
		/* ... */
		int choose_s = -1;

		/* Always add to the lowest-submitted stream first. */
		for (int s = 0; s < NR_IO_STREAMS; s++) {
			stream = &wreq->io_streams[s];
			if (stream->submit_len > 0 &&
			    stream->submit_off < lowest_off) {
				lowest_off = stream->submit_off;
				choose_s = s;
			}
		}
		/* ... */
		stream = &wreq->io_streams[choose_s];

		/* ... */
		if (stream->submit_off > iter_off) {
			rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
			iter_off = stream->submit_off;
		}

		atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
		stream->submit_extendable_to = fsize - stream->submit_off;
		part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
					   stream->submit_len, to_eof);
		stream->submit_off += part;
		if (part > stream->submit_len)
			stream->submit_len = 0;
		else
			stream->submit_len -= part;
		/* ... */
	}

	wreq->buffer.iter.iov_offset = 0;
	/* ... */
	rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	/* ... */
		kdebug("R=%x: No submit", wreq->debug_id);
	/* ... */
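
/*
 * netfs_write_folio_single() mirrors the pagecache path in
 * netfs_write_folio(): the main differences visible above are that the
 * folio is appended with NETFS_ROLLBUF_PUT_MARK rather than 0, submission
 * always starts at offset 0 within the folio, and the buffer iterator's
 * iov_offset is reset after the folio has been pushed into the streams.
 */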

/**
 * netfs_writeback_single - Write back a monolithic payload
 * ...
 *
 * Write a monolithic, non-pagecache object back to the server and/or
 * the cache.
 */
int netfs_writeback_single(struct address_space *mapping,
			   struct writeback_control *wbc,
			   struct iov_iter *iter)
{
	struct netfs_inode *ictx = netfs_inode(mapping->host);
	/* ... */

	if (WARN_ON_ONCE(!iov_iter_is_folioq(iter)))
		return -EIO;

	if (!mutex_trylock(&ictx->wb_lock)) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			/* ... */
		}
		mutex_lock(&ictx->wb_lock);
	}

	/* ... */
	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);

	if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
		wreq->netfs_ops->begin_writeback(wreq);

	for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) {
		/* ... */
			_debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
			/* ... */
			size -= part;
		/* ... */
	}

	/* ... */
		netfs_issue_write(wreq, &wreq->io_streams[s]);
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
	/* ... */
	mutex_unlock(&ictx->wb_lock);
	/* ... (error path) ... */
	mutex_unlock(&ictx->wb_lock);
	/* ... */
}