Lines matching +full:buffered +full:- +full:negative in fs/netfs/buffered_read.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Network filesystem high-level buffered read support.
17 struct netfs_cache_resources *cres = &rreq->cache_resources; in netfs_cache_expand_readahead()
19 if (cres->ops && cres->ops->expand_readahead) in netfs_cache_expand_readahead()
20 cres->ops->expand_readahead(cres, _start, _len, i_size); in netfs_cache_expand_readahead()
29 netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size); in netfs_rreq_expand()
34 if (rreq->netfs_ops->expand_readahead) in netfs_rreq_expand()
35 rreq->netfs_ops->expand_readahead(rreq); in netfs_rreq_expand()
46 if (rreq->start != readahead_pos(ractl) || in netfs_rreq_expand()
47 rreq->len != readahead_length(ractl)) { in netfs_rreq_expand()
48 readahead_expand(ractl, rreq->start, rreq->len); in netfs_rreq_expand()
49 rreq->start = readahead_pos(ractl); in netfs_rreq_expand()
50 rreq->len = readahead_length(ractl); in netfs_rreq_expand()
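Before committing the window, the core gives both the cache (above) and the filesystem a chance to widen it, then resizes the VM's readahead_control and reads back whatever readahead_expand() actually granted. A minimal sketch of a filesystem's ->expand_readahead() hook, assuming a hypothetical 256KiB alignment granule (hook name and granule are illustrative, not from this file):

static void my_expand_readahead(struct netfs_io_request *rreq)
{
	unsigned long long granule = 256 * 1024;	/* hypothetical granule */
	unsigned long long start = round_down(rreq->start, granule);

	/* Widen [start, start+len) outwards to granule boundaries; the core
	 * then clamps the result via readahead_expand() as shown above.
	 */
	rreq->len = round_up(rreq->start + rreq->len, granule) - start;
	rreq->start = start;
}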
63 return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx)); in netfs_begin_cache_read()
67 * netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
72  * is intended to be called from the ->issue_read() method once the filesystem has trimmed the request to the size it wants.
75  * Returns the limited size if successful and -ENOMEM if there is insufficient memory.
78  * [!] NOTE: This must be run in the same thread as ->issue_read() was called in, as we access the readahead_control struct.
84 struct netfs_io_request *rreq = subreq->rreq; in netfs_prepare_read_iterator()
85 size_t rsize = subreq->len; in netfs_prepare_read_iterator()
87 if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER) in netfs_prepare_read_iterator()
88 rsize = umin(rsize, rreq->io_streams[0].sreq_max_len); in netfs_prepare_read_iterator()
94  * that we will need to release later - but we don't want to do that until after we've started the I/O. in netfs_prepare_read_iterator()
100 while (rreq->submitted < subreq->start + rsize) { in netfs_prepare_read_iterator()
103 added = rolling_buffer_load_from_ra(&rreq->buffer, ractl, in netfs_prepare_read_iterator()
107 rreq->submitted += added; in netfs_prepare_read_iterator()
112 subreq->len = rsize; in netfs_prepare_read_iterator()
113 if (unlikely(rreq->io_streams[0].sreq_max_segs)) { in netfs_prepare_read_iterator()
114 size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize, in netfs_prepare_read_iterator()
115 rreq->io_streams[0].sreq_max_segs); in netfs_prepare_read_iterator()
118 subreq->len = limit; in netfs_prepare_read_iterator()
123 subreq->io_iter = rreq->buffer.iter; in netfs_prepare_read_iterator()
125 iov_iter_truncate(&subreq->io_iter, subreq->len); in netfs_prepare_read_iterator()
126 rolling_buffer_advance(&rreq->buffer, subreq->len); in netfs_prepare_read_iterator()
127 return subreq->len; in netfs_prepare_read_iterator()
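The tail of the function shows the iterator-windowing idiom in miniature: subreq->io_iter is a by-value copy of the rolling buffer's iterator, truncated to subreq->len, after which the buffer is advanced so the next subrequest's window starts where this one ends. The same idiom in isolation (helper name is illustrative):

static void iter_next_window(struct iov_iter *master, struct iov_iter *window,
			     size_t len)
{
	*window = *master;		/* struct copy: an independent cursor */
	iov_iter_truncate(window, len);	/* cap this consumer's view */
	iov_iter_advance(master, len);	/* the next window starts past it */
}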
134 struct netfs_cache_resources *cres = &rreq->cache_resources; in netfs_cache_prepare_read()
137 if (!cres->ops) in netfs_cache_prepare_read()
139 source = cres->ops->prepare_read(subreq, i_size); in netfs_cache_prepare_read()
147 * - Eats the caller's ref on subreq.
152 struct netfs_cache_resources *cres = &rreq->cache_resources; in netfs_read_cache_to_pagecache()
155 cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_IGNORE, in netfs_read_cache_to_pagecache()
163 struct netfs_io_stream *stream = &rreq->io_streams[0]; in netfs_queue_read()
165 __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); in netfs_queue_read()
171 spin_lock(&rreq->lock); in netfs_queue_read()
172 list_add_tail(&subreq->rreq_link, &stream->subrequests); in netfs_queue_read()
173 if (list_is_first(&subreq->rreq_link, &stream->subrequests)) { in netfs_queue_read()
174 stream->front = subreq; in netfs_queue_read()
175 if (!stream->active) { in netfs_queue_read()
176 stream->collected_to = stream->front->start; in netfs_queue_read()
178 smp_store_release(&stream->active, true); in netfs_queue_read()
184 set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); in netfs_queue_read()
187 spin_unlock(&rreq->lock); in netfs_queue_read()
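The smp_store_release() publishing stream->active orders the stores before it: any reader that sees the flag via an acquire load is guaranteed to also see stream->front and stream->collected_to. The collector side is not among these matches; an illustrative sketch of the pairing:

static bool my_stream_active(struct netfs_io_stream *stream)
{
	/* Pairs with the smp_store_release() above; if this returns true,
	 * the stream->front and stream->collected_to stores are visible too.
	 */
	return smp_load_acquire(&stream->active);
}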
193 switch (subreq->source) { in netfs_issue_read()
195 rreq->netfs_ops->issue_read(subreq); in netfs_issue_read()
201 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); in netfs_issue_read()
202 subreq->error = 0; in netfs_issue_read()
203 iov_iter_zero(subreq->len, &subreq->io_iter); in netfs_issue_read()
204 subreq->transferred = subreq->len; in netfs_issue_read()
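For the NETFS_DOWNLOAD_FROM_SERVER arm, the filesystem's ->issue_read() points its transport at subreq->io_iter and completes the subrequest. A minimal sketch, assuming a hypothetical my_server_read() transport helper and the netfs_read_subreq_terminated() completion call of recent kernels; the error/transferred handling mirrors the zero-fill arm above:

static void my_issue_read(struct netfs_io_subrequest *subreq)
{
	ssize_t ret;

	/* subreq->io_iter already describes the pagecache destination */
	ret = my_server_read(subreq->rreq->inode, subreq->start,
			     &subreq->io_iter);
	if (ret < 0) {
		subreq->error = ret;
	} else {
		subreq->transferred = ret;
		subreq->error = 0;
	}
	netfs_read_subreq_terminated(subreq);	/* hand off to the collector */
}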
218 struct netfs_inode *ictx = netfs_inode(rreq->inode); in netfs_read_to_pagecache()
219 unsigned long long start = rreq->start; in netfs_read_to_pagecache()
220 ssize_t size = rreq->len; in netfs_read_to_pagecache()
230 ret = -ENOMEM; in netfs_read_to_pagecache()
234 subreq->start = start; in netfs_read_to_pagecache()
235 subreq->len = size; in netfs_read_to_pagecache()
237 source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size); in netfs_read_to_pagecache()
238 subreq->source = source; in netfs_read_to_pagecache()
240 unsigned long long zp = umin(ictx->zero_point, rreq->i_size); in netfs_read_to_pagecache()
241 size_t len = subreq->len; in netfs_read_to_pagecache()
243 if (unlikely(rreq->origin == NETFS_READ_SINGLE)) in netfs_read_to_pagecache()
244 zp = rreq->i_size; in netfs_read_to_pagecache()
245 if (subreq->start >= zp) { in netfs_read_to_pagecache()
246 subreq->source = source = NETFS_FILL_WITH_ZEROES; in netfs_read_to_pagecache()
250 if (len > zp - subreq->start) in netfs_read_to_pagecache()
251 len = zp - subreq->start; in netfs_read_to_pagecache()
253 pr_err("ZERO-LEN READ: R=%08x[%x] l=%zx/%zx s=%llx z=%llx i=%llx", in netfs_read_to_pagecache()
254 rreq->debug_id, subreq->debug_index, in netfs_read_to_pagecache()
255 subreq->len, size, in netfs_read_to_pagecache()
256 subreq->start, ictx->zero_point, rreq->i_size); in netfs_read_to_pagecache()
259 subreq->len = len; in netfs_read_to_pagecache()
262 if (rreq->netfs_ops->prepare_read) { in netfs_read_to_pagecache()
263 ret = rreq->netfs_ops->prepare_read(subreq); in netfs_read_to_pagecache()
265 subreq->error = ret; in netfs_read_to_pagecache()
266 /* Not queued - release both refs. */ in netfs_read_to_pagecache()
280 subreq->source = NETFS_FILL_WITH_ZEROES; in netfs_read_to_pagecache()
299 subreq->error = ret; in netfs_read_to_pagecache()
301 /* Not queued - release both refs. */ in netfs_read_to_pagecache()
306 size -= slice; in netfs_read_to_pagecache()
316 set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); in netfs_read_to_pagecache()
321 cmpxchg(&rreq->error, 0, ret); in netfs_read_to_pagecache()
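The zero_point handling above is easiest to follow with numbers: bytes at or past zp are known to be zero (unless the request is NETFS_READ_SINGLE, which overrides zp to i_size), so a slice starting there becomes NETFS_FILL_WITH_ZEROES and a slice straddling it is clipped so the download never crosses the zero point. A self-contained rendering of that clamp, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long long i_size = 0x3000, zero_point = 0x1000;
	unsigned long long zp = zero_point < i_size ? zero_point : i_size;
	unsigned long long start = 0x0800;	/* subreq->start */
	unsigned long long len = 0x2800;	/* subreq->len */

	if (start >= zp) {
		printf("fill with zeroes\n");
	} else {
		if (len > zp - start)
			len = zp - start;	/* clip to the zero point */
		printf("download %#llx bytes, zero-fill the rest\n", len);
	}
	return 0;
}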
325 * netfs_readahead - Helper to manage a read request
329  * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests from different sources will get munged together.
342 struct netfs_inode *ictx = netfs_inode(ractl->mapping->host); in netfs_readahead()
347 rreq = netfs_alloc_request(ractl->mapping, ractl->file, start, size, in netfs_readahead()
352 __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags); in netfs_readahead()
355 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_readahead()
364 rreq->submitted = rreq->start; in netfs_readahead()
365 if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0) in netfs_readahead()
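netfs_readahead() is not called by the filesystem itself; it is installed as the ->readahead() entry in the address_space_operations, typically next to netfs_read_folio() (documented further down). A sketch of the wiring (struct name is illustrative):

static const struct address_space_operations my_aops = {
	.read_folio	= netfs_read_folio,	/* see below */
	.readahead	= netfs_readahead,
	/* dirty/writeback ops omitted */
};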
384 if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0) in netfs_create_singular_buffer()
385 return -ENOMEM; in netfs_create_singular_buffer()
387 added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags); in netfs_create_singular_buffer()
390 rreq->submitted = rreq->start + added; in netfs_create_singular_buffer()
400 struct address_space *mapping = folio->mapping; in netfs_read_gaps()
402 struct netfs_inode *ctx = netfs_inode(mapping->host); in netfs_read_gaps()
405 unsigned int from = finfo->dirty_offset; in netfs_read_gaps()
406 unsigned int to = from + finfo->dirty_len; in netfs_read_gaps()
413 _enter("%lx", folio->index); in netfs_read_gaps()
422 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_read_gaps()
426 trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_read_gaps); in netfs_read_gaps()
431 ret = -ENOMEM; in netfs_read_gaps()
444 rreq->direct_bv = bvec; in netfs_read_gaps()
445 rreq->direct_bv_count = nr_bvec; in netfs_read_gaps()
451 part = min_t(size_t, to - off, PAGE_SIZE); in netfs_read_gaps()
456 bvec_set_folio(&bvec[i++], folio, flen - to, to); in netfs_read_gaps()
457 iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len); in netfs_read_gaps()
458 rreq->submitted = rreq->start + flen; in netfs_read_gaps()
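netfs_read_gaps() must read every part of the folio except the already-dirty span [from, to), so it describes only the clean pieces in a bio_vec array and wraps that in an ITER_DEST iterator. The same two helpers in a toy setting, with illustrative offsets:

static void iter_over_clean_edges(struct folio *folio, struct iov_iter *iter,
				  struct bio_vec bv[2])
{
	bvec_set_folio(&bv[0], folio, 256, 0);	    /* head: 256 bytes at 0 */
	bvec_set_folio(&bv[1], folio, 256, 3840);   /* tail: 256 bytes at 3840 */
	iov_iter_bvec(iter, ITER_DEST, bv, 2, 512); /* two vecs, 512 bytes */
}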
482 * netfs_read_folio - Helper to manage a read_folio request
487 * possible, or the netfs if not. Space beyond the EOF is zero-filled.
497 struct address_space *mapping = folio->mapping; in netfs_read_folio()
499 struct netfs_inode *ctx = netfs_inode(mapping->host); in netfs_read_folio()
507 _enter("%lx", folio->index); in netfs_read_folio()
518 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_read_folio()
522 trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); in netfs_read_folio()
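As with ->readahead above, this is normally wired straight into the aops table; a filesystem wanting a fast path first can also wrap it. A sketch (wrapper name is illustrative):

static int my_read_folio(struct file *file, struct folio *folio)
{
	/* e.g. satisfy inline data here, else defer to the helper */
	return netfs_read_folio(file, folio);
}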
550 * - full folio write
551 * - write that lies in a folio that is completely beyond EOF
552 * - write that covers the folio from start to EOF or beyond it
566 if (pos - offset + len <= i_size) in netfs_skip_folio_read()
578 if (pos - offset >= i_size) in netfs_skip_folio_read()
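The bullet list above reduces to three cheap predicates against the folio's file position (pos - offset, as in both tests visible here) and EOF. A sketch of the full set per the list (helper and reconstruction are illustrative):

static bool would_skip_read(loff_t pos, size_t offset, size_t len,
			    size_t plen, loff_t i_size)
{
	loff_t folio_start = pos - offset;

	if (offset == 0 && len >= plen)
		return true;		/* full folio write */
	if (folio_start >= i_size)
		return true;		/* folio wholly beyond EOF */
	if (offset == 0 && pos + len >= i_size)
		return true;		/* covers folio start to EOF+ */
	return false;
}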
592 * netfs_write_begin - Helper to prepare for writing [DEPRECATED]
601 * Pre-read data for a write-begin request by drawing data from the cache if
602 * possible, or the netfs if not. Space beyond the EOF is zero-filled.
614 * will cause the folio to be re-got and the process to be retried.
640 if (ctx->ops->check_write_begin) { in netfs_write_begin()
642 ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata); in netfs_write_begin()
654  /* If the folio is beyond the EOF, we want to clear it - unless it's within the region we're going to write. in netfs_write_begin()
671 rreq->no_unlock_folio = folio->index; in netfs_write_begin()
672 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); in netfs_write_begin()
675 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_write_begin()
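Though flagged [DEPRECATED], the helper is still called from a filesystem's ->write_begin() to pre-read whatever part of the folio the write will not overwrite. A sketch of such a call site, assuming the folio-based ->write_begin() signature of recent kernels:

static int my_write_begin(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned int len,
			  struct folio **foliop, void **fsdata)
{
	/* Reads in any part of the folio the write won't fully cover */
	return netfs_write_begin(netfs_inode(mapping->host), file, mapping,
				 pos, len, foliop, fsdata);
}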
720 struct address_space *mapping = folio->mapping; in netfs_prefetch_for_write()
721 struct netfs_inode *ctx = netfs_inode(mapping->host); in netfs_prefetch_for_write()
728 ret = -ENOMEM; in netfs_prefetch_for_write()
737 rreq->no_unlock_folio = folio->index; in netfs_prefetch_for_write()
738 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); in netfs_prefetch_for_write()
740 if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) in netfs_prefetch_for_write()
764 * netfs_buffered_read_iter - Filesystem buffered I/O read routine
768  * This is the ->read_iter() routine for all filesystems that can use the page cache directly.
771  * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be returned when no data can be read without waiting for I/O requests to complete; it doesn't prevent readahead.
775  * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests shall be made for the read or for readahead. When no data can be read,
777  * -EAGAIN shall be returned. When readahead would be triggered, a partial, possibly empty read shall be returned.
782 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
786 struct inode *inode = file_inode(iocb->ki_filp); in netfs_buffered_read_iter()
790 if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) || in netfs_buffered_read_iter()
791 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))) in netfs_buffered_read_iter()
792 return -EINVAL; in netfs_buffered_read_iter()
804 * netfs_file_read_iter - Generic filesystem read routine
808  * This is the ->read_iter() routine for all filesystems that can use the page cache directly.
811  * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be returned when no data can be read without waiting for I/O requests to complete; it doesn't prevent readahead.
815  * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests shall be made for the read or for readahead. When no data can be read,
817  * -EAGAIN shall be returned. When readahead would be triggered, a partial, possibly empty read shall be returned.
822 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
826 struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host); in netfs_file_read_iter()
828 if ((iocb->ki_flags & IOCB_DIRECT) || in netfs_file_read_iter()
829 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)) in netfs_file_read_iter()
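The usual consumer of netfs_file_read_iter() is the file_operations table; netfs_buffered_read_iter() is for callers that have already taken their own locks and done the checks above themselves. A sketch (struct name is illustrative):

static const struct file_operations my_fops = {
	.read_iter	= netfs_file_read_iter,
	/* ... */
};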