Selected lines from fs/netfs/buffered_read.c (network filesystem high-level buffered read support); code elided between matched lines is marked with "...".

// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support. */
/* In netfs_cache_expand_readahead(): */
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
/* In netfs_rreq_expand(): */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
	...
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);
	...
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);
	}
/* In netfs_begin_cache_read(): */
	return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
/*
 * netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
 *
 * Prepare the I/O iterator representing the read buffer on a subrequest.  This
 * is intended to be called from the ->issue_read() method once the filesystem
 * has trimmed the request to the size it wants.
 *
 * Returns the limited size if successful and -ENOMEM if insufficient memory
 * available.
 *
 * [!] NOTE: This must be run in the same thread as ->issue_read() was called
 * in as we access the readahead_control struct.
 */
/* In netfs_prepare_read_iterator(): */
	struct netfs_io_request *rreq = subreq->rreq;
	size_t rsize = subreq->len;

	if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER)
		rsize = umin(rsize, rreq->io_streams[0].sreq_max_len);

	if (rreq->ractl) {
		/* If we don't have sufficient folios in the rolling buffer,
		 * extract space from the readahead into the buffer.  Note that
		 * this acquires a ref on each page that we will need to
		 * release later - but we don't want to do that until after
		 * we've started the I/O.
		 */
		while (rreq->submitted < subreq->start + rsize) {
			ssize_t added;

			added = rolling_buffer_load_from_ra(&rreq->buffer, rreq->ractl,
							    &put_batch);
			if (added < 0)
				return added;
			rreq->submitted += added;
			if (added == 0)
				break;
		}
	}

	subreq->len = rsize;
	if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
		size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
						rreq->io_streams[0].sreq_max_segs);

		if (limit < rsize) {
			subreq->len = limit;
			...
		}
	}

	subreq->io_iter = rreq->buffer.iter;
	...
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	rolling_buffer_advance(&rreq->buffer, subreq->len);
	return subreq->len;
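
/* A minimal ->issue_read() sketch for a hypothetical filesystem "myfs",
 * modelled loosely on fs/9p: by the time it runs, the iterator prepared
 * above describes the pagecache slice, so the method just fills
 * subreq->io_iter from the server and reports completion.  The
 * myfs_read_from_server() helper is invented for illustration, and the
 * single-argument netfs_read_subreq_terminated() is assumed from a
 * ~6.14-era kernel.
 */
static ssize_t myfs_read_from_server(void *conn, loff_t pos, struct iov_iter *iter);

static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
	ssize_t got;

	/* Read subreq->len bytes at subreq->start into the prepared iterator. */
	got = myfs_read_from_server(subreq->rreq->netfs_priv,
				    subreq->start + subreq->transferred,
				    &subreq->io_iter);
	if (got < 0) {
		subreq->error = got;
	} else {
		subreq->transferred += got;
		if (got == 0)
			__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
	}
	netfs_read_subreq_terminated(subreq);
}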
/* In netfs_cache_prepare_read(): */
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (!cres->ops)
		return NETFS_DOWNLOAD_FROM_SERVER;
	source = cres->ops->prepare_read(subreq, i_size);
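
/* For orientation, the source types netfs_cache_prepare_read() can hand back
 * (abridged from include/linux/netfs.h; the exact value set is assumed from a
 * ~6.14-era kernel, so check the header for the current list):
 */
enum netfs_io_source {
	NETFS_SOURCE_UNKNOWN,
	NETFS_FILL_WITH_ZEROES,
	NETFS_DOWNLOAD_FROM_SERVER,
	NETFS_READ_FROM_CACHE,
	NETFS_INVALID_READ,
	...
};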
/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */

/* In netfs_read_cache_to_pagecache(): */
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_IGNORE,
			netfs_cache_read_terminated, subreq);
/* In netfs_queue_read(): */
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	...
	spin_lock(&rreq->lock);
	list_add_tail(&subreq->rreq_link, &stream->subrequests);
	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
		stream->front = subreq;
		if (!stream->active) {
			stream->collected_to = stream->front->start;
			/* Store list pointers before active flag */
			smp_store_release(&stream->active, true);
		}
	}
	...
		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
	...
	spin_unlock(&rreq->lock);
/* In netfs_issue_read(): */
	switch (subreq->source) {
	case NETFS_DOWNLOAD_FROM_SERVER:
		rreq->netfs_ops->issue_read(subreq);
		break;
	...
	default:
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		subreq->error = 0;
		iov_iter_zero(subreq->len, &subreq->io_iter);
		subreq->transferred = subreq->len;
	}
/* In netfs_read_to_pagecache(): */
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned long long start = rreq->start;
	ssize_t size = rreq->len;
	...
	do {
		...
		subreq = netfs_alloc_subrequest(rreq);
		if (!subreq) {
			ret = -ENOMEM;
			break;
		}

		subreq->start = start;
		subreq->len = size;

		source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
		subreq->source = source;
		if (source == NETFS_DOWNLOAD_FROM_SERVER) {
			unsigned long long zp = umin(ictx->zero_point, rreq->i_size);
			size_t len = subreq->len;

			if (unlikely(rreq->origin == NETFS_READ_SINGLE))
				zp = rreq->i_size;
			if (subreq->start >= zp) {
				subreq->source = source = NETFS_FILL_WITH_ZEROES;
				goto fill_with_zeroes;
			}

			if (len > zp - subreq->start)
				len = zp - subreq->start;
			...
			pr_err("ZERO-LEN READ: R=%08x[%x] l=%zx/%zx s=%llx z=%llx i=%llx",
			       rreq->debug_id, subreq->debug_index,
			       subreq->len, size,
			       subreq->start, ictx->zero_point, rreq->i_size);
			...
			subreq->len = len;

			if (rreq->netfs_ops->prepare_read) {
				ret = rreq->netfs_ops->prepare_read(subreq);
				if (ret < 0) {
					subreq->error = ret;
					/* Not queued - release both refs. */
					...
				}
			}
			...
		}

	fill_with_zeroes:
		if (source == NETFS_FILL_WITH_ZEROES) {
			subreq->source = NETFS_FILL_WITH_ZEROES;
			...
		}

		slice = netfs_prepare_read_iterator(subreq);
		if (slice < 0) {
			ret = slice;
			subreq->error = ret;
			/* Not queued - release both refs. */
			...
		}
		size -= slice;
		start += slice;
		...
	} while (size > 0);
	...
	set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
	...
	/* Defer error return as we may need to wait for outstanding I/O. */
	cmpxchg(&rreq->error, 0, ret);
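
/* The optional ->prepare_read() hook invoked above lets the filesystem trim
 * each slice before it is issued.  A minimal sketch for the hypothetical
 * "myfs"; MYFS_MAX_RPC_PAYLOAD is an invented per-RPC limit.
 */
#define MYFS_MAX_RPC_PAYLOAD	(256 * 1024)	/* invented limit */

static int myfs_prepare_read(struct netfs_io_subrequest *subreq)
{
	/* Trim the slice to what a single read RPC can carry. */
	subreq->len = umin(subreq->len, MYFS_MAX_RPC_PAYLOAD);
	return 0;
}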
/**
 * netfs_readahead - Helper to manage a read request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.
 */
/* In netfs_readahead(): */
	struct netfs_inode *ictx = netfs_inode(ractl->mapping->host);
	...
	rreq = netfs_alloc_request(ractl->mapping, ractl->file, start, size,
				   NETFS_READAHEAD);
	...
	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto cleanup_free;
	...
	rreq->ractl = ractl;
	rreq->submitted = rreq->start;
	if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
		goto cleanup_free;
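
/* Wiring sketch: a netfs-backed filesystem typically points its
 * address_space_operations at these helpers, along the lines of fs/afs and
 * fs/9p; "myfs" is a hypothetical name.
 */
static const struct address_space_operations myfs_aops = {
	.read_folio		= netfs_read_folio,
	.readahead		= netfs_readahead,
	.dirty_folio		= netfs_dirty_folio,
	.release_folio		= netfs_release_folio,
	.invalidate_folio	= netfs_invalidate_folio,
	.writepages		= netfs_writepages,
	.migrate_folio		= filemap_migrate_folio,
};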
/* In netfs_create_singular_buffer(): */
	if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
		return -ENOMEM;

	added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags);
	if (added < 0)
		return added;
	rreq->submitted = rreq->start + added;
	rreq->ractl = (struct readahead_control *)1UL;
/* In netfs_read_gaps(): */
	struct address_space *mapping = folio->mapping;
	...
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	...
	unsigned int from = finfo->dirty_offset;
	unsigned int to = from + finfo->dirty_len;
	...
	_enter("%lx", folio->index);
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto discard;
	...
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_read_gaps);
	...
	ret = -ENOMEM;
	...
	rreq->direct_bv = bvec;
	rreq->direct_bv_count = nr_bvec;
	...
	part = min_t(size_t, to - off, PAGE_SIZE);
	...
	bvec_set_folio(&bvec[i++], folio, flen - to, to);
	iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
	rreq->submitted = rreq->start + flen;
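
/* Illustration (hypothetical numbers): for a 4096-byte folio with dirty
 * bytes at [1024, 2048), the bvec array built above directs [0, 1024) and
 * [2048, 4096) into the folio but points the dirty middle at a sink folio,
 * so the read fills only the gaps around data that must not be overwritten.
 */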
/**
 * netfs_read_folio - Helper to manage a read_folio request
 *
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 */
/* In netfs_read_folio(): */
	struct address_space *mapping = folio->mapping;
	...
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	...
	_enter("%lx", folio->index);
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto discard;
	...
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
/*
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 */

/* In netfs_skip_folio_read(): */
	if (pos - offset + len <= i_size)
		return false; /* Page entirely before EOF */
	...
	if (pos - offset >= i_size)
		goto zero_out;
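
/* Worked example (hypothetical numbers): a write of len 4096 at pos 0 into
 * a 4096-byte folio at file offset 0 is a full folio write, so the read is
 * skipped.  Likewise, with i_size = 100, a write of len 200 at pos 0 covers
 * the folio from the start to EOF and beyond, so the unwritten tail
 * [200, 4096) is zeroed instead of being read.
 */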
/**
 * netfs_write_begin - Helper to prepare for writing [DEPRECATED]
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * ...
 * The check_write_begin() op may unlock and put the folio, returning 0 with
 * the folio pointer set to NULL, which will cause the folio to be re-got and
 * the process to be retried.
 */
/* In netfs_write_begin(): */
	if (ctx->ops->check_write_begin) {
		/* Allow the netfs (eg. ceph) to flush conflicts. */
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
		...
	}
	...
	/* If the folio is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	...
	rreq->no_unlock_folio = folio->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto error_put;
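
/* Usage sketch: the [DEPRECATED] helper still maps directly onto a
 * filesystem's ->write_begin() method ("myfs" is hypothetical; the VFS and
 * netfs_write_begin() signatures are assumed from a ~6.14-era kernel).
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct folio **foliop, void **fsdata)
{
	return netfs_write_begin(netfs_inode(mapping->host), file, mapping,
				 pos, len, foliop, fsdata);
}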
/* In netfs_prefetch_for_write(): */
	struct address_space *mapping = folio->mapping;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	...
	ret = -ENOMEM;
	...
	rreq->no_unlock_folio = folio->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
	...
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto error_put;
/**
 * netfs_buffered_read_iter - Filesystem buffered I/O read routine
 *
 * This is the ->read_iter() routine for all filesystems that can use the page
 * cache directly.
 *
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
 * returned when no data can be read without waiting for I/O requests to
 * complete; it doesn't prevent readahead.
 *
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
 * shall be made for the read or for readahead.  When no data can be read,
 * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
 * possibly empty read shall be returned.
 */
/* In netfs_buffered_read_iter(): */
	struct inode *inode = file_inode(iocb->ki_filp);
	struct netfs_inode *ictx = netfs_inode(inode);
	...
	if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) ||
			 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)))
		return -EINVAL;
/**
 * netfs_file_read_iter - Generic filesystem read routine
 *
 * This is the ->read_iter() routine for all filesystems that can use the page
 * cache directly.
 *
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
 * returned when no data can be read without waiting for I/O requests to
 * complete; it doesn't prevent readahead.
 *
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
 * shall be made for the read or for readahead.  When no data can be read,
 * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
 * possibly empty read shall be returned.
 */
/* In netfs_file_read_iter(): */
	struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host);

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_read_iter(iocb, iter);

	return netfs_buffered_read_iter(iocb, iter);
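
/* Wiring sketch for the two read routines ("myfs" is hypothetical):
 * netfs_file_read_iter() dispatches unbuffered/direct I/O itself, so it is
 * the usual ->read_iter choice; netfs_buffered_read_iter() suits callers
 * that have already excluded those modes.
 */
static const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= netfs_file_read_iter,
	.write_iter	= netfs_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= filemap_splice_read,
};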