Lines matching +full:sync +full:- +full:active (non-matching source lines are elided and shown as "..."):

// SPDX-License-Identifier: GPL-2.0-or-later
/* In netfs_prepare_dio_read_iterator(): */

	struct netfs_io_request *rreq = subreq->rreq;
	...
	/* Clamp the slice to the stream's maximum subrequest length. */
	rsize = umin(subreq->len, rreq->io_streams[0].sreq_max_len);
	subreq->len = rsize;

	if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
		size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
						rreq->io_streams[0].sreq_max_segs);
		...
			subreq->len = limit;
	...
	/* Carve the slice out of the request buffer and advance past it. */
	subreq->io_iter = rreq->buffer.iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(&rreq->buffer.iter, subreq->len);
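The truncate-then-advance idiom above hands each subrequest a bounded view of the request's buffer. Below is a minimal userspace sketch of the same pattern using a hypothetical byte-range cursor in place of the kernel's iov_iter; struct cursor and carve_slice() are illustrative names, not kernel APIs.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for the kernel's iov_iter: a cursor over a buffer. */
struct cursor {
	char   *p;      /* current position */
	size_t  count;  /* bytes remaining */
};

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

/* Carve a slice of at most max_len bytes off the front of the cursor:
 * copy the cursor, truncate the copy, then advance the original.
 */
static struct cursor carve_slice(struct cursor *buf, size_t max_len)
{
	struct cursor slice = *buf;                   /* subreq->io_iter = rreq->buffer.iter */
	slice.count = min_size(slice.count, max_len); /* iov_iter_truncate() analogue */
	buf->p += slice.count;                        /* iov_iter_advance() analogue */
	buf->count -= slice.count;
	return slice;
}

int main(void)
{
	char data[10];
	struct cursor buf = { data, sizeof(data) };

	/* Splits 10 bytes into slices of 4, 4 and 2. */
	while (buf.count) {
		struct cursor s = carve_slice(&buf, 4);
		printf("slice of %zu bytes at offset %td\n", s.count, s.p - data);
	}
	return 0;
}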
/* In netfs_dispatch_unbuffered_reads(): */

	struct netfs_io_stream *stream = &rreq->io_streams[0];
	unsigned long long start = rreq->start;
	ssize_t size = rreq->len;
	...
			ret = -ENOMEM;
	...
		subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
		subreq->start = start;
		subreq->len = size;
		...
		__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

		/* Queue the subrequest and, for the first one, publish the
		 * stream as active for the collector. */
		spin_lock(&rreq->lock);
		list_add_tail(&subreq->rreq_link, &stream->subrequests);
		if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
			stream->front = subreq;
			if (!stream->active) {
				stream->collected_to = stream->front->start;
				/* Store list pointers before active flag */
				smp_store_release(&stream->active, true);
			}
		}
		...
		spin_unlock(&rreq->lock);
		...
		/* Let the filesystem size and prepare the subrequest. */
		if (rreq->netfs_ops->prepare_read) {
			ret = rreq->netfs_ops->prepare_read(subreq);
		...
		slice = subreq->len;
		size -= slice;
		...
		rreq->submitted += slice;
		...
			set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
		...
		rreq->netfs_ops->issue_read(subreq);
		...
		if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
			...
		if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
			...
		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
			...
	...
	/* On early exit, still mark the request as fully queued. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
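The smp_store_release() above, flagged by its "Store list pointers before active flag" comment, is the publication half of a release/acquire pairing: a collector must not observe stream->active as true before the list linkage and collected_to are visible. A self-contained userspace sketch of that pattern with C11 atomics and pthreads follows; the struct and thread names are hypothetical stand-ins for the netfs stream and collector.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical analogue of a stream being published to a collector. */
struct stream {
	long        collected_to;   /* plain data set up before publication */
	atomic_bool active;         /* publication flag */
};

static struct stream st = { 0, false };

static void *producer(void *arg)
{
	(void)arg;
	st.collected_to = 4096;
	/* Store the data before the active flag (smp_store_release() analogue). */
	atomic_store_explicit(&st.active, true, memory_order_release);
	return NULL;
}

static void *collector(void *arg)
{
	(void)arg;
	/* The acquire load pairs with the release store above. */
	while (!atomic_load_explicit(&st.active, memory_order_acquire))
		;
	/* Guaranteed to observe collected_to == 4096 here. */
	printf("collector sees collected_to=%ld\n", st.collected_to);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, collector, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Build with -pthread; the busy-wait is only to keep the example short.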
static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
	...
	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		return -EIO;
	}
	...
	inode_dio_begin(rreq->inode);
	...
	if (!rreq->submitted) {
		...
		inode_dio_end(rreq->inode);
	...
	/* Wait for a synchronous read; otherwise report it as queued. */
	if (sync)
		...
	else
		ret = -EIOCBQUEUED;
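netfs_unbuffered_read() either waits for the submitted subrequests to complete (synchronous kiocb) or returns -EIOCBQUEUED and lets the result be delivered through the iocb later. The sketch below models that sync-versus-queued contract in userspace, with a worker thread standing in for the read collector; struct request, submit_read() and the completion callback are hypothetical.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define EIOCBQUEUED 529   /* kernel-internal value, reused here for illustration */

/* Hypothetical request: either waited on, or completed via a callback. */
struct request {
	pthread_mutex_t lock;
	pthread_cond_t  done_cv;
	bool            done;
	long            transferred;
	void          (*complete)(struct request *);  /* async completion hook */
};

static void *worker(void *arg)
{
	struct request *req = arg;

	usleep(1000);                 /* pretend the server I/O takes a while */
	pthread_mutex_lock(&req->lock);
	req->transferred = 4096;
	req->done = true;
	pthread_cond_signal(&req->done_cv);
	pthread_mutex_unlock(&req->lock);
	if (req->complete)
		req->complete(req);   /* analogous to completing the iocb */
	return NULL;
}

/* Submit the request; wait if @sync, otherwise report it as queued. */
static long submit_read(struct request *req, pthread_t *thr, bool sync)
{
	pthread_create(thr, NULL, worker, req);
	if (!sync)
		return -EIOCBQUEUED;

	pthread_mutex_lock(&req->lock);
	while (!req->done)
		pthread_cond_wait(&req->done_cv, &req->lock);
	pthread_mutex_unlock(&req->lock);
	return req->transferred;
}

static void on_complete(struct request *req)
{
	printf("async completion: %ld bytes\n", req->transferred);
}

int main(void)
{
	struct request req = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			       false, 0, on_complete };
	pthread_t thr;
	long ret = submit_read(&req, &thr, false);

	printf("submit_read() returned %ld (queued)\n", ret);
	pthread_join(thr, NULL);
	return 0;
}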
/**
 * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
 */
	bool sync = is_sync_kiocb(iocb);
	...
	file_accessed(iocb->ki_filp);

	rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
				   iocb->ki_pos, orig_count, ...);
	...
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);

	/* For a user-backed iterator, extract the pages up front so the buffer
	 * is still reachable if the read completes after the caller returns. */
		ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
		...
		rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
		rreq->direct_bv_count = ret;
		rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
		rreq->len = iov_iter_count(&rreq->buffer.iter);
	...
		rreq->buffer.iter = *iter;
		rreq->len = orig_count;
		rreq->direct_bv_unpin = false;

	/* An async read offloads collection and completes via the iocb. */
	if (!sync) {
		rreq->iocb = iocb;
		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
	}
	...
	ret = netfs_unbuffered_read(rreq, sync);
	...
		goto out; /* May be -EIOCBQUEUED */
	if (sync) {
		...
		iocb->ki_pos += rreq->transferred;
		ret = rreq->transferred;
	}
	...
	orig_count -= ret;
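Because an asynchronous read completes after the caller's iterator has been consumed, the user-backed buffer is extracted up front and its descriptors kept in rreq->direct_bv. The userspace sketch below illustrates the same precaution, duplicating a caller-owned iovec array so a deferred worker can still locate the buffers; capture_buffers() and struct deferred_read are hypothetical, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Hypothetical deferred operation that must outlive the caller's iovec array. */
struct deferred_read {
	struct iovec *bv;        /* our own copy of the buffer descriptors */
	int           bv_count;
};

/* Capture the caller's buffer description before returning, analogous to
 * stashing the extracted bio_vecs in rreq->direct_bv.
 */
static int capture_buffers(struct deferred_read *op,
			   const struct iovec *iov, int iovcnt)
{
	op->bv = calloc(iovcnt, sizeof(*op->bv));
	if (!op->bv)
		return -1;
	memcpy(op->bv, iov, iovcnt * sizeof(*iov));
	op->bv_count = iovcnt;
	return 0;
}

int main(void)
{
	char a[16], b[32];
	struct deferred_read op;

	{
		/* Caller-owned description; may be reused or freed after submit. */
		struct iovec iov[2] = {
			{ .iov_base = a, .iov_len = sizeof(a) },
			{ .iov_base = b, .iov_len = sizeof(b) },
		};
		if (capture_buffers(&op, iov, 2))
			return 1;
	}

	/* Later, the "worker" still knows where to place the data. */
	for (int i = 0; i < op.bv_count; i++)
		printf("segment %d: %zu bytes\n", i, op.bv[i].iov_len);
	free(op.bv);
	return 0;
}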
/**
 * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
 */
	struct inode *inode = file_inode(iocb->ki_filp);
	...
	if (!iter->count)
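netfs_unbuffered_read_iter() is the entry point a network filesystem's ->read_iter typically reaches for O_DIRECT reads. For completeness, here is a small userspace example of issuing such a read; the path and sizes are placeholders, and the exact alignment requirements depend on the filesystem.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/netfs/file";   /* placeholder path */
	size_t len = 65536;                      /* multiple of the block size */
	void *buf;
	int fd;
	ssize_t n;

	/* O_DIRECT generally requires the buffer, offset and length to be
	 * aligned to the filesystem's logical block size.
	 */
	if (posix_memalign(&buf, 4096, len))
		return 1;

	fd = open(path, O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = pread(fd, buf, len, 0);      /* exercises the unbuffered read path */
	if (n < 0)
		perror("pread");
	else
		printf("read %zd bytes directly\n", n);

	close(fd);
	free(buf);
	return 0;
}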