// SPDX-License-Identifier: GPL-2.0-or-later
/* Direct I/O support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"

/*
 * Prepare the iterator for a DIO read subrequest, clamping its length to the
 * limits set by the filesystem and advancing the request's buffer iterator
 * past the slice this subrequest will cover.
 */
static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	size_t rsize;

	rsize = umin(subreq->len, rreq->io_streams[0].sreq_max_len);
	subreq->len = rsize;

	if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
		size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
						rreq->io_streams[0].sreq_max_segs);

		if (limit < rsize) {
			subreq->len = limit;
			trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
		}
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	subreq->io_iter = rreq->buffer.iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(&rreq->buffer.iter, subreq->len);
}

/*
 * Perform a read to a buffer from the server, slicing up the region to be read
 * according to the network rsize.
 */
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	unsigned long long start = rreq->start;
	ssize_t size = rreq->len;
	int ret = 0;

	do {
		struct netfs_io_subrequest *subreq;
		ssize_t slice;

		subreq = netfs_alloc_subrequest(rreq);
		if (!subreq) {
			ret = -ENOMEM;
			break;
		}

		subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
		subreq->start = start;
		subreq->len = size;

		__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

		spin_lock(&rreq->lock);
		list_add_tail(&subreq->rreq_link, &stream->subrequests);
		if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
			if (!stream->active) {
				stream->collected_to = subreq->start;
				/* Store list pointers before active flag */
				smp_store_release(&stream->active, true);
			}
		}
		trace_netfs_sreq(subreq, netfs_sreq_trace_added);
		spin_unlock(&rreq->lock);

		netfs_stat(&netfs_n_rh_download);
		if (rreq->netfs_ops->prepare_read) {
			ret = rreq->netfs_ops->prepare_read(subreq);
			if (ret < 0) {
				netfs_put_subrequest(subreq, netfs_sreq_trace_put_cancel);
				break;
			}
		}

		netfs_prepare_dio_read_iterator(subreq);
		slice = subreq->len;
		size -= slice;
		start += slice;
		rreq->submitted += slice;
		if (size <= 0) {
			smp_wmb(); /* Write lists before ALL_QUEUED. */
			set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
		}

		rreq->netfs_ops->issue_read(subreq);

		if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
			netfs_wait_for_paused_read(rreq);
		if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
			break;
		cond_resched();
	} while (size > 0);

	if (unlikely(size > 0)) {
		smp_wmb(); /* Write lists before ALL_QUEUED. */
		set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
		netfs_wake_collector(rreq);
	}

	return ret;
}
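
/*
 * Example (illustrative sketch only, not part of this file): the
 * ->prepare_read() and ->issue_read() hooks driven by the dispatcher above
 * are supplied by the network filesystem.  A minimal prepare_read() might do
 * nothing more than cap each wire read at the size the server negotiated;
 * the names myfs_prepare_read(), myfs_sb() and ->rsize are hypothetical:
 *
 *	static int myfs_prepare_read(struct netfs_io_subrequest *subreq)
 *	{
 *		struct netfs_io_stream *stream = &subreq->rreq->io_streams[0];
 *
 *		// Clamp every subrequest to the negotiated maximum read size.
 *		stream->sreq_max_len = myfs_sb(subreq->rreq->inode->i_sb)->rsize;
 *		return 0;
 *	}
 *
 * netfs_prepare_dio_read_iterator() then trims subreq->len to that limit
 * before ->issue_read() transmits the subrequest.
 */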

/*
 * Perform a read to an application buffer, bypassing the pagecache and the
 * local disk cache.
 */
static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
	ssize_t ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		netfs_put_request(rreq, netfs_rreq_trace_put_discard);
		return -EIO;
	}

	// TODO: Use bounce buffer if requested

	inode_dio_begin(rreq->inode);

	ret = netfs_dispatch_unbuffered_reads(rreq);

	if (!rreq->submitted) {
		netfs_put_request(rreq, netfs_rreq_trace_put_no_submit);
		inode_dio_end(rreq->inode);
		ret = 0;
		goto out;
	}

	if (sync)
		ret = netfs_wait_for_read(rreq);
	else
		ret = -EIOCBQUEUED;
out:
	_leave(" = %zd", ret);
	return ret;
}

/**
 * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer. No use is made of the pagecache.
 *
 * The caller must hold any appropriate locks.
 */
ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
{
	struct netfs_io_request *rreq;
	ssize_t ret;
	size_t orig_count = iov_iter_count(iter);
	bool sync = is_sync_kiocb(iocb);

	_enter("");

	if (!orig_count)
		return 0; /* Don't update atime */

	ret = kiocb_write_and_wait(iocb, orig_count);
	if (ret < 0)
		return ret;
	file_accessed(iocb->ki_filp);

	rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
				   iocb->ki_pos, orig_count,
				   iocb->ki_flags & IOCB_DIRECT ?
				   NETFS_DIO_READ : NETFS_UNBUFFERED_READ);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	netfs_stat(&netfs_n_rh_dio_read);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);

	/* If this is an async op, we have to keep track of the destination
	 * buffer for ourselves as the caller's iterator will be trashed when
	 * we return.
	 *
	 * In such a case, extract an iterator to represent as much of the
	 * output buffer as we can manage. Note that the extraction might not
	 * be able to allocate a sufficiently large bvec array and may shorten
	 * the request.
	 */
	if (user_backed_iter(iter)) {
		ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
		if (ret < 0)
			goto error_put;
		rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
		rreq->direct_bv_count = ret;
		rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
		rreq->len = iov_iter_count(&rreq->buffer.iter);
	} else {
		rreq->buffer.iter = *iter;
		rreq->len = orig_count;
		rreq->direct_bv_unpin = false;
		iov_iter_advance(iter, orig_count);
	}

	// TODO: Set up bounce buffer if needed

	if (!sync) {
		rreq->iocb = iocb;
		__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
	}

	ret = netfs_unbuffered_read(rreq, sync);
	if (ret < 0)
		goto out; /* May be -EIOCBQUEUED */
	if (sync) {
		// TODO: Copy from bounce buffer
		iocb->ki_pos += rreq->transferred;
		ret = rreq->transferred;
	}

out:
	netfs_put_request(rreq, netfs_rreq_trace_put_return);
	if (ret > 0)
		orig_count -= ret;
	return ret;

error_put:
	netfs_put_failed_request(rreq);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);

/**
 * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
 * @iocb: The I/O control descriptor describing the read
 * @iter: The output buffer (also specifies read length)
 *
 * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
 * output buffer. No use is made of the pagecache.
 */
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iter->count)
		return 0; /* Don't update atime */

	ret = netfs_start_io_direct(inode);
	if (ret == 0) {
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
		netfs_end_io_direct(inode);
	}
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter);
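
/*
 * Example (illustrative sketch only, not part of this file): a network
 * filesystem normally reaches netfs_unbuffered_read_iter() from its
 * ->read_iter() handler when the caller opened the file O_DIRECT, falling
 * back to the buffered path otherwise.  "myfs" is a placeholder name:
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		if (iocb->ki_flags & IOCB_DIRECT)
 *			return netfs_unbuffered_read_iter(iocb, iter);
 *		return netfs_file_read_iter(iocb, iter);
 *	}
 *
 * A caller that already holds the locks netfs_start_io_direct() would take
 * can call netfs_unbuffered_read_iter_locked() directly instead.
 */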