// SPDX-License-Identifier: GPL-2.0-or-later
/* Single, monolithic object support (e.g. AFS directory).
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"

/**
 * netfs_single_mark_inode_dirty - Mark a single, monolithic object inode dirty
 * @inode: The inode to mark
 *
 * Mark an inode that contains a single, monolithic object as dirty so that its
 * writepages op will get called.  If set, the SINGLE_NO_UPLOAD flag indicates
 * that the object will only be written to the cache and not uploaded (e.g. AFS
 * directory contents).
 */
void netfs_single_mark_inode_dirty(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	bool cache_only = test_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags);
	bool caching = fscache_cookie_enabled(netfs_i_cookie(netfs_inode(inode)));

	if (cache_only && !caching)
		return;

	mark_inode_dirty(inode);

	if (caching && !(inode->i_state & I_PINNING_NETFS_WB)) {
		bool need_use = false;

		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(netfs_i_cookie(ictx), true);
	}
}
EXPORT_SYMBOL(netfs_single_mark_inode_dirty);

/* Begin a read operation against the cache, if the inode has a cookie. */
static int netfs_single_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
{
	return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
}

/* Ask the cache how the subrequest should be sourced (cache or server). */
static void netfs_single_cache_prepare_read(struct netfs_io_request *rreq,
					    struct netfs_io_subrequest *subreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (!cres->ops) {
		subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
		return;
	}
	subreq->source = cres->ops->prepare_read(subreq, rreq->i_size);
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
}

/* Issue an asynchronous read of the object from the cache. */
static void netfs_single_read_cache(struct netfs_io_request *rreq,
				    struct netfs_io_subrequest *subreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	_enter("R=%08x[%x]", rreq->debug_id, subreq->debug_index);
	netfs_stat(&netfs_n_rh_read);
	cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_FAIL,
			netfs_cache_read_terminated, subreq);
}

/*
 * Perform a read to a buffer from the cache or the server.  Only a single
 * subreq is permitted as the object must be fetched in a single transaction.
 */
static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	struct netfs_io_subrequest *subreq;
	int ret = 0;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return -ENOMEM;

	subreq->source = NETFS_SOURCE_UNKNOWN;
	subreq->start = 0;
	subreq->len = rreq->len;
	subreq->io_iter = rreq->buffer.iter;

	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

	spin_lock(&rreq->lock);
	list_add_tail(&subreq->rreq_link, &stream->subrequests);
	trace_netfs_sreq(subreq, netfs_sreq_trace_added);
	stream->front = subreq;
	/* Store list pointers before active flag */
	smp_store_release(&stream->active, true);
	spin_unlock(&rreq->lock);

	netfs_single_cache_prepare_read(rreq, subreq);
	switch (subreq->source) {
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download);
		if (rreq->netfs_ops->prepare_read) {
			ret = rreq->netfs_ops->prepare_read(subreq);
			if (ret < 0)
				goto cancel;
		}

		rreq->netfs_ops->issue_read(subreq);
		rreq->submitted += subreq->len;
		break;
	case NETFS_READ_FROM_CACHE:
		trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
		netfs_single_read_cache(rreq, subreq);
		rreq->submitted += subreq->len;
		ret = 0;
		break;
	default:
		pr_warn("Unexpected single-read source %u\n", subreq->source);
		WARN_ON_ONCE(true);
		ret = -EIO;
		break;
	}

	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
	return ret;
cancel:
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
	return ret;
}

/**
 * netfs_read_single - Synchronously read a single blob of pages.
 * @inode: The inode to read from.
 * @file: The file we're using to read or NULL.
 * @iter: The buffer we're reading into.
 *
 * Fulfil a read request for a single monolithic object by drawing data from
 * the cache if possible, or the netfs if not.  The buffer may be larger than
 * the file content; any unused space beyond the EOF will be zero-filled.  The
 * content will be read with a single I/O request (though this may be retried).
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.  If caching is enabled,
 * the data will be stored as a single object into the cache.
 */
ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter)
{
	struct netfs_io_request *rreq;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	rreq = netfs_alloc_request(inode->i_mapping, file, 0, iov_iter_count(iter),
				   NETFS_READ_SINGLE);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	ret = netfs_single_begin_cache_read(rreq, ictx);
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto cleanup_free;

	netfs_stat(&netfs_n_rh_read_single);
	trace_netfs_read(rreq, 0, rreq->len, netfs_read_trace_read_single);

	rreq->buffer.iter = *iter;
	netfs_single_dispatch_read(rreq);

	ret = netfs_wait_for_read(rreq);
	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
	return ret;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return ret;
}
EXPORT_SYMBOL(netfs_read_single);
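
/*
 * Illustrative sketch, not part of the file above: one possible way a netfs
 * might call netfs_read_single() to pull an entire monolithic object into a
 * kernel buffer.  The helper name, the kvmalloc'd buffer and the error
 * handling are assumptions for illustration only; a real caller (e.g. AFS for
 * directory contents) would typically read into its own preallocated buffer.
 */
static ssize_t example_read_whole_object(struct inode *inode, struct file *file)
{
	struct iov_iter iter;
	struct kvec kv;
	size_t size = i_size_read(inode);
	void *buf;
	ssize_t ret;

	buf = kvmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Point an ITER_DEST iterator at the buffer and fetch the object in a
	 * single I/O; the data comes from the cache if possible, otherwise
	 * from the server, and any excess buffer space is zero-filled.
	 */
	kv.iov_base = buf;
	kv.iov_len = size;
	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, size);

	ret = netfs_read_single(inode, file, &iter);
	if (ret < 0)
		kvfree(buf);
	/* On success, buf holds the object content; the caller owns it. */
	return ret;
}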