// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/swap.h>
#include "internal.h"

/*
 * Append a folio to the rolling queue.
 */
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
			      bool needs_put)
{
	struct folio_queue *tail = rreq->buffer_tail;
	unsigned int slot, order = folio_order(folio);

	if (WARN_ON_ONCE(!rreq->buffer && tail) ||
	    WARN_ON_ONCE(rreq->buffer && !tail))
		return -EIO;

	if (!tail || folioq_full(tail)) {
		tail = kmalloc(sizeof(*tail), GFP_NOFS);
		if (!tail)
			return -ENOMEM;
		netfs_stat(&netfs_n_folioq);
		folioq_init(tail);
		tail->prev = rreq->buffer_tail;
		if (tail->prev)
			tail->prev->next = tail;
		rreq->buffer_tail = tail;
		if (!rreq->buffer) {
			rreq->buffer = tail;
			iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
		}
		rreq->buffer_tail_slot = 0;
	}

	rreq->io_iter.count += PAGE_SIZE << order;

	slot = folioq_append(tail, folio);
	/* Store the counter after setting the slot. */
	smp_store_release(&rreq->buffer_tail_slot, slot);
	return 0;
}

/*
 * Delete the head of a rolling queue.
 */
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
{
	struct folio_queue *head = wreq->buffer, *next = head->next;

	if (next)
		next->prev = NULL;
	netfs_stat_d(&netfs_n_folioq);
	kfree(head);
	wreq->buffer = next;
	return next;
}

/*
 * Clear out a rolling queue.
 */
void netfs_clear_buffer(struct netfs_io_request *rreq)
{
	struct folio_queue *p;

	while ((p = rreq->buffer)) {
		rreq->buffer = p->next;
		for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
			struct folio *folio = folioq_folio(p, slot);

			if (!folio)
				continue;
			if (folioq_is_marked(p, slot)) {
				trace_netfs_folio(folio, netfs_folio_trace_put);
				folio_put(folio);
			}
		}
		netfs_stat_d(&netfs_n_folioq);
		kfree(p);
	}
}

/*
 * Reset the subrequest iterator to refer just to the region remaining to be
 * read. The iterator may or may not have been advanced by socket ops or
 * extraction ops to an extent that may or may not match the amount actually
 * read.
 */
void netfs_reset_iter(struct netfs_io_subrequest *subreq)
{
	struct iov_iter *io_iter = &subreq->io_iter;
	size_t remain = subreq->len - subreq->transferred;

	if (io_iter->count > remain)
		iov_iter_advance(io_iter, io_iter->count - remain);
	else if (io_iter->count < remain)
		iov_iter_revert(io_iter, remain - io_iter->count);
	iov_iter_truncate(&subreq->io_iter, remain);
}
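
/*
 * Illustrative example of the reset above (the numbers are hypothetical, not
 * taken from any caller): if an 8192-byte subrequest has transferred 3072
 * bytes, 5120 bytes remain; if the socket/extraction ops left io_iter->count
 * at 2048, netfs_reset_iter() reverts the iterator by 3072 bytes so that
 * exactly 5120 bytes are exposed again, then truncates it to that length.
 */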

/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it. This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
	bool need_use = false;

	_enter("");

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;

	if (!(inode->i_state & I_PINNING_NETFS_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);

/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio(). This is
 * intended to be called as/by the netfs's ->write_inode() method.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (wbc->unpinned_netfs_wb)
		fscache_unuse_cookie(cookie, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);

/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (inode->i_state & I_PINNING_NETFS_WB) {
		loff_t i_size = i_size_read(inode);
		fscache_unuse_cookie(cookie, aux, &i_size);
	}
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);
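
/*
 * Illustrative sketch (hypothetical name, not part of this file): the VFS is
 * normally what drives the pin/unpin pairing above. A network filesystem
 * might point its super_operations at the unpin helper, e.g.:
 *
 *	static const struct super_operations example_netfs_super_ops = {
 *		.write_inode	= netfs_unpin_writeback,
 *		...
 *	};
 *
 * and call netfs_clear_inode_writeback() from its ->evict_inode() before
 * clear_inode(). The other fields a real filesystem needs are omitted.
 */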

/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: Folio proposed for release
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem. The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio->index, offset, length);

	if (offset == 0 && length == flen) {
		unsigned long long i_size = i_size_read(&ctx->inode);
		unsigned long long fpos = folio_pos(folio), end;

		end = umin(fpos + flen, i_size);
		if (fpos < i_size && end > ctx->zero_point)
			ctx->zero_point = end;
	}

	folio_wait_private_2(folio); /* [DEPRECATED] */

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write. */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int iend = offset + length;

		if (offset >= fend)
			return;
		if (iend <= fstart)
			return;

		/* The invalidation region overlaps the data. If the region
		 * covers the start of the data, we either move along the start
		 * or just erase the data entirely.
		 */
		if (offset <= fstart) {
			if (iend >= fend)
				goto erase_completely;
			/* Move the start of the data. */
			finfo->dirty_len = fend - iend;
			finfo->dirty_offset = offset;
			return;
		}

		/* Reduce the length of the data if the invalidation region
		 * covers the tail part.
		 */
		if (iend >= fend) {
			finfo->dirty_len = offset - fstart;
			return;
		}

		/* A partial write was split. The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
	return;
}
EXPORT_SYMBOL(netfs_invalidate_folio);

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 * Returns true if the folio can now be released, false if not
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	unsigned long long end;

	if (folio_test_dirty(folio))
		return false;

	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
	if (end > ctx->zero_point)
		ctx->zero_point = end;

	if (folio_test_private(folio))
		return false;
	if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_private_2(folio);
	}
	fscache_note_page_release(netfs_i_cookie(ctx));
	return true;
}
EXPORT_SYMBOL(netfs_release_folio);
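
/*
 * Illustrative sketch (hypothetical name, not a complete set of ops): a
 * network filesystem would typically hook the folio helpers in this file
 * into its address_space_operations, e.g.:
 *
 *	static const struct address_space_operations example_netfs_aops = {
 *		.dirty_folio		= netfs_dirty_folio,
 *		.invalidate_folio	= netfs_invalidate_folio,
 *		.release_folio		= netfs_release_folio,
 *		...
 *	};
 *
 * so that dirtying pins the cache cookie and invalidation/release keep the
 * netfs_folio state and zero_point consistent.
 */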