// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/swap.h>
#include "internal.h"

/*
 * Make sure there's space in the rolling queue.
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
{
        struct folio_queue *tail = rreq->buffer_tail, *prev;
        unsigned int prev_nr_slots = 0;

        if (WARN_ON_ONCE(!rreq->buffer && tail) ||
            WARN_ON_ONCE(rreq->buffer && !tail))
                return ERR_PTR(-EIO);

        prev = tail;
        if (prev) {
                if (!folioq_full(tail))
                        return tail;
                prev_nr_slots = folioq_nr_slots(tail);
        }

        tail = kmalloc(sizeof(*tail), GFP_NOFS);
        if (!tail)
                return ERR_PTR(-ENOMEM);
        netfs_stat(&netfs_n_folioq);
        folioq_init(tail);
        tail->prev = prev;
        if (prev)
                /* [!] NOTE: After we set prev->next, the consumer is entirely
                 * at liberty to delete prev.
                 */
                WRITE_ONCE(prev->next, tail);

        rreq->buffer_tail = tail;
        if (!rreq->buffer) {
                rreq->buffer = tail;
                iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
        } else {
                /* Make sure we don't leave the master iterator pointing to a
                 * block that might get immediately consumed.
                 */
                if (rreq->io_iter.folioq == prev &&
                    rreq->io_iter.folioq_slot == prev_nr_slots) {
                        rreq->io_iter.folioq = tail;
                        rreq->io_iter.folioq_slot = 0;
                }
        }
        rreq->buffer_tail_slot = 0;
        return tail;
}

/*
 * Append a folio to the rolling queue.
 */
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
                              bool needs_put)
{
        struct folio_queue *tail;
        unsigned int slot, order = folio_order(folio);

        tail = netfs_buffer_make_space(rreq);
        if (IS_ERR(tail))
                return PTR_ERR(tail);

        rreq->io_iter.count += PAGE_SIZE << order;

        slot = folioq_append(tail, folio);
        /* Store the counter after setting the slot. */
        smp_store_release(&rreq->buffer_tail_slot, slot);
        return 0;
}
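
/* Example (illustrative only): a hedged sketch of a producer feeding folios
 * into the rolling queue.  Real callers (e.g. the write paths) have their
 * own allocation and error-handling policies.
 *
 *	struct folio *folio = folio_alloc(GFP_NOFS, 0);
 *	int ret;
 *
 *	if (!folio)
 *		return -ENOMEM;
 *	ret = netfs_buffer_append_folio(rreq, folio, true);
 *	if (ret < 0) {
 *		folio_put(folio);
 *		return ret;
 *	}
 *	// On success the folio now lives in rreq->buffer; marked slots are
 *	// put again by netfs_clear_buffer() when the buffer is torn down.
 */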

/*
 * Delete the head of a rolling queue.
 */
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
{
        struct folio_queue *head = wreq->buffer, *next = head->next;

        if (next)
                next->prev = NULL;
        netfs_stat_d(&netfs_n_folioq);
        kfree(head);
        wreq->buffer = next;
        return next;
}
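
/* Example (illustrative only): the consumer side that the NOTE in
 * netfs_buffer_make_space() is guarding against.  Once the producer
 * publishes prev->next with WRITE_ONCE(), the consumer may free prev at
 * any moment, so the producer must not touch prev afterwards.  A hedged
 * sketch of a consumer loop ("fully used" being whatever completion
 * tracking the caller maintains):
 *
 *	while (wreq->buffer && wreq->buffer has been fully used)
 *		netfs_delete_buffer_head(wreq);
 */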

/*
 * Clear out a rolling queue.
 */
void netfs_clear_buffer(struct netfs_io_request *rreq)
{
        struct folio_queue *p;

        while ((p = rreq->buffer)) {
                rreq->buffer = p->next;
                for (int slot = 0; slot < folioq_count(p); slot++) {
                        struct folio *folio = folioq_folio(p, slot);
                        if (!folio)
                                continue;
                        if (folioq_is_marked(p, slot)) {
                                trace_netfs_folio(folio, netfs_folio_trace_put);
                                folio_put(folio);
                        }
                }
                netfs_stat_d(&netfs_n_folioq);
                kfree(p);
        }
}

/*
 * Reset the subrequest iterator to refer just to the region remaining to be
 * read.  The iterator may or may not have been advanced by socket ops or
 * extraction ops to an extent that may or may not match the amount actually
 * read.
 */
void netfs_reset_iter(struct netfs_io_subrequest *subreq)
{
        struct iov_iter *io_iter = &subreq->io_iter;
        size_t remain = subreq->len - subreq->transferred;

        if (io_iter->count > remain)
                iov_iter_advance(io_iter, io_iter->count - remain);
        else if (io_iter->count < remain)
                iov_iter_revert(io_iter, remain - io_iter->count);
        iov_iter_truncate(&subreq->io_iter, remain);
}
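
/* Worked example (illustrative numbers): with subreq->len = 16384 and
 * subreq->transferred = 4096, remain = 12288.  If the socket left
 * io_iter->count = 8192, the iterator overshot and is reverted by
 * 12288 - 8192 = 4096 bytes; if it left io_iter->count = 14336, it
 * undershot and is advanced by 14336 - 12288 = 2048 bytes.  Either way,
 * it is then truncated to exactly the 12288 bytes remaining.
 */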

/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it.  This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        struct inode *inode = mapping->host;
        struct netfs_inode *ictx = netfs_inode(inode);
        struct fscache_cookie *cookie = netfs_i_cookie(ictx);
        bool need_use = false;

        _enter("");

        if (!filemap_dirty_folio(mapping, folio))
                return false;
        if (!fscache_cookie_valid(cookie))
                return true;

        if (!(inode->i_state & I_PINNING_NETFS_WB)) {
                spin_lock(&inode->i_lock);
                if (!(inode->i_state & I_PINNING_NETFS_WB)) {
                        inode->i_state |= I_PINNING_NETFS_WB;
                        need_use = true;
                }
                spin_unlock(&inode->i_lock);

                if (need_use)
                        fscache_use_cookie(cookie, true);
        }
        return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);

/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio().  This is
 * intended to be called from the netfs's ->write_inode() method.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
        struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

        if (wbc->unpinned_netfs_wb)
                fscache_unuse_cookie(cookie, NULL, NULL);
        return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);
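
/* Example (illustrative only): a hedged sketch of wiring this up for a
 * hypothetical netfs client.  The signature matches ->write_inode(), so a
 * filesystem with no metadata of its own to write can use it directly:
 *
 *	static const struct super_operations myfs_super_ops = {
 *		.write_inode	= netfs_unpin_writeback,
 *		...
 *	};
 */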

/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
        struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

        if (inode->i_state & I_PINNING_NETFS_WB) {
                loff_t i_size = i_size_read(inode);
                fscache_unuse_cookie(cookie, aux, &i_size);
        }
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);
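
/* Example (illustrative only): a hedged sketch of an eviction path for a
 * hypothetical netfs client, calling this before clear_inode() as required
 * above.  Passing NULL for @aux skips the coherency-data update:
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		netfs_clear_inode_writeback(inode, NULL);
 *		clear_inode(inode);
 *	}
 */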

/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: Folio proposed for release
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem.  The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
        struct netfs_folio *finfo;
        struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
        size_t flen = folio_size(folio);

        _enter("{%lx},%zx,%zx", folio->index, offset, length);

        if (offset == 0 && length == flen) {
                unsigned long long i_size = i_size_read(&ctx->inode);
                unsigned long long fpos = folio_pos(folio), end;

                end = umin(fpos + flen, i_size);
                if (fpos < i_size && end > ctx->zero_point)
                        ctx->zero_point = end;
        }

        folio_wait_private_2(folio); /* [DEPRECATED] */

        if (!folio_test_private(folio))
                return;

        finfo = netfs_folio_info(folio);

        if (offset == 0 && length >= flen)
                goto erase_completely;

        if (finfo) {
                /* We have a partially uptodate page from a streaming write. */
                unsigned int fstart = finfo->dirty_offset;
                unsigned int fend = fstart + finfo->dirty_len;
                unsigned int iend = offset + length;

                if (offset >= fend)
                        return;
                if (iend <= fstart)
                        return;

                /* The invalidation region overlaps the data.  If the region
                 * covers the start of the data, we either move along the start
                 * or just erase the data entirely.
                 */
                if (offset <= fstart) {
                        if (iend >= fend)
                                goto erase_completely;
                        /* Move the start of the data. */
                        finfo->dirty_len = fend - iend;
                        finfo->dirty_offset = iend;
                        return;
                }

                /* Reduce the length of the data if the invalidation region
                 * covers the tail part.
                 */
                if (iend >= fend) {
                        finfo->dirty_len = offset - fstart;
                        return;
                }

                /* A partial write was split.  The caller has already zeroed
                 * it, so just absorb the hole.
                 */
        }
        return;

erase_completely:
        netfs_put_group(netfs_folio_group(folio));
        folio_detach_private(folio);
        folio_clear_uptodate(folio);
        kfree(finfo);
        return;
}
EXPORT_SYMBOL(netfs_invalidate_folio);

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 *
 * Return: true if the folio can now be released, false if not.
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
        struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
        unsigned long long end;

        if (folio_test_dirty(folio))
                return false;

        end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
        if (end > ctx->zero_point)
                ctx->zero_point = end;

        if (folio_test_private(folio))
                return false;
        if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
                if (current_is_kswapd() || !(gfp & __GFP_FS))
                        return false;
                folio_wait_private_2(folio);
        }
        fscache_note_page_release(netfs_i_cookie(ctx));
        return true;
}
EXPORT_SYMBOL(netfs_release_folio);
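
/* Example (illustrative only): the three address_space ops exported in this
 * file are normally installed together.  A hedged sketch of a hypothetical
 * netfs client's address_space_operations (real users also set the read,
 * write and writepages ops):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= netfs_dirty_folio,
 *		.invalidate_folio	= netfs_invalidate_folio,
 *		.release_folio		= netfs_release_folio,
 *		...
 *	};
 */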