xref: /linux/fs/netfs/misc.c (revision 3f41368fbfe1b3d5922d317fe1a0a0cab6846802)
// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/swap.h>
#include "internal.h"

/*
 * Attach a folio to the buffer and maybe set marks on it to say that we need
 * to put the folio later and twiddle the pagecache flags.
 */
int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
			    struct folio *folio, unsigned int flags,
			    gfp_t gfp_mask)
{
	XA_STATE_ORDER(xas, xa, index, folio_order(folio));

retry:
	xas_lock(&xas);
	for (;;) {
		xas_store(&xas, folio);
		if (!xas_error(&xas))
			break;
		xas_unlock(&xas);
		if (!xas_nomem(&xas, gfp_mask))
			return xas_error(&xas);
		goto retry;
	}

	if (flags & NETFS_FLAG_PUT_MARK)
		xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
	if (flags & NETFS_FLAG_PAGECACHE_MARK)
		xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
	xas_unlock(&xas);
	return xas_error(&xas);
}

/*
 * Create the specified range of folios in the buffer attached to the read
 * request.  The folios are marked with NETFS_BUF_PUT_MARK so that we know
 * they need freeing later.
 */
int netfs_add_folios_to_buffer(struct xarray *buffer,
			       struct address_space *mapping,
			       pgoff_t index, pgoff_t to, gfp_t gfp_mask)
{
	struct folio *folio;
	int ret;

	if (to + 1 == index) /* Page range is inclusive */
		return 0;

	do {
		/* TODO: Figure out what order folio can be allocated here */
		folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
		if (!folio)
			return -ENOMEM;
		folio->index = index;
		ret = netfs_xa_store_and_mark(buffer, index, folio,
					      NETFS_FLAG_PUT_MARK, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			return ret;
		}

		index += folio_nr_pages(folio);
	} while (index <= to && index != 0);

	return 0;
}

/*
 * Clear an xarray buffer, putting a ref on the folios that have
 * NETFS_BUF_PUT_MARK set.
 */
void netfs_clear_buffer(struct xarray *buffer)
{
	struct folio *folio;
	XA_STATE(xas, buffer, 0);

	rcu_read_lock();
	xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
		folio_put(folio);
	}
	rcu_read_unlock();
	xa_destroy(buffer);
}

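/*
 * Illustrative sketch, not part of the original file: how the two buffer
 * helpers above are meant to pair up inside netfs.  A caller fills a scratch
 * xarray with folios covering an inclusive page range and later drops
 * everything carrying NETFS_BUF_PUT_MARK.  The function name, buffer and
 * range below are purely hypothetical.
 */
static int __maybe_unused netfs_buffer_usage_sketch(struct address_space *mapping)
{
	struct xarray buffer;
	int ret;

	xa_init(&buffer);

	/* Create folios for pages 0..7 inclusive in the scratch buffer. */
	ret = netfs_add_folios_to_buffer(&buffer, mapping, 0, 7, GFP_NOFS);

	/* ... read into or copy out of the folios here ... */

	/* Put the folios we allocated and dispose of the xarray. */
	netfs_clear_buffer(&buffer);
	return ret;
}
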
/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it.  This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
	bool need_use = false;

	_enter("");

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;

	if (!(inode->i_state & I_PINNING_NETFS_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);

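/*
 * Illustrative sketch, not part of the original file: netfs_dirty_folio() is
 * written so that a network filesystem can plug it straight into its
 * address_space_operations.  The operations table below is hypothetical; a
 * real filesystem supplies its own, alongside the rest of its methods.
 */
static const struct address_space_operations example_netfs_aops __maybe_unused = {
	.dirty_folio	= netfs_dirty_folio,
};
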
/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio().  This is
 * intended to be used as, or called from, the netfs's ->write_inode() method.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (wbc->unpinned_netfs_wb)
		fscache_unuse_cookie(cookie, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);

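/*
 * Illustrative sketch, not part of the original file: netfs_unpin_writeback()
 * has the same signature as ->write_inode(), so a netfs can either point that
 * method directly at it or, as in the hypothetical wrapper below, do its own
 * metadata writeback first and then call it.
 */
static int example_netfs_write_inode(struct inode *inode,
				     struct writeback_control *wbc)
{
	/* ... filesystem-specific metadata writeback would go here ... */
	return netfs_unpin_writeback(inode, wbc);
}

static const struct super_operations example_netfs_super_ops __maybe_unused = {
	.write_inode	= example_netfs_write_inode,
};
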
/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (inode->i_state & I_PINNING_NETFS_WB) {
		loff_t i_size = i_size_read(inode);
		fscache_unuse_cookie(cookie, aux, &i_size);
	}
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);

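/*
 * Illustrative sketch, not part of the original file: since the helper above
 * must run before clear_inode(), a typical ->evict_inode() for a netfs would
 * look roughly like this hypothetical example.
 */
static void __maybe_unused example_netfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	netfs_clear_inode_writeback(inode, NULL);
	clear_inode(inode);
	/* ... relinquish the cache cookie and free fs-private state ... */
}
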
/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: Folio being invalidated
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem.  The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo;
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio->index, offset, length);

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write. */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int end = offset + length;

		if (offset >= fend)
			return;
		if (end <= fstart)
			return;
		if (offset <= fstart && end >= fend)
			goto erase_completely;
		if (offset <= fstart && end > fstart)
			goto reduce_len;
		if (offset > fstart && end >= fend)
			goto move_start;
		/* A partial write was split.  The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
	return;
reduce_len:
	finfo->dirty_len = offset + length - finfo->dirty_offset;
	return;
move_start:
	finfo->dirty_len -= offset - finfo->dirty_offset;
	finfo->dirty_offset = offset;
}
EXPORT_SYMBOL(netfs_invalidate_folio);

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 * Returns true if the folio can now be released, false if not.
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	unsigned long long end;

	end = folio_pos(folio) + folio_size(folio);
	if (end > ctx->zero_point)
		ctx->zero_point = end;

	if (folio_test_private(folio))
		return false;
	fscache_note_page_release(netfs_i_cookie(ctx));
	return true;
}
EXPORT_SYMBOL(netfs_release_folio);

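/*
 * Illustrative sketch, not part of the original file: as with
 * netfs_dirty_folio(), the two folio hooks above match the
 * address_space_operations prototypes, so a network filesystem can use them
 * directly as its ->invalidate_folio() and ->release_folio() methods.  The
 * operations table is hypothetical.
 */
static const struct address_space_operations example_netfs_folio_aops __maybe_unused = {
	.invalidate_folio	= netfs_invalidate_folio,
	.release_folio		= netfs_release_folio,
};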