xref: /linux/fs/netfs/misc.c (revision 8a405552fd3b1eefe186e724343e88790f6be832)
// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/swap.h>
#include "internal.h"

/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it.  This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
	bool need_use = false;

	_enter("");

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;

	if (!(inode->i_state & I_PINNING_NETFS_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);
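
/*
 * Example usage (illustrative sketch, not part of this file): a network
 * filesystem built on netfslib typically points ->dirty_folio() straight at
 * netfs_dirty_folio(), or wraps it when it wants to do bookkeeping of its
 * own first.  "myfs" is a hypothetical filesystem used only for
 * illustration.
 */
static bool myfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	/* Filesystem-private accounting could be done here before the
	 * common helper sets the dirty flag and pins the cache cookie.
	 */
	return netfs_dirty_folio(mapping, folio);
}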

/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio().  This is
 * intended to be called as, or from, the netfs's ->write_inode() method.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (wbc->unpinned_netfs_wb)
		fscache_unuse_cookie(cookie, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);
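
/*
 * Example usage (illustrative sketch, not part of this file): this helper is
 * normally installed as, or called from, the filesystem's ->write_inode()
 * method in its super_operations.  "myfs" is hypothetical; a real
 * implementation would also write back its own inode metadata here.
 */
static int myfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* Filesystem-specific inode writeback would go here; then the
	 * cookie pin taken by netfs_dirty_folio() is dropped if the
	 * writeback code marked it as no longer needed.
	 */
	return netfs_unpin_writeback(inode, wbc);
}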

/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (inode->i_state & I_PINNING_NETFS_WB) {
		loff_t i_size = i_size_read(inode);
		fscache_unuse_cookie(cookie, aux, &i_size);
	}
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);
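
/*
 * Example usage (illustrative sketch, not part of this file): a filesystem
 * calls this from its ->evict_inode() method before clear_inode().  "myfs"
 * is hypothetical, and passing NULL auxiliary data is just a placeholder for
 * whatever coherency data the filesystem stores in the cache.
 */
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	/* Drop any cache pin left over from netfs_dirty_folio() and let
	 * fscache record the final file size (and, optionally, updated
	 * auxiliary data) against the cookie.
	 */
	netfs_clear_inode_writeback(inode, NULL);
	clear_inode(inode);
}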

/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: The folio being invalidated
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem.  The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio->index, offset, length);

	if (offset == 0 && length == flen) {
		unsigned long long i_size = i_size_read(&ctx->inode);
		unsigned long long fpos = folio_pos(folio), end;

		end = umin(fpos + flen, i_size);
		if (fpos < i_size && end > ctx->zero_point)
			ctx->zero_point = end;
	}

	folio_wait_private_2(folio); /* [DEPRECATED] */

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write. */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int iend = offset + length;

		if (offset >= fend)
			return;
		if (iend <= fstart)
			return;

		/* The invalidation region overlaps the data.  If the region
		 * covers the start of the data, we either move along the start
		 * or just erase the data entirely.
		 */
		if (offset <= fstart) {
			if (iend >= fend)
				goto erase_completely;
			/* Move the start of the data. */
			finfo->dirty_len = fend - iend;
			finfo->dirty_offset = offset;
			return;
		}

		/* Reduce the length of the data if the invalidation region
		 * covers the tail part.
		 */
		if (iend >= fend) {
			finfo->dirty_len = offset - fstart;
			return;
		}

		/* A partial write was split.  The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
	return;
}
EXPORT_SYMBOL(netfs_invalidate_folio);
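
/*
 * Example (illustrative sketch, not part of this file): ->invalidate_folio()
 * is not usually called by the filesystem directly; it is invoked by the VFS
 * when pagecache is truncated or punched.  A hypothetical truncate path in
 * "myfs" might reach it like this.
 */
static void myfs_truncate(struct inode *inode, loff_t new_size)
{
	/* Update i_size and strip the now-stale pagecache beyond it; folios
	 * carrying private netfs state that straddle or follow the new EOF
	 * are handed to netfs_invalidate_folio() via ->invalidate_folio().
	 */
	truncate_setsize(inode, new_size);
}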

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 * Returns true if the folio can now be released, false if not.
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	unsigned long long end;

	if (folio_test_dirty(folio))
		return false;

	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
	if (end > ctx->zero_point)
		ctx->zero_point = end;

	if (folio_test_private(folio))
		return false;
	if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_private_2(folio);
	}
	fscache_note_page_release(netfs_i_cookie(ctx));
	return true;
}
EXPORT_SYMBOL(netfs_release_folio);
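
/*
 * Example usage (illustrative sketch, not part of this file): pulling the
 * pieces together, a hypothetical "myfs" wires the helpers from this file
 * into its address_space_operations alongside the netfs read/write hooks.
 */
static const struct address_space_operations myfs_aops = {
	.read_folio		= netfs_read_folio,
	.readahead		= netfs_readahead,
	.writepages		= netfs_writepages,
	.dirty_folio		= netfs_dirty_folio,
	.invalidate_folio	= netfs_invalidate_folio,
	.release_folio		= netfs_release_folio,
};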