xref: /linux/fs/netfs/fscache_io.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Cache data I/O routines
3  *
4  * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 #define FSCACHE_DEBUG_LEVEL OPERATION
8 #include <linux/fscache-cache.h>
9 #include <linux/uio.h>
10 #include <linux/bvec.h>
11 #include <linux/slab.h>
12 #include "internal.h"
13 
14 /**
15  * fscache_wait_for_operation - Wait for an object become accessible
16  * @cres: The cache resources for the operation being performed
17  * @want_state: The minimum state the object must be at
18  *
19  * See if the target cache object is at the specified minimum state of
20  * accessibility yet, and if not, wait for it.
21  */
22 bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
23 				enum fscache_want_state want_state)
24 {
25 	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
26 	enum fscache_cookie_state state;
27 
28 again:
	/* If the backing cache has gone bad there's no point waiting; the
	 * cookie can never reach the wanted state. */
29 	if (!fscache_cache_is_live(cookie->volume->cache)) {
30 		_leave(" [broken]");
31 		return false;
32 	}
33 
34 	state = fscache_cookie_state(cookie);
35 	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
36 
37 	switch (state) {
38 	case FSCACHE_COOKIE_STATE_CREATING:
39 	case FSCACHE_COOKIE_STATE_INVALIDATING:
		/* A caller that only wants the parameters can go ahead in
		 * these states since no cached content can exist yet. */
40 		if (want_state == FSCACHE_WANT_PARAMS)
41 			goto ready; /* There can be no content */
42 		fallthrough;
43 	case FSCACHE_COOKIE_STATE_LOOKING_UP:
44 	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
		/* Sleep until the cookie leaves its current state, then
		 * re-evaluate from the top (the cache itself may also have
		 * died while we slept). */
45 		wait_var_event(&cookie->state,
46 			       fscache_cookie_state(cookie) != state);
47 		goto again;
48 
49 	case FSCACHE_COOKIE_STATE_ACTIVE:
50 		goto ready;
51 	case FSCACHE_COOKIE_STATE_DROPPED:
52 	case FSCACHE_COOKIE_STATE_RELINQUISHING:
53 	default:
54 		_leave(" [not live]");
55 		return false;
56 	}
57 
58 ready:
	/* Ask the cache backend to set up the operation unless it appears
	 * to have been begun already (cache_priv2 presumably gets set by
	 * ->begin_operation() - TODO confirm against the backend). */
59 	if (!cres->cache_priv2)
60 		return cookie->volume->cache->ops->begin_operation(cres, want_state);
61 	return true;
62 }
63 EXPORT_SYMBOL(fscache_wait_for_operation);
64 
65 /*
66  * Begin an I/O operation on the cache, waiting till we reach the right state.
67  *
68  * Attaches the resources required to the operation resources record.
69  */
70 static int fscache_begin_operation(struct netfs_cache_resources *cres,
71 				   struct fscache_cookie *cookie,
72 				   enum fscache_want_state want_state,
73 				   enum fscache_access_trace why)
74 {
75 	enum fscache_cookie_state state;
76 	long timeo;
77 	bool once_only = false;
78 
	/* Prime the resources record; ->ops stays NULL until the cache
	 * backend accepts the operation. */
79 	cres->ops		= NULL;
80 	cres->cache_priv	= cookie;
81 	cres->cache_priv2	= NULL;
82 	cres->debug_id		= cookie->debug_id;
83 	cres->inval_counter	= cookie->inval_counter;
84 
	/* Pin the cookie with an access count; fails if the cookie can't
	 * currently be used. */
85 	if (!fscache_begin_cookie_access(cookie, why)) {
86 		cres->cache_priv = NULL;
87 		return -ENOBUFS;
88 	}
89 
90 again:
	/* The cookie state must be sampled under the lock so that we act on
	 * a consistent value. */
91 	spin_lock(&cookie->lock);
92 
93 	state = fscache_cookie_state(cookie);
94 	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
95 
96 	switch (state) {
97 	case FSCACHE_COOKIE_STATE_LOOKING_UP:
98 	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
99 	case FSCACHE_COOKIE_STATE_INVALIDATING:
100 		goto wait_for_file_wrangling;
101 	case FSCACHE_COOKIE_STATE_CREATING:
		/* Parameters-only access is fine whilst creating: there can
		 * be no content yet. */
102 		if (want_state == FSCACHE_WANT_PARAMS)
103 			goto ready; /* There can be no content */
104 		goto wait_for_file_wrangling;
105 	case FSCACHE_COOKIE_STATE_ACTIVE:
106 		goto ready;
107 	case FSCACHE_COOKIE_STATE_DROPPED:
108 	case FSCACHE_COOKIE_STATE_RELINQUISHING:
109 		WARN(1, "Can't use cookie in state %u\n", cookie->state);
110 		goto not_live;
111 	default:
112 		goto not_live;
113 	}
114 
115 ready:
116 	spin_unlock(&cookie->lock);
	/* Hand over to the cache backend to attach its resources. */
117 	if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
118 		goto failed;
119 	return 0;
120 
121 wait_for_file_wrangling:
122 	spin_unlock(&cookie->lock);
123 	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
124 			     atomic_read(&cookie->n_accesses),
125 			     fscache_access_io_wait);
	/* Wait for the state to change, but complain (once only) if it
	 * takes too long; timeo <= 1 indicates the 20s budget ran out. */
126 	timeo = wait_var_event_timeout(&cookie->state,
127 				       fscache_cookie_state(cookie) != state, 20 * HZ);
128 	if (timeo <= 1 && !once_only) {
129 		pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u",
130 			__func__, fscache_cookie_state(cookie), state);
131 		fscache_print_cookie(cookie, 'O');
132 		once_only = true;
133 	}
134 	goto again;
135 
136 not_live:
137 	spin_unlock(&cookie->lock);
138 failed:
139 	cres->cache_priv = NULL;
140 	cres->ops = NULL;
	/* Drop the access count taken by fscache_begin_cookie_access(). */
141 	fscache_end_cookie_access(cookie, fscache_access_io_not_live);
142 	_leave(" = -ENOBUFS");
143 	return -ENOBUFS;
144 }
145 
146 int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
147 				   struct fscache_cookie *cookie)
148 {
149 	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
150 				       fscache_access_io_read);
151 }
152 EXPORT_SYMBOL(__fscache_begin_read_operation);
153 
154 int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
155 				    struct fscache_cookie *cookie)
156 {
157 	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
158 				       fscache_access_io_write);
159 }
160 EXPORT_SYMBOL(__fscache_begin_write_operation);
161 
/* Record of an asynchronous write to the cache, carrying everything the
 * completion handler (fscache_wreq_done) needs; freed on completion. */
162 struct fscache_write_request {
163 	struct netfs_cache_resources cache_resources;	/* Cache-level resources for the op */
164 	struct address_space	*mapping;	/* The pagecache being written from */
165 	loff_t			start;		/* Start position of the write */
166 	size_t			len;		/* Length of the write */
167 	bool			set_bits;	/* Passed on to fscache_clear_page_bits() */
168 	bool			using_pgpriv2;	/* If set, clear page bits on completion */
169 	netfs_io_terminated_t	term_func;	/* Completion callback (may be NULL) */
170 	void			*term_func_priv; /* Private data for term_func */
171 };
172 
173 void __fscache_clear_page_bits(struct address_space *mapping,
174 			       loff_t start, size_t len)
175 {
176 	pgoff_t first = start / PAGE_SIZE;
177 	pgoff_t last = (start + len - 1) / PAGE_SIZE;
178 	struct page *page;
179 
180 	if (len) {
181 		XA_STATE(xas, &mapping->i_pages, first);
182 
183 		rcu_read_lock();
184 		xas_for_each(&xas, page, last) {
185 			folio_end_private_2(page_folio(page));
186 		}
187 		rcu_read_unlock();
188 	}
189 }
190 EXPORT_SYMBOL(__fscache_clear_page_bits);
191 
192 /*
193  * Deal with the completion of writing the data to the cache.
194  */
195 static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
196 			      bool was_async)
197 {
198 	struct fscache_write_request *wreq = priv;
199 
	/* If the netfs is tracking the in-flight write with page bits,
	 * clear them across the whole span now that the write is over. */
200 	if (wreq->using_pgpriv2)
201 		fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
202 					wreq->set_bits);
203 
	/* Pass the byte count or error on to the netfs, if it asked. */
204 	if (wreq->term_func)
205 		wreq->term_func(wreq->term_func_priv, transferred_or_error,
206 				was_async);
	/* Release the cache resources and then the request record itself. */
207 	fscache_end_operation(&wreq->cache_resources);
208 	kfree(wreq);
209 }
210 
211 void __fscache_write_to_cache(struct fscache_cookie *cookie,
212 			      struct address_space *mapping,
213 			      loff_t start, size_t len, loff_t i_size,
214 			      netfs_io_terminated_t term_func,
215 			      void *term_func_priv,
216 			      bool using_pgpriv2, bool cond)
217 {
218 	struct fscache_write_request *wreq;
219 	struct netfs_cache_resources *cres;
220 	struct iov_iter iter;
221 	int ret = -ENOBUFS;
222 
	/* Nothing to write: still clear page bits / call the terminator. */
223 	if (len == 0)
224 		goto abandon;
225 
226 	_enter("%llx,%zx", start, len);
227 
	/* Record everything the completion handler will need. */
228 	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
229 	if (!wreq)
230 		goto abandon;
231 	wreq->mapping		= mapping;
232 	wreq->start		= start;
233 	wreq->len		= len;
234 	wreq->using_pgpriv2	= using_pgpriv2;
235 	wreq->set_bits		= cond;
236 	wreq->term_func		= term_func;
237 	wreq->term_func_priv	= term_func_priv;
238 
	/* We need an actual write-capable operation on the cache. */
239 	cres = &wreq->cache_resources;
240 	if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
241 				    fscache_access_io_write) < 0)
242 		goto abandon_free;
243 
	/* Let the cache vet the proposed write; start and len are passed by
	 * pointer, so the backend may adjust the span to be stored. */
244 	ret = cres->ops->prepare_write(cres, &start, &len, len, i_size, false);
245 	if (ret < 0)
246 		goto abandon_end;
247 
248 	/* TODO: Consider clearing page bits now for space the write isn't
249 	 * covering.  This is more complicated than it appears when THPs are
250 	 * taken into account.
251 	 */
252 
	/* Kick off the write from the pagecache over the (possibly adjusted)
	 * span; fscache_wreq_done() does all completion handling. */
253 	iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
254 	fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
255 	return;
256 
abandon_end:
	/* Reuse the completion path to unwind the begun operation. */
257 	return fscache_wreq_done(wreq, ret, false);
258 abandon_free:
259 	kfree(wreq);
260 abandon:
261 	if (using_pgpriv2)
262 		fscache_clear_page_bits(mapping, start, len, cond);
263 	if (term_func)
264 		term_func(term_func_priv, ret, false);
265 }
266 EXPORT_SYMBOL(__fscache_write_to_cache);
268 
269 /*
270  * Change the size of a backing object.
271  */
272 void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
273 {
274 	struct netfs_cache_resources cres;
275 
276 	trace_fscache_resize(cookie, new_size);
277 	if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
278 				    fscache_access_io_resize) == 0) {
279 		fscache_stat(&fscache_n_resizes);
280 		set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
281 
282 		/* We cannot defer a resize as we need to do it inside the
283 		 * netfs's inode lock so that we're serialised with respect to
284 		 * writes.
285 		 */
286 		cookie->volume->cache->ops->resize_cookie(&cres, new_size);
287 		fscache_end_operation(&cres);
288 	} else {
289 		fscache_stat(&fscache_n_resizes_null);
290 	}
291 }
292 EXPORT_SYMBOL(__fscache_resize_cookie);
293