/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif
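
/*
 * Illustrative sketch only (hypothetical helper, not part of the netfs code):
 * a request is normally made visible through the /proc interface for its
 * lifetime, which amounts to bracketing the I/O with the two helpers above.
 */
static inline void netfs_example_publish_rreq(struct netfs_io_request *rreq)
{
	netfs_proc_add_rreq(rreq);	/* Publish the request for procfs listing */
	/* ... issue and collect the I/O ... */
	netfs_proc_del_rreq(rreq);	/* Unpublish before the request is freed */
}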

/*
 * misc.c
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
					    enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
					enum netfs_sreq_ref_trace what)
{
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref), what);
}
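
/*
 * Illustrative sketch only (hypothetical helper, not part of the netfs code):
 * how the refcounting functions above pair up.  The trace tag is passed in
 * rather than named here, as the tag enumerators live in trace/events/netfs.h.
 */
static inline void netfs_example_hold_request(struct netfs_io_request *rreq,
					      enum netfs_rreq_ref_trace what)
{
	netfs_get_request(rreq, what);		/* +1 ref, noted by the ref tracepoint */
	netfs_see_request(rreq, what);		/* Log the current refcount */
	netfs_put_request(rreq, false, what);	/* Drop the ref again (not in async context) */
}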

/*
 * read_collect.c
 */
void netfs_read_collection_worker(struct work_struct *work);
void netfs_wake_read_collector(struct netfs_io_request *rreq);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
void netfs_wait_for_pause(struct netfs_io_request *rreq);

/*
 * read_pgpriv2.c
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);

/*
 * read_retry.c
 */
void netfs_retry_reads(struct netfs_io_request *rreq);
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif
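
/*
 * Illustrative sketch only (hypothetical helper, not part of the netfs code):
 * the counters above are bumped with netfs_stat() at the point the matching
 * event happens; the !CONFIG_NETFS_STATS variant discards its argument, so
 * callers need no #ifdefs of their own.
 */
static inline void netfs_example_count_rreq_alloc(void)
{
	netfs_stat(&netfs_n_rh_rreq);	/* One more netfs_io_request allocated */
}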

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
void netfs_write_collection_worker(struct work_struct *work);
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream);
size_t netfs_advance_write(struct netfs_io_request *wreq,
			   struct netfs_io_stream *stream,
			   loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * write_retry.c
 */
void netfs_retry_writes(struct netfs_io_request *wreq);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of a number of refs on a netfs group attached to a dirty page
 * (e.g. a ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}
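
/*
 * Illustrative sketch only (hypothetical helper, not part of the netfs code):
 * swapping the group a dirty folio points at means taking a ref on the new
 * group before dropping the ref held on the old one; both helpers above
 * treat NULL and NETFS_FOLIO_COPY_TO_CACHE as "no real group".
 */
static inline struct netfs_group *netfs_example_swap_group(struct netfs_group *old,
							   struct netfs_group *new)
{
	new = netfs_get_group(new);	/* +1 ref on the incoming group */
	netfs_put_group(old);		/* Drop the ref pinned by the old attachment */
	return new;
}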

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
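
/*
 * Illustrative sketch only (hypothetical helper, not part of the fscache
 * code): fscache_set_cache_state_maybe() only performs the transition if the
 * state is still what the caller expects, so racing state changes lose
 * cleanly.  The release store pairs with the acquire load in
 * fscache_cache_state(), making whatever was set up before the transition
 * visible to later readers of the state.
 */
static inline bool fscache_example_demote_live_cache(struct fscache_cache *cache,
						     enum fscache_cache_state new_state)
{
	/* Succeeds only if nobody changed the state under us */
	return fscache_set_cache_state_maybe(cache, FSCACHE_CACHE_IS_ACTIVE, new_state);
}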

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
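
/*
 * Illustrative sketch only (hypothetical function, not part of the netfs
 * code): typical use of the tracing macros above.  The lines are emitted when
 * __KDEBUG is defined, or when CONFIG_NETFS_DEBUG is set and the netfs_debug
 * knob is non-zero; in a plain build they become no_printk() calls.
 */
static inline void netfs_example_traced_op(struct netfs_io_request *rreq)
{
	_enter("R=%x", rreq->debug_id);	/* "==> netfs_example_traced_op(R=...)" */
	_debug("len=%zu", rreq->len);	/* Extra detail, gated the same way */
	_leave("");			/* "<== netfs_example_traced_op()" */
}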

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
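
/*
 * Illustrative sketch only (hypothetical check, not part of the netfs code):
 * the assertions above BUG() the kernel on failure, so they are reserved for
 * invariants that must always hold, e.g. that a subrequest lies within its
 * parent request.
 */
static inline void netfs_example_check_subreq(const struct netfs_io_subrequest *subreq)
{
	const struct netfs_io_request *rreq = subreq->rreq;

	ASSERT(rreq != NULL);
	ASSERTCMP(subreq->start, >=, rreq->start);
	ASSERTIFCMP(subreq->len > 0,
		    subreq->start + subreq->len, <=, rreq->start + rreq->len);
}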