xref: /linux/fs/netfs/internal.h (revision 0808ebf2f80b962e75741a41ced372a7116f1e26)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * io.c
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

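/*
 * Helpers to add/remove a request on the global netfs_io_requests list so
 * that in-flight requests can be listed through procfs (presumably
 * /proc/fs/netfs/requests); they compile away entirely when procfs support
 * is not configured.
 */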
#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif

/*
 * misc.c
 */

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

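/*
 * Emit a tracepoint noting the request's current refcount for debugging;
 * this does not take or drop a reference.
 */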
static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
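/*
 * The "rh" counters appear to cover the read-helper paths and the "wh"
 * counters the write-helper paths; all are displayed by netfs_stats_show().
 */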
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;

int netfs_stats_show(struct seq_file *m, void *v);

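/*
 * Bump or drop one of the counters above, e.g. (illustratively)
 * netfs_stat(&netfs_n_rh_readahead) on entry to the readahead path.  When
 * CONFIG_NETFS_STATS is not set, these compile down to nothing.
 */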
static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
void netfs_write_collection_worker(struct work_struct *work);
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq);
int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

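/*
 * NETFS_FOLIO_COPY_TO_CACHE, checked for by the group helpers below, seems
 * to be a special marker value rather than a pointer to a real group, so it
 * is deliberately excluded from refcounting.
 */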
/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of multiple references to a netfs group attached to dirty pages
 * (e.g. a ceph snap), dropping nr refs in one go.
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

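/*
 * cache->state is read with an acquire barrier and written with a release
 * (or a release cmpxchg in the _maybe variant) so that anything published
 * before a transition to FSCACHE_CACHE_IS_ACTIVE is visible to lock-free
 * readers such as fscache_cache_is_live().
 */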
static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

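/*
 * Log the cookie's current refcount through a tracepoint without taking or
 * dropping a reference.
 */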
static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
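/*
 * Typical (illustrative) usage: _enter()/_leave() bracket a function and
 * _debug() reports intermediate state, for example:
 *
 *	_enter("R=%x %llx-%llx", rreq->debug_id, start, start + len - 1);
 *	...
 *	_leave(" = %d", ret);
 *
 * With CONFIG_NETFS_DEBUG these are gated on the netfs_debug tunable;
 * otherwise they are compiled out via no_printk().
 */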

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
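/*
 * Illustrative use of the assertions above:
 *
 *	ASSERTCMP(subreq->start, >=, rreq->start);
 *
 * prints both values and calls BUG() if the comparison is false; the whole
 * family compiles to nothing if the #if above is changed to disable it.
 */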