/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt
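
/*
 * With pr_fmt() defined this way, every pr_*() call in netfs is prefixed
 * automatically; e.g. (illustrative message only):
 *
 *	pr_warn("stalled request %x\n", id);
 *
 * appears in the log as "netfs: stalled request ...".
 */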

/*
 * buffered_read.c
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * io.c
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync);

/*
 * main.c
 */
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif
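
/*
 * These helpers are expected to be used in add/del pairs over a request's
 * lifetime (addition on allocation, removal on final put, as inferred from
 * the helpers themselves; the callers live elsewhere).  /proc readers walk
 * netfs_io_requests under RCU, hence the list_*_rcu() variants above.
 */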

/*
 * misc.c
 */

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}
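
/*
 * Unlike netfs_get_request()/netfs_put_request(), netfs_see_request()
 * doesn't touch the refcount: it just emits a netfs_rreq_ref tracepoint
 * recording the current count, so a stream of get/put/see events can be
 * matched up when hunting reference leaks.
 */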

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif
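
/*
 * Since the !CONFIG_NETFS_STATS stubs expand to nothing, counters can be
 * bumped unconditionally, e.g. (hypothetical call site):
 *
 *	netfs_stat(&netfs_n_rh_readahead);
 *
 * without any #ifdef at the caller.
 */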

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
void netfs_write_collection_worker(struct work_struct *work);
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq);
int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}
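
/*
 * A cache is only deemed enabled if all three conditions hold: the inode
 * has a valid cookie, the cookie has been bound to backend cache state
 * (cache_priv is set), and the cookie hasn't been disabled.  A plausible
 * (illustrative) call site would gate cache I/O on it:
 *
 *	if (netfs_is_cache_enabled(netfs_inode(inode)))
 *		...read from/write to the cache...
 */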

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of multiple refs on a netfs group attached to a dirty page (e.g. a
 * ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}
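
/*
 * Note that all three helpers treat NETFS_FOLIO_COPY_TO_CACHE as a
 * sentinel rather than a real group (it marks a folio that merely needs
 * copying to the cache), so there is no refcount to adjust.  Calling
 * netfs_put_group_many(g, n) is equivalent to n calls of
 * netfs_put_group(g), collapsed into a single atomic operation.
 */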

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
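
/*
 * The state accessors pair smp_load_acquire() with smp_store_release()/
 * try_cmpxchg_release() so that whatever was written before a state change
 * is visible to anyone who observes the new state.  An illustrative
 * transition (states from linux/fscache-cache.h), bringing a cache online
 * only if it hasn't been withdrawn in the meantime:
 *
 *	if (fscache_set_cache_state_maybe(cache,
 *					  FSCACHE_CACHE_IS_PREPARING,
 *					  FSCACHE_CACHE_IS_ACTIVE))
 *		...
 */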

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	pr_debug("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
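
/*
 * Illustrative output (hypothetical function and task): kenter("R=%x", 1)
 * in foo() on a task named "kworker" emits "[kworke] ==> foo(R=1)" via
 * pr_debug(), so it only appears when dynamic debug enables that site.
 */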

/*
 * assertions
 */
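
/*
 * Illustrative use (hypothetical condition): ASSERTCMP(subreq->len, <=,
 * rreq->len) prints both values with the stringified operator and BUG()s
 * if the comparison is false; flipping the #if below to 0 compiles all
 * four macros away.
 */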
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
415