/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * io.c
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif
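/*
 * Illustrative sketch (not part of the original header): writers above
 * take netfs_proc_lock while readers pin the list with RCU instead,
 * which is why the helpers pair spin_lock() with the _rcu list
 * primitives.  A hypothetical walker would look something like:
 *
 *	struct netfs_io_request *rreq;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(rreq, &netfs_io_requests, proc_link)
 *		seq_printf(m, "REQUEST %08x\n", rreq->debug_id);
 *	rcu_read_unlock();
 */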
/*
 * misc.c
 */
#define NETFS_FLAG_PUT_MARK		BIT(0)
#define NETFS_FLAG_PAGECACHE_MARK	BIT(1)
int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
			    struct folio *folio, unsigned int flags,
			    gfp_t gfp_mask);
int netfs_add_folios_to_buffer(struct xarray *buffer,
			       struct address_space *mapping,
			       pgoff_t index, pgoff_t to, gfp_t gfp_mask);
void netfs_clear_buffer(struct xarray *buffer);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
void netfs_write_collection_worker(struct work_struct *work);
void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq);
int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Drop a number of refs on a netfs group attached to dirty pages (e.g. a
 * ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}
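/*
 * Illustrative sketch (an assumption, not taken from this header): a
 * filesystem would embed struct netfs_group in its own grouping object,
 * point ->free at a destructor and hand refs around with the helpers
 * above.  A hypothetical ceph-like snap context might look like:
 *
 *	struct my_snap_group {
 *		struct netfs_group group;	// provides ->ref and ->free
 *		u64 snap_id;
 *	};
 *
 *	static void my_snap_free(struct netfs_group *group)
 *	{
 *		kfree(container_of(group, struct my_snap_group, group));
 *	}
 *
 * The helpers skip NETFS_FOLIO_COPY_TO_CACHE, which is a sentinel value
 * rather than a real group pointer and must never be dereferenced.
 */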
/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
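/*
 * Illustrative sketch (FSCACHE_CACHE_IS_PREPARING is assumed to come from
 * fscache-cache.h; only FSCACHE_CACHE_IS_ACTIVE is referenced above): a
 * cache backend can make a guarded state transition with the cmpxchg
 * helper, detecting when another thread won the race:
 *
 *	if (!fscache_set_cache_state_maybe(cache,
 *					   FSCACHE_CACHE_IS_PREPARING,
 *					   FSCACHE_CACHE_IS_ACTIVE))
 *		pr_warn("Cache state changed under us\n");
 *
 * The acquire load in fscache_cache_state() pairs with the release store
 * in fscache_set_cache_state(), so a reader that observes a new state
 * also sees the writes that preceded the transition.
 */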
/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
void fscache_put_volume(struct fscache_volume *volume,
			enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
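/*
 * Illustrative sketch (the format strings and variables are hypothetical):
 * a typical caller brackets a function with the macros above, which fall
 * back to no_printk() so the arguments are still type-checked but no code
 * is emitted when debugging is disabled:
 *
 *	_enter("R=%x %llx-%llx", rreq->debug_id, start, start + len);
 *	...
 *	_leave(" = %d", ret);
 */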
/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
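/*
 * Illustrative sketch (operands are hypothetical): the _CMP variants
 * stringify the operator and print both operands in hex before BUG(),
 * so a failed check such as
 *
 *	ASSERTCMP(subreq->len, <=, rreq->len);
 *
 * logs e.g. "1000 <= 800 is false", whereas plain ASSERT()/ASSERTIF()
 * report only that an assertion failed.
 */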