/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * buffered_write.c
 */
void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
			 loff_t pos, size_t copied);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif

/*
 * misc.c
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
					    enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);
void netfs_wake_collector(struct netfs_io_request *rreq);
void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
				       struct netfs_io_stream *stream);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
void netfs_wait_for_paused_write(struct netfs_io_request *rreq);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq);
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
					enum netfs_sreq_ref_trace what)
{
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref), what);
}
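
/*
 * Illustrative lifetime sketch for the request helpers above (not a real
 * call site; the netfs_rreq_trace_* tags below are stand-ins for whichever
 * tracepoint values the caller would actually use):
 *
 *	rreq = netfs_alloc_request(mapping, file, start, len, origin);
 *	if (IS_ERR(rreq))
 *		return PTR_ERR(rreq);
 *	netfs_get_request(rreq, netfs_rreq_trace_get_hold);	// take extra ref
 *	...issue and collect subrequests...
 *	netfs_put_request(rreq, netfs_rreq_trace_put_hold);	// drop extra ref
 *	netfs_put_request(rreq, netfs_rreq_trace_put_return);	// drop alloc ref
 *
 * netfs_see_request()/netfs_see_subrequest() only emit a trace line; they
 * do not change the refcount.
 */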

/*
 * read_collect.c
 */
bool netfs_read_collection(struct netfs_io_request *rreq);
void netfs_read_collection_worker(struct work_struct *work);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);

/*
 * read_pgpriv2.c
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);

/*
 * read_retry.c
 */
void netfs_retry_reads(struct netfs_io_request *rreq);
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_rh_retry_read_req;
extern atomic_t netfs_n_rh_retry_read_subreq;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wh_retry_write_req;
extern atomic_t netfs_n_wh_retry_write_subreq;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
bool netfs_write_collection(struct netfs_io_request *wreq);
void netfs_write_collection_worker(struct work_struct *work);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream);
size_t netfs_advance_write(struct netfs_io_request *wreq,
			   struct netfs_io_stream *stream,
			   loff_t start, size_t len, bool to_eof);
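
/*
 * A minimal sketch of how the writethrough helpers below are expected to
 * nest, inferred from their signatures (the real driver is the buffered
 * write path; wbc and folio setup are omitted):
 *
 *	wreq = netfs_begin_writethrough(iocb, len);
 *	while (<data left to copy>) {
 *		...copy user data into folio...
 *		netfs_advance_writethrough(wreq, &wbc, folio, copied,
 *					   to_page_end, &writethrough_cache);
 *	}
 *	ret = netfs_end_writethrough(wreq, &wbc, writethrough_cache);
 */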

struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * write_retry.c
 */
void netfs_retry_writes(struct netfs_io_request *wreq);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of multiple refs on a netfs group attached to a dirty page (e.g. a
 * ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Clear and wake up a NETFS_RREQ_* flag bit on a request.
 */
static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
					unsigned int rreq_flag,
					enum netfs_rreq_trace trace)
{
	if (test_bit(rreq_flag, &rreq->flags)) {
		clear_bit_unlock(rreq_flag, &rreq->flags);
		smp_mb__after_atomic(); /* Clear flag before task state */
		trace_netfs_rreq(rreq, trace);
		wake_up(&rreq->waitq);
	}
}
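
/*
 * Sketch of the waiter side that netfs_wake_rreq_flag() pairs with
 * (illustrative only; the real wait loops live in misc.c):
 *
 *	wait_event(rreq->waitq,
 *		   !test_bit_acquire(NETFS_RREQ_PAUSE, &rreq->flags));
 *
 * clear_bit_unlock() publishes the flag clear with release semantics and
 * test_bit_acquire() provides the matching acquire, so anything written
 * before the flag was cleared is visible to the woken waiter.
 */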

/*
 * Test the NETFS_RREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_rreq_in_progress(const struct netfs_io_request *rreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
}

/*
 * Test the NETFS_SREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_subreq_in_progress(const struct netfs_io_subrequest *subreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
}

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
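
/*
 * Assumed usage sketch for the state helpers above: a cache being brought
 * online might transition
 *
 *	if (fscache_set_cache_state_maybe(cache,
 *					  FSCACHE_CACHE_IS_PREPARING,
 *					  FSCACHE_CACHE_IS_ACTIVE))
 *		...
 *
 * The release semantics of the store pair with the acquire load in
 * fscache_cache_state(), so a reader that observes FSCACHE_CACHE_IS_ACTIVE
 * also observes the cache fields initialised before the transition.
 * (FSCACHE_CACHE_IS_PREPARING is assumed here; see linux/fscache-cache.h
 * for the actual state set.)
 */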

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */