/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * buffered_write.c
 */
void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
			 loff_t pos, size_t copied);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif

/*
 * misc.c
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
					    enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);
void netfs_wake_collector(struct netfs_io_request *rreq);
void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
				       struct netfs_io_stream *stream);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
void netfs_wait_for_paused_write(struct netfs_io_request *rreq);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq);
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_put_failed_request(struct netfs_io_request *rreq);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
					enum netfs_sreq_ref_trace what)
{
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref), what);
}
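
/*
 * Note: netfs_see_request() and netfs_see_subrequest() only emit a
 * netfs_rreq_ref/netfs_sreq_ref tracepoint with the current refcount; they
 * do not take or drop a reference.  Actual reference changes go through
 * netfs_get_request()/netfs_put_request() above, each tagged with a trace
 * enum so that reference leaks can be chased through the trace events.
 */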

/*
 * read_collect.c
 */
bool netfs_read_collection(struct netfs_io_request *rreq);
void netfs_read_collection_worker(struct work_struct *work);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);

/*
 * read_pgpriv2.c
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);

/*
 * read_retry.c
 */
void netfs_retry_reads(struct netfs_io_request *rreq);
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_rh_retry_read_req;
extern atomic_t netfs_n_rh_retry_read_subreq;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wh_retry_write_req;
extern atomic_t netfs_n_wh_retry_write_subreq;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while(0)
#define netfs_stat_d(x) do {} while(0)
#endif

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
bool netfs_write_collection(struct netfs_io_request *wreq);
void netfs_write_collection_worker(struct work_struct *work);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_prepare_write(struct netfs_io_request *wreq,
			 struct netfs_io_stream *stream,
			 loff_t start);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream);
size_t netfs_advance_write(struct netfs_io_request *wreq,
			   struct netfs_io_stream *stream,
			   loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *writethrough_cache);

/*
 * write_retry.c
 */
void netfs_retry_writes(struct netfs_io_request *wreq);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Dispose of 'nr' refs on a netfs group attached to dirty pages (e.g. a ceph snap).
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Clear and wake up a NETFS_RREQ_* flag bit on a request.
 */
static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
					unsigned int rreq_flag,
					enum netfs_rreq_trace trace)
{
	if (test_bit(rreq_flag, &rreq->flags)) {
		clear_bit_unlock(rreq_flag, &rreq->flags);
		smp_mb__after_atomic(); /* Clear flag before task state */
		trace_netfs_rreq(rreq, trace);
		wake_up(&rreq->waitq);
	}
}

/*
 * Test the NETFS_RREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_rreq_in_progress(const struct netfs_io_request *rreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
}

/*
 * Test the NETFS_SREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_subreq_in_progress(const struct netfs_io_subrequest *subreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
}
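
/*
 * Illustrative sketch (an assumption for clarity, not a declaration in this
 * header): a waiter would typically pair netfs_wake_rreq_flag() on the
 * completing side with the acquire-ordered test above, roughly:
 *
 *	wait_event(rreq->waitq, !netfs_check_rreq_in_progress(rreq));
 *	ret = rreq->error;
 *
 * The acquire in test_bit_acquire() orders the read of the flag before the
 * read of rreq->error, matching the barrier issued before wake_up() in
 * netfs_wake_rreq_flag().
 */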

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */