/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
#include <trace/events/fscache.h>

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt

/*
 * buffered_read.c
 */
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len);

/*
 * buffered_write.c
 */
void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
			 loff_t pos, size_t copied);

/*
 * main.c
 */
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
extern mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif

/*
 * misc.c
 */
struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
					    enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);
void netfs_wake_collector(struct netfs_io_request *rreq);
void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
				       struct netfs_io_stream *stream);
ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
void netfs_wait_for_paused_write(struct netfs_io_request *rreq);

/*
 * objects.c
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq);
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_put_failed_request(struct netfs_io_request *rreq);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);

static inline void netfs_see_request(struct netfs_io_request *rreq,
				     enum netfs_rreq_ref_trace what)
{
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}

static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
					enum netfs_sreq_ref_trace what)
{
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref), what);
}
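
/*
 * Illustrative sketch only (not lifted from objects.c): the proc-list and
 * "see" helpers above bracket a request's externally visible lifetime,
 * roughly as follows, where the trace tag is a stand-in for a real
 * netfs_rreq_ref_trace value:
 *
 *	rreq = netfs_alloc_request(mapping, file, start, len, origin);
 *	if (IS_ERR(rreq))
 *		return PTR_ERR(rreq);
 *	netfs_proc_add_rreq(rreq);	<- appears in /proc/fs/netfs/requests
 *	...
 *	netfs_proc_del_rreq(rreq);	<- hide before dropping the last ref
 *	netfs_put_request(rreq, netfs_rreq_trace_put_discard);
 *
 * Note that netfs_see_request() and netfs_see_subrequest() only emit trace
 * points; they neither take nor drop a reference.
 */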

/*
 * read_collect.c
 */
bool netfs_read_collection(struct netfs_io_request *rreq);
void netfs_read_collection_worker(struct work_struct *work);
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);

/*
 * read_pgpriv2.c
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);

/*
 * read_retry.c
 */
void netfs_retry_reads(struct netfs_io_request *rreq);
void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);

/*
 * stats.c
 */
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
extern atomic_t netfs_n_rh_download_done;
extern atomic_t netfs_n_rh_download_failed;
extern atomic_t netfs_n_rh_download_instead;
extern atomic_t netfs_n_rh_read;
extern atomic_t netfs_n_rh_read_done;
extern atomic_t netfs_n_rh_read_failed;
extern atomic_t netfs_n_rh_zero;
extern atomic_t netfs_n_rh_short_read;
extern atomic_t netfs_n_rh_write;
extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
extern atomic_t netfs_n_rh_retry_read_req;
extern atomic_t netfs_n_rh_retry_read_subreq;
extern atomic_t netfs_n_wh_buffered_write;
extern atomic_t netfs_n_wh_writethrough;
extern atomic_t netfs_n_wh_dio_write;
extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_copy_to_cache;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
extern atomic_t netfs_n_wh_upload_failed;
extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wh_retry_write_req;
extern atomic_t netfs_n_wh_retry_write_subreq;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
#define netfs_stat(x) do {} while (0)
#define netfs_stat_d(x) do {} while (0)
#endif
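
/*
 * Illustrative sketch only: netfs_stat()/netfs_stat_d() compile to nothing
 * when CONFIG_NETFS_STATS=n, so callers may count events unconditionally:
 *
 *	netfs_stat(&netfs_n_rh_readahead);	<- one more readahead request
 *
 * The counters are reported by netfs_stats_show() via procfs.
 */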

/*
 * write_collect.c
 */
int netfs_folio_written_back(struct folio *folio);
bool netfs_write_collection(struct netfs_io_request *wreq);
void netfs_write_collection_worker(struct work_struct *work);

/*
 * write_issue.c
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream);
size_t netfs_advance_write(struct netfs_io_request *wreq,
			   struct netfs_io_stream *stream,
			   loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache);
ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *writethrough_cache);
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);

/*
 * write_retry.c
 */
void netfs_retry_writes(struct netfs_io_request *wreq);

/*
 * Miscellaneous functions.
 */
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
	struct fscache_cookie *cookie = ctx->cache;

	return fscache_cookie_valid(cookie) && cookie->cache_priv &&
		fscache_cookie_enabled(cookie);
#else
	return false;
#endif
}

/*
 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
	if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
		refcount_inc(&netfs_group->ref);
	return netfs_group;
}

/*
 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
 */
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_dec_and_test(&netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * As netfs_put_group(), but drop @nr refs in one go.
 */
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
	if (netfs_group &&
	    netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
	    refcount_sub_and_test(nr, &netfs_group->ref))
		netfs_group->free(netfs_group);
}

/*
 * Clear and wake up a NETFS_RREQ_* flag bit on a request.
 */
static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
					unsigned int rreq_flag,
					enum netfs_rreq_trace trace)
{
	if (test_bit(rreq_flag, &rreq->flags)) {
		clear_bit_unlock(rreq_flag, &rreq->flags);
		smp_mb__after_atomic(); /* Clear flag before task state */
		trace_netfs_rreq(rreq, trace);
		wake_up(&rreq->waitq);
	}
}

/*
 * Test the NETFS_RREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_rreq_in_progress(const struct netfs_io_request *rreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
}

/*
 * Test the NETFS_SREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_subreq_in_progress(const struct netfs_io_subrequest *subreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
}
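
/*
 * Illustrative sketch only (the real waiters live in misc.c): a waiter
 * pairs one of the check helpers above with the wake-up in
 * netfs_wake_rreq_flag(), relying on the acquire barrier to order the
 * flag read before any read of rreq->error:
 *
 *	wait_event(rreq->waitq, !netfs_check_rreq_in_progress(rreq));
 *	ret = rreq->error;
 *
 * The clear_bit_unlock()/smp_mb__after_atomic() pair on the waking side
 * provides the matching release ordering.
 */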

/*
 * fscache-cache.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_caches_seq_ops;
#endif
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);

static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}

static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}

static inline void fscache_set_cache_state(struct fscache_cache *cache,
					   enum fscache_cache_state new_state)
{
	smp_store_release(&cache->state, new_state);
}

static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
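
/*
 * Illustrative sketch only: cache->state is handled with acquire/release
 * ordering so that a reader who observes FSCACHE_CACHE_IS_ACTIVE also
 * observes every store made while preparing the cache:
 *
 *	Bring-up side:
 *		... initialise cache fields ...
 *		fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);
 *
 *	Reader side:
 *		if (fscache_cache_is_live(cache))
 *			... cache fields may now be used safely ...
 *
 * fscache_set_cache_state_maybe() is the cmpxchg variant for transitions
 * that must only occur from one specific old state.
 */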

/*
 * fscache-cookie.c
 */
extern struct kmem_cache *fscache_cookie_jar;
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;

extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
					enum fscache_access_trace why);

static inline void fscache_see_cookie(struct fscache_cookie *cookie,
				      enum fscache_cookie_trace where)
{
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
			     where);
}

/*
 * fscache-main.c
 */
extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif

/*
 * fscache-proc.c
 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
#define fscache_proc_init()	(0)
#define fscache_proc_cleanup()	do {} while (0)
#endif

/*
 * fscache-stats.c
 */
#ifdef CONFIG_FSCACHE_STATS
extern atomic_t fscache_n_volumes;
extern atomic_t fscache_n_volumes_collision;
extern atomic_t fscache_n_volumes_nomem;
extern atomic_t fscache_n_cookies;
extern atomic_t fscache_n_cookies_lru;
extern atomic_t fscache_n_cookies_lru_expired;
extern atomic_t fscache_n_cookies_lru_removed;
extern atomic_t fscache_n_cookies_lru_dropped;

extern atomic_t fscache_n_acquires;
extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_oom;

extern atomic_t fscache_n_invalidates;

extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_relinquishes_dropped;

extern atomic_t fscache_n_resizes;
extern atomic_t fscache_n_resizes_null;

static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#define __fscache_stat(stat) (stat)

int fscache_stats_show(struct seq_file *m);
#else

#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif

/*
 * fscache-volume.c
 */
#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
#endif

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);
void fscache_create_volume(struct fscache_volume *volume, bool wait);

/*****************************************************************************/
/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif

/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)					\
do {								\
	if (unlikely(!((X) OP (Y)))) {				\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));	\
		BUG();						\
	}							\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)				\
do {								\
	if (unlikely((C) && !((X) OP (Y)))) {			\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));	\
		BUG();						\
	}							\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif /* assert or not */
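
/*
 * Illustrative sketch only (the function below is hypothetical): how the
 * debug and assertion macros above are conventionally used in the .c files:
 *
 *	static int netfs_example(struct netfs_io_request *rreq, size_t len)
 *	{
 *		_enter("R=%x,%zx", rreq->debug_id, len);
 *		ASSERTCMP(len, >, 0);
 *		...
 *		_leave(" = 0");
 *		return 0;
 *	}
 *
 * _enter()/_leave()/_debug() cost nothing unless __KDEBUG is defined or
 * CONFIG_NETFS_DEBUG=y and the netfs_debug module parameter is set.
 */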