/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
#include <linux/blk_types.h>
#include <linux/pagevec.h>

struct bio;

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier
 * tasks.
 */
#define DIRTY_SCOPE		8

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	/* public fields that can be set and/or consumed by the caller: */
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange. The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
	unsigned unpinned_netfs_wb:1;	/* Cleared I_PINNING_NETFS_WB */

	/*
	 * When writeback IOs are bounced through async layers, only the
	 * initial synchronous phase should be accounted towards inode
	 * cgroup ownership arbitration to avoid confusion. Later stages
	 * can set the following flag to disable the accounting.
	 */
	unsigned no_cgroup_owner:1;

	/*
	 * To enable batching of swap writes to non-block-device backends,
	 * "swap_plug" can be set to point to a 'struct swap_iocb *'. When
	 * all swap writes have been submitted, if the swap_iocb is not NULL,
	 * swap_write_unplug() should be called.
	 */
	struct swap_iocb **swap_plug;

	/* internal fields used by the ->writepages implementation: */
	struct folio_batch fbatch;
	pgoff_t index;
	int saved_err;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;	/* wb this writeback is issued under */
	struct inode *inode;		/* inode being written out */

	/* foreign inode detection, see wbc_detach_inode() */
	int wb_id;			/* current wb id */
	int wb_lcand_id;		/* last foreign candidate wb id */
	int wb_tcand_id;		/* this foreign candidate wb id */
	size_t wb_bytes;		/* bytes written by current wb */
	size_t wb_lcand_bytes;		/* bytes written by last candidate */
	size_t wb_tcand_bytes;		/* bytes written by this candidate */
#endif
};
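/*
 * Illustrative sketch (not part of this header's API): callers build a
 * writeback_control on the stack with designated initialisers so that
 * unspecified fields are zero, e.g. to synchronously write out a byte
 * range of a mapping in the style of __filemap_fdatawrite_range()
 * ("start" and "end" stand for the caller's byte range):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= start,
 *		.range_end	= end,
 *	};
 *
 *	return do_writepages(mapping, &wbc);
 */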
static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)
{
	blk_opf_t flags = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		flags |= REQ_SYNC;
	else if (wbc->for_kupdate || wbc->for_background)
		flags |= REQ_BACKGROUND;

	return flags;
}

#ifdef CONFIG_CGROUP_WRITEBACK
#define wbc_blkcg_css(wbc) \
	((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css)
#else
#define wbc_blkcg_css(wbc)	(blkcg_root_css)
#endif /* CONFIG_CGROUP_WRITEBACK */
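/*
 * Illustrative sketch: block-based filesystems typically fold these flags
 * into the operation when allocating a writeback bio, roughly:
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_WRITE | wbc_to_write_flags(wbc),
 *			GFP_NOFS);
 */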
/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in. There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()]. Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages. Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time. The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system). This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike. To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed. It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling. Without this, when memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
 * that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
				enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/cgroup.h>
#include <linux/bio.h>

void __inode_attach_wb(struct inode *inode, struct folio *folio);
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
	__releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
			      size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
			   enum wb_reason reason, struct wb_completion *done);
void cgroup_writeback_umount(void);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @folio: folio being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @folio or, if @folio is NULL, %current. May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, folio);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed. Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * This function is to be used by __filemap_fdatawrite_range(), which is an
 * alternative entry point into writeback code, and first ensures @inode is
 * associated with a bdi_writeback and attaches it to @wbc.
 */
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_attach_wb(inode, NULL);
	wbc_attach_and_unlock_inode(wbc, inode);
}

/**
 * wbc_init_bio - writeback specific initialization of bio
 * @wbc: writeback_control for the writeback in progress
 * @bio: bio to be initialized
 *
 * @bio is a part of the writeback in progress controlled by @wbc. Perform
 * writeback specific initialization. This is used to apply the cgroup
 * writeback context. Must be called after the bio has been associated with
 * a device.
 */
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out. This is intentional as we don't want the function to block
	 * behind a slow cgroup. Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (wbc->wb)
		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
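/*
 * Illustrative sketch of how the cgroup writeback helpers above combine,
 * modelled on __filemap_fdatawrite_range() and block-based filesystems
 * (error handling and the actual bio construction elided; "page" and
 * "written" are placeholders for the caller's data):
 *
 *	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
 *	...
 *	wbc_init_bio(&wbc, bio);	(after bio has a device)
 *	wbc_account_cgroup_owner(&wbc, page, written);
 *	...
 *	wbc_detach_inode(&wbc);
 */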
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
					       struct inode *inode)
	__releases(&inode->i_lock)
{
	spin_unlock(&inode->i_lock);
}

static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
					       struct inode *inode)
{
}

static inline void wbc_detach_inode(struct writeback_control *wbc)
{
}

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
}

static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
					    struct page *page, size_t bytes)
{
}

static inline void cgroup_writeback_umount(void)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * mm/page-writeback.c
 */
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;

/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int laptop_mode;

int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
unsigned long cgwb_calc_thresh(struct bdi_writeback *wb);

void wb_update_bandwidth(struct bdi_writeback *wb);

/* Invoke balance dirty pages in async mode. */
#define BDP_ASYNC 0x0001

void balance_dirty_pages_ratelimited(struct address_space *mapping);
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
					  unsigned int flags);

bool wb_over_bg_thresh(struct bdi_writeback *wb);

struct folio *writeback_iter(struct address_space *mapping,
		struct writeback_control *wbc, struct folio *folio, int *error);
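/*
 * Illustrative sketch (mirroring write_cache_pages() below): a ->writepages
 * implementation drives writeback_iter() in a loop, feeding the previous
 * folio back in and letting the iterator handle termination and error
 * propagation; "write_one_folio" is a placeholder for the caller's writeout:
 *
 *	struct folio *folio = NULL;
 *	int error;
 *
 *	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *		error = write_one_folio(folio, wbc);
 *	return error;
 */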
typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
			   void *data);

int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);

bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);

void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);

#endif /* WRITEBACK_H */