1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm/page-writeback.c
4 *
5 * Copyright (C) 2002, Linus Torvalds.
6 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
7 *
8 * Contains functions related to writing back dirty pages at the
9 * address_space level.
10 *
11 * 10Apr2002 Andrew Morton
12 * Initial version
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/math64.h>
17 #include <linux/export.h>
18 #include <linux/spinlock.h>
19 #include <linux/fs.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/slab.h>
23 #include <linux/pagemap.h>
24 #include <linux/writeback.h>
25 #include <linux/init.h>
26 #include <linux/backing-dev.h>
27 #include <linux/task_io_accounting_ops.h>
28 #include <linux/blkdev.h>
29 #include <linux/mpage.h>
30 #include <linux/rmap.h>
31 #include <linux/percpu.h>
32 #include <linux/smp.h>
33 #include <linux/sysctl.h>
34 #include <linux/cpu.h>
35 #include <linux/syscalls.h>
36 #include <linux/folio_batch.h>
37 #include <linux/timer.h>
38 #include <linux/sched/rt.h>
39 #include <linux/sched/signal.h>
40 #include <linux/mm_inline.h>
41 #include <linux/shmem_fs.h>
42 #include <trace/events/writeback.h>
43
44 #include "internal.h"
45
46 /*
47 * Sleep at most 200ms at a time in balance_dirty_pages().
48 */
49 #define MAX_PAUSE max(HZ/5, 1)
50
51 /*
52 * Try to keep balance_dirty_pages() call intervals higher than this many pages
53  * by raising the pause time to max_pause when the interval falls below it.
54 */
55 #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
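
/*
 * For example, with PAGE_SHIFT == 12 (4KiB pages) this evaluates to
 * 128 >> 2 = 32 pages, i.e. 128KiB dirtied between polls.
 */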
56
57 /*
58 * Estimate write bandwidth or update dirty limit at 200ms intervals.
59 */
60 #define BANDWIDTH_INTERVAL max(HZ/5, 1)
61
62 #define RATELIMIT_CALC_SHIFT 10
63
64 /*
65 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
66 * will look to see if it needs to force writeback or throttling.
67 */
68 static long ratelimit_pages = 32;
69
70 /* The following parameters are exported via /proc/sys/vm */
71
72 /*
73 * Start background writeback (via writeback threads) at this percentage
74 */
75 static int dirty_background_ratio = 10;
76
77 /*
78 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
79 * dirty_background_ratio * the amount of dirtyable memory
80 */
81 static unsigned long dirty_background_bytes;
82
83 /*
84 * free highmem will not be subtracted from the total free memory
85 * for calculating free ratios if vm_highmem_is_dirtyable is true
86 */
87 static int vm_highmem_is_dirtyable;
88
89 /*
90 * The generator of dirty data starts writeback at this percentage
91 */
92 static int vm_dirty_ratio = 20;
93
94 /*
95 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
96 * vm_dirty_ratio * the amount of dirtyable memory
97 */
98 static unsigned long vm_dirty_bytes;
99
100 /*
101 * The interval between `kupdate'-style writebacks
102 */
103 unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
104
105 EXPORT_SYMBOL_GPL(dirty_writeback_interval);
106
107 /*
108 * The longest time for which data is allowed to remain dirty
109 */
110 unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
111
112 /* End of sysctl-exported parameters */
113
114 struct wb_domain global_wb_domain;
115
116 /*
117 * Length of period for aging writeout fractions of bdis. This is an
118 * arbitrarily chosen number. The longer the period, the slower fractions will
119 * reflect changes in current writeout rate.
120 */
121 #define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
122
123 #ifdef CONFIG_CGROUP_WRITEBACK
124
125 #define GDTC_INIT(__wb) .wb = (__wb), \
126 .dom = &global_wb_domain, \
127 .wb_completions = &(__wb)->completions
128
129 #define GDTC_INIT_NO_WB .dom = &global_wb_domain
130
131 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
132 .dom = mem_cgroup_wb_domain(__wb), \
133 .wb_completions = &(__wb)->memcg_completions, \
134 .gdtc = __gdtc
135
136 static bool mdtc_valid(struct dirty_throttle_control *dtc)
137 {
138 return dtc->dom;
139 }
140
141 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
142 {
143 return dtc->dom;
144 }
145
146 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
147 {
148 return mdtc->gdtc;
149 }
150
151 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
152 {
153 return &wb->memcg_completions;
154 }
155
156 static void wb_min_max_ratio(struct bdi_writeback *wb,
157 unsigned long *minp, unsigned long *maxp)
158 {
159 unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
160 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
161 unsigned long long min = wb->bdi->min_ratio;
162 unsigned long long max = wb->bdi->max_ratio;
163
164 /*
165 * @wb may already be clean by the time control reaches here and
166 * the total may not include its bw.
167 */
168 if (this_bw < tot_bw) {
169 if (min) {
170 min *= this_bw;
171 min = div64_ul(min, tot_bw);
172 }
173 if (max < 100 * BDI_RATIO_SCALE) {
174 max *= this_bw;
175 max = div64_ul(max, tot_bw);
176 }
177 }
178
179 *minp = min;
180 *maxp = max;
181 }
182
183 #else /* CONFIG_CGROUP_WRITEBACK */
184
185 #define GDTC_INIT(__wb) .wb = (__wb), \
186 .wb_completions = &(__wb)->completions
187 #define GDTC_INIT_NO_WB
188 #define MDTC_INIT(__wb, __gdtc)
189
190 static bool mdtc_valid(struct dirty_throttle_control *dtc)
191 {
192 return false;
193 }
194
195 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
196 {
197 return &global_wb_domain;
198 }
199
200 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
201 {
202 return NULL;
203 }
204
205 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
206 {
207 return NULL;
208 }
209
210 static void wb_min_max_ratio(struct bdi_writeback *wb,
211 unsigned long *minp, unsigned long *maxp)
212 {
213 *minp = wb->bdi->min_ratio;
214 *maxp = wb->bdi->max_ratio;
215 }
216
217 #endif /* CONFIG_CGROUP_WRITEBACK */
218
219 /*
220 * In a memory zone, there is a certain amount of pages we consider
221 * available for the page cache, which is essentially the number of
222 * free and reclaimable pages, minus some zone reserves to protect
223 * lowmem and the ability to uphold the zone's watermarks without
224 * requiring writeback.
225 *
226 * This number of dirtyable pages is the base value of which the
227 * user-configurable dirty ratio is the effective number of pages that
228 * are allowed to be actually dirtied. Per individual zone, or
229 * globally by using the sum of dirtyable pages over all zones.
230 *
231 * Because the user is allowed to specify the dirty limit globally as
232 * absolute number of bytes, calculating the per-zone dirty limit can
233 * require translating the configured limit into a percentage of
234 * global dirtyable memory first.
235 */
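
/*
 * Rough sketch of the resulting calculation (illustrative only):
 *
 *	dirtyable   = free + inactive_file + active_file - reserves
 *	dirty_limit = vm_dirty_ratio * dirtyable / 100   (or vm_dirty_bytes)
 *
 * e.g. with 8GiB of dirtyable memory and vm_dirty_ratio = 20, the
 * throttling threshold works out to about 1.6GiB of dirty page cache.
 */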
236
237 /**
238 * node_dirtyable_memory - number of dirtyable pages in a node
239 * @pgdat: the node
240 *
241 * Return: the node's number of pages potentially available for dirty
242 * page cache. This is the base value for the per-node dirty limits.
243 */
244 static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
245 {
246 unsigned long nr_pages = 0;
247 int z;
248
249 for (z = 0; z < MAX_NR_ZONES; z++) {
250 struct zone *zone = pgdat->node_zones + z;
251
252 if (!populated_zone(zone))
253 continue;
254
255 nr_pages += zone_page_state(zone, NR_FREE_PAGES);
256 }
257
258 /*
259 * Pages reserved for the kernel should not be considered
260 * dirtyable, to prevent a situation where reclaim has to
261 * clean pages in order to balance the zones.
262 */
263 nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
264
265 nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
266 nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
267
268 return nr_pages;
269 }
270
271 static unsigned long highmem_dirtyable_memory(unsigned long total)
272 {
273 #ifdef CONFIG_HIGHMEM
274 int node;
275 unsigned long x = 0;
276 int i;
277
278 for_each_node_state(node, N_HIGH_MEMORY) {
279 for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
280 struct zone *z;
281 unsigned long nr_pages;
282
283 if (!is_highmem_idx(i))
284 continue;
285
286 z = &NODE_DATA(node)->node_zones[i];
287 if (!populated_zone(z))
288 continue;
289
290 nr_pages = zone_page_state(z, NR_FREE_PAGES);
291 /* watch for underflows */
292 nr_pages -= min(nr_pages, high_wmark_pages(z));
293 nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
294 nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
295 x += nr_pages;
296 }
297 }
298
299 /*
300 * Make sure that the number of highmem pages is never larger
301 * than the number of the total dirtyable memory. This can only
302 * occur in very strange VM situations but we want to make sure
303 * that this does not occur.
304 */
305 return min(x, total);
306 #else
307 return 0;
308 #endif
309 }
310
311 /**
312 * global_dirtyable_memory - number of globally dirtyable pages
313 *
314 * Return: the global number of pages potentially available for dirty
315 * page cache. This is the base value for the global dirty limits.
316 */
317 static unsigned long global_dirtyable_memory(void)
318 {
319 unsigned long x;
320
321 x = global_zone_page_state(NR_FREE_PAGES);
322 /*
323 * Pages reserved for the kernel should not be considered
324 * dirtyable, to prevent a situation where reclaim has to
325 * clean pages in order to balance the zones.
326 */
327 x -= min(x, totalreserve_pages);
328
329 x += global_node_page_state(NR_INACTIVE_FILE);
330 x += global_node_page_state(NR_ACTIVE_FILE);
331
332 if (!vm_highmem_is_dirtyable)
333 x -= highmem_dirtyable_memory(x);
334
335 return x + 1; /* Ensure that we never return 0 */
336 }
337
338 /**
339 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
340 * @dtc: dirty_throttle_control of interest
341 *
342 * Calculate @dtc->thresh and ->bg_thresh considering
343 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
344 * must ensure that @dtc->avail is set before calling this function. The
345 * dirty limits will be lifted by 1/4 for real-time tasks.
346 */
347 static void domain_dirty_limits(struct dirty_throttle_control *dtc)
348 {
349 const unsigned long available_memory = dtc->avail;
350 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
351 unsigned long bytes = vm_dirty_bytes;
352 unsigned long bg_bytes = dirty_background_bytes;
353 /* convert ratios to per-PAGE_SIZE for higher precision */
354 unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
355 unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
356 unsigned long thresh;
357 unsigned long bg_thresh;
358 struct task_struct *tsk;
359
360 /* gdtc is !NULL iff @dtc is for memcg domain */
361 if (gdtc) {
362 unsigned long global_avail = gdtc->avail;
363
364 /*
365 * The byte settings can't be applied directly to memcg
366 * domains. Convert them to ratios by scaling against
367 * globally available memory. As the ratios are in
368 * per-PAGE_SIZE, they can be obtained by dividing bytes by
369 * number of pages.
370 */
371 if (bytes)
372 ratio = min(DIV_ROUND_UP(bytes, global_avail),
373 PAGE_SIZE);
374 if (bg_bytes)
375 bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
376 PAGE_SIZE);
377 bytes = bg_bytes = 0;
378 }
379
380 if (bytes)
381 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
382 else
383 thresh = (ratio * available_memory) / PAGE_SIZE;
384
385 if (bg_bytes)
386 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
387 else
388 bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
389
390 tsk = current;
391 if (rt_or_dl_task(tsk)) {
392 bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
393 thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
394 }
395 /*
396 * Dirty throttling logic assumes the limits in page units fit into
397 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
398 */
399 if (thresh > UINT_MAX)
400 thresh = UINT_MAX;
401 /* This makes sure bg_thresh is within 32-bits as well */
402 if (bg_thresh >= thresh)
403 bg_thresh = thresh / 2;
404 dtc->thresh = thresh;
405 dtc->bg_thresh = bg_thresh;
406
407 /* we should eventually report the domain in the TP */
408 if (!gdtc)
409 trace_global_dirty_state(bg_thresh, thresh);
410 }
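
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): with
 * vm_dirty_ratio = 20, ratio = 20 * 4096 / 100 = 819, so
 * thresh = 819 * available_memory / 4096, i.e. just under 20% of the
 * dirtyable pages.  For a memcg domain with vm_dirty_bytes = 1GiB and
 * global_avail = 2^20 pages (4GiB), ratio becomes
 * DIV_ROUND_UP(2^30, 2^20) = 1024, i.e. such a memcg may dirty up to
 * 25% of its own available memory.
 */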
411
412 /**
413 * global_dirty_limits - background-writeback and dirty-throttling thresholds
414 * @pbackground: out parameter for bg_thresh
415 * @pdirty: out parameter for thresh
416 *
417 * Calculate bg_thresh and thresh for global_wb_domain. See
418 * domain_dirty_limits() for details.
419 */
420 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
421 {
422 struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
423
424 gdtc.avail = global_dirtyable_memory();
425 domain_dirty_limits(&gdtc);
426
427 *pbackground = gdtc.bg_thresh;
428 *pdirty = gdtc.thresh;
429 }
430
431 /**
432 * node_dirty_limit - maximum number of dirty pages allowed in a node
433 * @pgdat: the node
434 *
435 * Return: the maximum number of dirty pages allowed in a node, based
436 * on the node's dirtyable memory.
437 */
438 static unsigned long node_dirty_limit(struct pglist_data *pgdat)
439 {
440 unsigned long node_memory = node_dirtyable_memory(pgdat);
441 struct task_struct *tsk = current;
442 unsigned long dirty;
443
444 if (vm_dirty_bytes)
445 dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
446 node_memory / global_dirtyable_memory();
447 else
448 dirty = vm_dirty_ratio * node_memory / 100;
449
450 if (rt_or_dl_task(tsk))
451 dirty += dirty / 4;
452
453 /*
454 * Dirty throttling logic assumes the limits in page units fit into
455 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
456 */
457 return min_t(unsigned long, dirty, UINT_MAX);
458 }
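
/*
 * Example (illustrative): with vm_dirty_ratio = 20 and a node holding
 * 1,000,000 dirtyable pages, the node limit is 200,000 pages; an
 * rt/dl task gets the 25% bonus, i.e. 250,000 pages.
 */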
459
460 /**
461 * node_dirty_ok - tells whether a node is within its dirty limits
462 * @pgdat: the node to check
463 *
464 * Return: %true when the dirty pages in @pgdat are within the node's
465 * dirty limit, %false if the limit is exceeded.
466 */
467 bool node_dirty_ok(struct pglist_data *pgdat)
468 {
469 unsigned long limit = node_dirty_limit(pgdat);
470 unsigned long nr_pages = 0;
471
472 nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
473 nr_pages += node_page_state(pgdat, NR_WRITEBACK);
474
475 return nr_pages <= limit;
476 }
477
478 #ifdef CONFIG_SYSCTL
479 static int dirty_background_ratio_handler(const struct ctl_table *table, int write,
480 void *buffer, size_t *lenp, loff_t *ppos)
481 {
482 int ret;
483
484 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
485 if (ret == 0 && write)
486 dirty_background_bytes = 0;
487 return ret;
488 }
489
490 static int dirty_background_bytes_handler(const struct ctl_table *table, int write,
491 void *buffer, size_t *lenp, loff_t *ppos)
492 {
493 int ret;
494 unsigned long old_bytes = dirty_background_bytes;
495
496 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
497 if (ret == 0 && write) {
498 if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
499 UINT_MAX) {
500 dirty_background_bytes = old_bytes;
501 return -ERANGE;
502 }
503 dirty_background_ratio = 0;
504 }
505 return ret;
506 }
507
508 static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer,
509 size_t *lenp, loff_t *ppos)
510 {
511 int old_ratio = vm_dirty_ratio;
512 int ret;
513
514 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
515 if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
516 vm_dirty_bytes = 0;
517 writeback_set_ratelimit();
518 }
519 return ret;
520 }
521
522 static int dirty_bytes_handler(const struct ctl_table *table, int write,
523 void *buffer, size_t *lenp, loff_t *ppos)
524 {
525 unsigned long old_bytes = vm_dirty_bytes;
526 int ret;
527
528 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
529 if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
530 if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
531 vm_dirty_bytes = old_bytes;
532 return -ERANGE;
533 }
534 writeback_set_ratelimit();
535 vm_dirty_ratio = 0;
536 }
537 return ret;
538 }
539 #endif
540
541 static unsigned long wp_next_time(unsigned long cur_time)
542 {
543 cur_time += VM_COMPLETIONS_PERIOD_LEN;
544 /* 0 has a special meaning... */
545 if (!cur_time)
546 return 1;
547 return cur_time;
548 }
549
550 static void wb_domain_writeout_add(struct wb_domain *dom,
551 struct fprop_local_percpu *completions,
552 unsigned int max_prop_frac, long nr)
553 {
554 __fprop_add_percpu_max(&dom->completions, completions,
555 max_prop_frac, nr);
556 /* First event after period switching was turned off? */
557 if (unlikely(!dom->period_time)) {
558 /*
559 * We can race with other wb_domain_writeout_add calls here but
560 * it does not cause any harm since the resulting time when
561 * timer will fire and what is in writeout_period_time will be
562 * roughly the same.
563 */
564 dom->period_time = wp_next_time(jiffies);
565 mod_timer(&dom->period_timer, dom->period_time);
566 }
567 }
568
569 /*
570 * Increment @wb's writeout completion count and the global writeout
571 * completion count. Called from __folio_end_writeback().
572 */
573 static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
574 {
575 struct wb_domain *cgdom;
576
577 wb_stat_mod(wb, WB_WRITTEN, nr);
578 wb_domain_writeout_add(&global_wb_domain, &wb->completions,
579 wb->bdi->max_prop_frac, nr);
580
581 cgdom = mem_cgroup_wb_domain(wb);
582 if (cgdom)
583 wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
584 wb->bdi->max_prop_frac, nr);
585 }
586
587 void wb_writeout_inc(struct bdi_writeback *wb)
588 {
589 unsigned long flags;
590
591 local_irq_save(flags);
592 __wb_writeout_add(wb, 1);
593 local_irq_restore(flags);
594 }
595 EXPORT_SYMBOL_GPL(wb_writeout_inc);
596
597 /*
598  * On an idle system, we can be called long after we scheduled because we use
599  * deferred timers, so account for the missed periods.
600 */
601 static void writeout_period(struct timer_list *t)
602 {
603 struct wb_domain *dom = timer_container_of(dom, t, period_timer);
604 int miss_periods = (jiffies - dom->period_time) /
605 VM_COMPLETIONS_PERIOD_LEN;
606
607 if (fprop_new_period(&dom->completions, miss_periods + 1)) {
608 dom->period_time = wp_next_time(dom->period_time +
609 miss_periods * VM_COMPLETIONS_PERIOD_LEN);
610 mod_timer(&dom->period_timer, dom->period_time);
611 } else {
612 /*
613 * Aging has zeroed all fractions. Stop wasting CPU on period
614 * updates.
615 */
616 dom->period_time = 0;
617 }
618 }
619
620 int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
621 {
622 memset(dom, 0, sizeof(*dom));
623
624 spin_lock_init(&dom->lock);
625
626 timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
627
628 dom->dirty_limit_tstamp = jiffies;
629
630 return fprop_global_init(&dom->completions, gfp);
631 }
632
633 #ifdef CONFIG_CGROUP_WRITEBACK
634 void wb_domain_exit(struct wb_domain *dom)
635 {
636 timer_delete_sync(&dom->period_timer);
637 fprop_global_destroy(&dom->completions);
638 }
639 #endif
640
641 /*
642 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
643 * registered backing devices, which, for obvious reasons, can not
644 * exceed 100%.
645 */
646 static unsigned int bdi_min_ratio;
647
648 static int bdi_check_pages_limit(unsigned long pages)
649 {
650 unsigned long max_dirty_pages = global_dirtyable_memory();
651
652 if (pages > max_dirty_pages)
653 return -EINVAL;
654
655 return 0;
656 }
657
658 static unsigned long bdi_ratio_from_pages(unsigned long pages)
659 {
660 unsigned long background_thresh;
661 unsigned long dirty_thresh;
662 unsigned long ratio;
663
664 global_dirty_limits(&background_thresh, &dirty_thresh);
665 if (!dirty_thresh)
666 return -EINVAL;
667 ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);
668
669 return ratio;
670 }
671
672 static u64 bdi_get_bytes(unsigned int ratio)
673 {
674 unsigned long background_thresh;
675 unsigned long dirty_thresh;
676 u64 bytes;
677
678 global_dirty_limits(&background_thresh, &dirty_thresh);
679 bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;
680
681 return bytes;
682 }
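
/*
 * bdi_ratio_from_pages() and bdi_get_bytes() are (roughly) inverse
 * conversions: the former maps an absolute page count to a
 * BDI_RATIO_SCALE'd percentage of the current global dirty_thresh,
 * the latter maps such a percentage back to bytes.  E.g. a page count
 * equal to half of dirty_thresh yields 50% (times BDI_RATIO_SCALE),
 * and bdi_get_bytes() of that ratio returns half of dirty_thresh in
 * bytes.
 */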
683
684 static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
685 {
686 unsigned int delta;
687 int ret = 0;
688
689 if (min_ratio > 100 * BDI_RATIO_SCALE)
690 return -EINVAL;
691
692 spin_lock_bh(&bdi_lock);
693 if (min_ratio > bdi->max_ratio) {
694 ret = -EINVAL;
695 } else {
696 if (min_ratio < bdi->min_ratio) {
697 delta = bdi->min_ratio - min_ratio;
698 bdi_min_ratio -= delta;
699 bdi->min_ratio = min_ratio;
700 } else {
701 delta = min_ratio - bdi->min_ratio;
702 if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
703 bdi_min_ratio += delta;
704 bdi->min_ratio = min_ratio;
705 } else {
706 ret = -EINVAL;
707 }
708 }
709 }
710 spin_unlock_bh(&bdi_lock);
711
712 return ret;
713 }
714
715 static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
716 {
717 int ret = 0;
718
719 if (max_ratio > 100 * BDI_RATIO_SCALE)
720 return -EINVAL;
721
722 spin_lock_bh(&bdi_lock);
723 if (bdi->min_ratio > max_ratio) {
724 ret = -EINVAL;
725 } else {
726 bdi->max_ratio = max_ratio;
727 bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
728 (100 * BDI_RATIO_SCALE);
729 }
730 spin_unlock_bh(&bdi_lock);
731
732 return ret;
733 }
734
735 int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
736 {
737 return __bdi_set_min_ratio(bdi, min_ratio);
738 }
739
740 int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
741 {
742 return __bdi_set_max_ratio(bdi, max_ratio);
743 }
744
745 int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
746 {
747 return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
748 }
749
750 int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
751 {
752 return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
753 }
754 EXPORT_SYMBOL(bdi_set_max_ratio);
755
756 u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
757 {
758 return bdi_get_bytes(bdi->min_ratio);
759 }
760
761 int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
762 {
763 int ret;
764 unsigned long pages = min_bytes >> PAGE_SHIFT;
765 long min_ratio;
766
767 ret = bdi_check_pages_limit(pages);
768 if (ret)
769 return ret;
770
771 min_ratio = bdi_ratio_from_pages(pages);
772 if (min_ratio < 0)
773 return min_ratio;
774 return __bdi_set_min_ratio(bdi, min_ratio);
775 }
776
777 u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
778 {
779 return bdi_get_bytes(bdi->max_ratio);
780 }
781
782 int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
783 {
784 int ret;
785 unsigned long pages = max_bytes >> PAGE_SHIFT;
786 long max_ratio;
787
788 ret = bdi_check_pages_limit(pages);
789 if (ret)
790 return ret;
791
792 max_ratio = bdi_ratio_from_pages(pages);
793 if (max_ratio < 0)
794 return max_ratio;
795 return __bdi_set_max_ratio(bdi, max_ratio);
796 }
797
798 int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
799 {
800 if (strict_limit > 1)
801 return -EINVAL;
802
803 spin_lock_bh(&bdi_lock);
804 if (strict_limit)
805 bdi->capabilities |= BDI_CAP_STRICTLIMIT;
806 else
807 bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
808 spin_unlock_bh(&bdi_lock);
809
810 return 0;
811 }
812
813 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
814 unsigned long bg_thresh)
815 {
816 return (thresh + bg_thresh) / 2;
817 }
818
819 static unsigned long hard_dirty_limit(struct wb_domain *dom,
820 unsigned long thresh)
821 {
822 return max(thresh, dom->dirty_limit);
823 }
824
825 /*
826 * Memory which can be further allocated to a memcg domain is capped by
827 * system-wide clean memory excluding the amount being used in the domain.
828 */
829 static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
830 unsigned long filepages, unsigned long headroom)
831 {
832 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
833 unsigned long clean = filepages - min(filepages, mdtc->dirty);
834 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
835 unsigned long other_clean = global_clean - min(global_clean, clean);
836
837 mdtc->avail = filepages + min(headroom, other_clean);
838 }
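
/*
 * Worked example (illustrative): a memcg wb with filepages = 1000,
 * dirty = 200 and headroom = 500 in a system with gdtc->avail = 10000
 * and gdtc->dirty = 1000:
 *
 *	clean        = 1000  - 200  = 800
 *	global_clean = 10000 - 1000 = 9000
 *	other_clean  = 9000  - 800  = 8200
 *	avail        = 1000 + min(500, 8200) = 1500
 */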
839
840 static inline bool dtc_is_global(struct dirty_throttle_control *dtc)
841 {
842 return mdtc_gdtc(dtc) == NULL;
843 }
844
845 /*
846 * Dirty background will ignore pages being written as we're trying to
847 * decide whether to put more under writeback.
848 */
849 static void domain_dirty_avail(struct dirty_throttle_control *dtc,
850 bool include_writeback)
851 {
852 if (dtc_is_global(dtc)) {
853 dtc->avail = global_dirtyable_memory();
854 dtc->dirty = global_node_page_state(NR_FILE_DIRTY);
855 if (include_writeback)
856 dtc->dirty += global_node_page_state(NR_WRITEBACK);
857 } else {
858 unsigned long filepages = 0, headroom = 0, writeback = 0;
859
860 mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty,
861 &writeback);
862 if (include_writeback)
863 dtc->dirty += writeback;
864 mdtc_calc_avail(dtc, filepages, headroom);
865 }
866 }
867
868 /**
869 * __wb_calc_thresh - @wb's share of dirty threshold
870 * @dtc: dirty_throttle_context of interest
871 * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
872 *
873 * Note that balance_dirty_pages() will only seriously take dirty throttling
874 * threshold as a hard limit when sleeping max_pause per page is not enough
875 * to keep the dirty pages under control. For example, when the device is
876 * completely stalled due to some error conditions, or when there are 1000
877 * dd tasks writing to a slow 10MB/s USB key.
878  * In other, normal situations, it acts more gently by throttling the tasks
879  * more (rather than completely blocking them) when the wb dirty pages go high.
880 *
881 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
882 * - starving fast devices
883 * - piling up dirty pages (that will take long time to sync) on slow devices
884 *
885 * The wb's share of dirty limit will be adapting to its throughput and
886 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
887 *
888 * Return: @wb's dirty limit in pages. For dirty throttling limit, the term
889 * "dirty" in the context of dirty balancing includes all PG_dirty and
890 * PG_writeback pages.
891 */
892 static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
893 unsigned long thresh)
894 {
895 struct wb_domain *dom = dtc_dom(dtc);
896 struct bdi_writeback *wb = dtc->wb;
897 u64 wb_thresh;
898 u64 wb_max_thresh;
899 unsigned long numerator, denominator;
900 unsigned long wb_min_ratio, wb_max_ratio;
901
902 /*
903 * Calculate this wb's share of the thresh ratio.
904 */
905 fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
906 &numerator, &denominator);
907
908 wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
909 wb_thresh *= numerator;
910 wb_thresh = div64_ul(wb_thresh, denominator);
911
912 wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);
913
914 wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
915
916 /*
917 * It's very possible that wb_thresh is close to 0 not because the
918  * device is slow, but that it has remained inactive for a long time.
919  * Honour such devices with a reasonably good (hopefully IO efficient)
920  * threshold, so that the occasional writes won't be blocked and active
921  * writes can ramp up the threshold quickly.
922 */
923 if (thresh > dtc->dirty) {
924 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT))
925 wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100);
926 else
927 wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8);
928 }
929
930 wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
931 if (wb_thresh > wb_max_thresh)
932 wb_thresh = wb_max_thresh;
933
934 return wb_thresh;
935 }
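
/*
 * Informal sketch of the above (illustrative): a wb that recently did
 * 25% of the domain's writeout completions starts with roughly 25% of
 * @thresh (less the globally reserved min_ratio shares), is lifted by
 * its own bdi->min_ratio, gets a floor of (thresh - dirty) / 8 (or
 * / 100 under strictlimit) so an idle device still has a usable
 * threshold, and is finally capped at bdi->max_ratio of @thresh.
 */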
936
937 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
938 {
939 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
940
941 domain_dirty_avail(&gdtc, true);
942 return __wb_calc_thresh(&gdtc, thresh);
943 }
944
945 unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
946 {
947 struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
948 struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
949
950 domain_dirty_avail(&gdtc, true);
951 domain_dirty_avail(&mdtc, true);
952 domain_dirty_limits(&mdtc);
953
954 return __wb_calc_thresh(&mdtc, mdtc.thresh);
955 }
956
957 /*
958 * setpoint - dirty 3
959 * f(dirty) := 1.0 + (----------------)
960 * limit - setpoint
961 *
962 * it's a 3rd order polynomial that subjects to
963 *
964 * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast
965 * (2) f(setpoint) = 1.0 => the balance point
966 * (3) f(limit) = 0 => the hard limit
967 * (4) df/dx <= 0 => negative feedback control
968 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
969 * => fast response on large errors; small oscillation near setpoint
970 */
971 static long long pos_ratio_polynom(unsigned long setpoint,
972 unsigned long dirty,
973 unsigned long limit)
974 {
975 long long pos_ratio;
976 long x;
977
978 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
979 (limit - setpoint) | 1);
980 pos_ratio = x;
981 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
982 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
983 pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
984
985 return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
986 }
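
/*
 * Worked example (illustrative): with freerun = 100 and limit = 300,
 * setpoint = (freerun + limit) / 2 = 200 and
 *
 *	f(100) = 2.0	(at the freerun ceiling: ramp up fast)
 *	f(200) = 1.0	(at the setpoint: balanced)
 *	f(250) = 0.875	(above the setpoint: throttle harder)
 *	f(300) = 0.0	(at the hard limit: block)
 *
 * The returned value is this ratio shifted left by RATELIMIT_CALC_SHIFT.
 */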
987
988 /*
989 * Dirty position control.
990 *
991 * (o) global/bdi setpoints
992 *
993 * We want the dirty pages be balanced around the global/wb setpoints.
994 * When the number of dirty pages is higher/lower than the setpoint, the
995 * dirty position control ratio (and hence task dirty ratelimit) will be
996 * decreased/increased to bring the dirty pages back to the setpoint.
997 *
998 * pos_ratio = 1 << RATELIMIT_CALC_SHIFT
999 *
1000 * if (dirty < setpoint) scale up pos_ratio
1001 * if (dirty > setpoint) scale down pos_ratio
1002 *
1003 * if (wb_dirty < wb_setpoint) scale up pos_ratio
1004 * if (wb_dirty > wb_setpoint) scale down pos_ratio
1005 *
1006 * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
1007 *
1008 * (o) global control line
1009 *
1010 * ^ pos_ratio
1011 * |
1012 * | |<===== global dirty control scope ======>|
1013 * 2.0 * * * * * * *
1014 * | .*
1015 * | . *
1016 * | . *
1017 * | . *
1018 * | . *
1019 * | . *
1020 * 1.0 ................................*
1021 * | . . *
1022 * | . . *
1023 * | . . *
1024 * | . . *
1025 * | . . *
1026 * 0 +------------.------------------.----------------------*------------->
1027 * freerun^ setpoint^ limit^ dirty pages
1028 *
1029 * (o) wb control line
1030 *
1031 * ^ pos_ratio
1032 * |
1033 * | *
1034 * | *
1035 * | *
1036 * | *
1037 * | * |<=========== span ============>|
1038 * 1.0 .......................*
1039 * | . *
1040 * | . *
1041 * | . *
1042 * | . *
1043 * | . *
1044 * | . *
1045 * | . *
1046 * | . *
1047 * | . *
1048 * | . *
1049 * | . *
1050 * 1/4 ...............................................* * * * * * * * * * * *
1051 * | . .
1052 * | . .
1053 * | . .
1054 * 0 +----------------------.-------------------------------.------------->
1055 * wb_setpoint^ x_intercept^
1056 *
1057 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
1058 * be smoothly throttled down to normal if it starts high in situations like
1059 * - start writing to a slow SD card and a fast disk at the same time. The SD
1060 * card's wb_dirty may rush to many times higher than wb_setpoint.
1061 * - the wb dirty thresh drops quickly due to change of JBOD workload
1062 */
1063 static void wb_position_ratio(struct dirty_throttle_control *dtc)
1064 {
1065 struct bdi_writeback *wb = dtc->wb;
1066 unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
1067 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1068 unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1069 unsigned long wb_thresh = dtc->wb_thresh;
1070 unsigned long x_intercept;
1071 unsigned long setpoint; /* dirty pages' target balance point */
1072 unsigned long wb_setpoint;
1073 unsigned long span;
1074 long long pos_ratio; /* for scaling up/down the rate limit */
1075 long x;
1076
1077 dtc->pos_ratio = 0;
1078
1079 if (unlikely(dtc->dirty >= limit))
1080 return;
1081
1082 /*
1083 * global setpoint
1084 *
1085 * See comment for pos_ratio_polynom().
1086 */
1087 setpoint = (freerun + limit) / 2;
1088 pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
1089
1090 /*
1091 * The strictlimit feature is a tool preventing mistrusted filesystems
1092 * from growing a large number of dirty pages before throttling. For
1093 * such filesystems balance_dirty_pages always checks wb counters
1094  * against wb limits, even if the global "nr_dirty" is under "freerun".
1095 * This is especially important for fuse which sets bdi->max_ratio to
1096 * 1% by default.
1097 *
1098 * Here, in wb_position_ratio(), we calculate pos_ratio based on
1099 * two values: wb_dirty and wb_thresh. Let's consider an example:
1100 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
1101 * limits are set by default to 10% and 20% (background and throttle).
1102 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
1103 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
1104 * about ~6K pages (as the average of background and throttle wb
1105 * limits). The 3rd order polynomial will provide positive feedback if
1106 * wb_dirty is under wb_setpoint and vice versa.
1107 *
1108 * Note, that we cannot use global counters in these calculations
1109 * because we want to throttle process writing to a strictlimit wb
1110 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
1111 * in the example above).
1112 */
1113 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1114 long long wb_pos_ratio;
1115
1116 if (dtc->wb_dirty >= wb_thresh)
1117 return;
1118
1119 wb_setpoint = dirty_freerun_ceiling(wb_thresh,
1120 dtc->wb_bg_thresh);
1121
1122 if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
1123 return;
1124
1125 wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
1126 wb_thresh);
1127
1128 /*
1129 * Typically, for strictlimit case, wb_setpoint << setpoint
1130  * and pos_ratio >> wb_pos_ratio. In other words, the global
1131  * state ("dirty") is not the limiting factor and we have to
1132 * make decision based on wb counters. But there is an
1133 * important case when global pos_ratio should get precedence:
1134 * global limits are exceeded (e.g. due to activities on other
1135 * wb's) while given strictlimit wb is below limit.
1136 *
1137 * "pos_ratio * wb_pos_ratio" would work for the case above,
1138 * but it would look too non-natural for the case of all
1139 * activity in the system coming from a single strictlimit wb
1140 * with bdi->max_ratio == 100%.
1141 *
1142 * Note that min() below somewhat changes the dynamics of the
1143 * control system. Normally, pos_ratio value can be well over 3
1144 * (when globally we are at freerun and wb is well below wb
1145 * setpoint). Now the maximum pos_ratio in the same situation
1146 * is 2. We might want to tweak this if we observe the control
1147 * system is too slow to adapt.
1148 */
1149 dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1150 return;
1151 }
1152
1153 /*
1154 * We have computed basic pos_ratio above based on global situation. If
1155 * the wb is over/under its share of dirty pages, we want to scale
1156 * pos_ratio further down/up. That is done by the following mechanism.
1157 */
1158
1159 /*
1160 * wb setpoint
1161 *
1162 * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
1163 *
1164 * x_intercept - wb_dirty
1165 * := --------------------------
1166 * x_intercept - wb_setpoint
1167 *
1168 * The main wb control line is a linear function that subjects to
1169 *
1170 * (1) f(wb_setpoint) = 1.0
1171 * (2) k = - 1 / (8 * write_bw) (in single wb case)
1172 * or equally: x_intercept = wb_setpoint + 8 * write_bw
1173 *
1174 * For single wb case, the dirty pages are observed to fluctuate
1175 * regularly within range
1176 * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
1177 * for various filesystems, where (2) can yield in a reasonable 12.5%
1178 * fluctuation range for pos_ratio.
1179 *
1180 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
1181 * own size, so move the slope over accordingly and choose a slope that
1182 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
1183 */
1184 if (unlikely(wb_thresh > dtc->thresh))
1185 wb_thresh = dtc->thresh;
1186 /*
1187 * scale global setpoint to wb's:
1188 * wb_setpoint = setpoint * wb_thresh / thresh
1189 */
1190 x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1191 wb_setpoint = setpoint * (u64)x >> 16;
1192 /*
1193 * Use span=(8*write_bw) in single wb case as indicated by
1194 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1195 *
1196 * wb_thresh thresh - wb_thresh
1197 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1198 * thresh thresh
1199 */
1200 span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1201 x_intercept = wb_setpoint + span;
1202
1203 if (dtc->wb_dirty < x_intercept - span / 4) {
1204 pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1205 (x_intercept - wb_setpoint) | 1);
1206 } else
1207 pos_ratio /= 4;
1208
1209 /*
1210 * wb reserve area, safeguard against dirty pool underrun and disk idle
1211 * It may push the desired control point of global dirty pages higher
1212 * than setpoint.
1213 */
1214 x_intercept = wb_thresh / 2;
1215 if (dtc->wb_dirty < x_intercept) {
1216 if (dtc->wb_dirty > x_intercept / 8)
1217 pos_ratio = div_u64(pos_ratio * x_intercept,
1218 dtc->wb_dirty);
1219 else
1220 pos_ratio *= 8;
1221 }
1222
1223 dtc->pos_ratio = pos_ratio;
1224 }
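
/*
 * Illustrative numbers for the wb control line above: for a single wb
 * with wb_thresh ~= thresh, the wb_thresh/thresh fraction is ~= 1 and
 * span ~= 8 * write_bw, so x_intercept = wb_setpoint + 8 * write_bw.
 * A wb_dirty excursion of write_bw/2 above wb_setpoint then scales
 * pos_ratio by (8 - 0.5) / 8, i.e. ~94%, which is one half of the
 * ~12.5% fluctuation band mentioned in the comment above.
 */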
1225
1226 static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1227 unsigned long elapsed,
1228 unsigned long written)
1229 {
1230 const unsigned long period = roundup_pow_of_two(3 * HZ);
1231 unsigned long avg = wb->avg_write_bandwidth;
1232 unsigned long old = wb->write_bandwidth;
1233 u64 bw;
1234
1235 /*
1236 * bw = written * HZ / elapsed
1237 *
1238 * bw * elapsed + write_bandwidth * (period - elapsed)
1239 * write_bandwidth = ---------------------------------------------------
1240 * period
1241 *
1242 * @written may have decreased due to folio_redirty_for_writepage().
1243 * Avoid underflowing @bw calculation.
1244 */
1245 bw = written - min(written, wb->written_stamp);
1246 bw *= HZ;
1247 if (unlikely(elapsed > period)) {
1248 bw = div64_ul(bw, elapsed);
1249 avg = bw;
1250 goto out;
1251 }
1252 bw += (u64)wb->write_bandwidth * (period - elapsed);
1253 bw >>= ilog2(period);
1254
1255 /*
1256 * one more level of smoothing, for filtering out sudden spikes
1257 */
1258 if (avg > old && old >= (unsigned long)bw)
1259 avg -= (avg - old) >> 3;
1260
1261 if (avg < old && old <= (unsigned long)bw)
1262 avg += (old - avg) >> 3;
1263
1264 out:
1265 /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1266 avg = max(avg, 1LU);
1267 if (wb_has_dirty_io(wb)) {
1268 long delta = avg - wb->avg_write_bandwidth;
1269 WARN_ON_ONCE(atomic_long_add_return(delta,
1270 &wb->bdi->tot_write_bandwidth) <= 0);
1271 }
1272 wb->write_bandwidth = bw;
1273 WRITE_ONCE(wb->avg_write_bandwidth, avg);
1274 }
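
/*
 * Worked example (illustrative, HZ == 1000, so period == 4096): if
 * 2500 pages were written during elapsed = 200 jiffies and the old
 * write_bandwidth was 10000 pages/s, then
 *
 *	bw = 2500 * 1000 + 10000 * (4096 - 200) = 41,460,000
 *	bw >> ilog2(4096) ~= 10122 pages/s
 *
 * so a single 200ms sample moves the estimate only slightly, and the
 * avg_write_bandwidth smoothing above damps spikes further.
 */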
1275
1276 static void update_dirty_limit(struct dirty_throttle_control *dtc)
1277 {
1278 struct wb_domain *dom = dtc_dom(dtc);
1279 unsigned long thresh = dtc->thresh;
1280 unsigned long limit = dom->dirty_limit;
1281
1282 /*
1283 * Follow up in one step.
1284 */
1285 if (limit < thresh) {
1286 limit = thresh;
1287 goto update;
1288 }
1289
1290 /*
1291 * Follow down slowly. Use the higher one as the target, because thresh
1292 * may drop below dirty. This is exactly the reason to introduce
1293 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1294 */
1295 thresh = max(thresh, dtc->dirty);
1296 if (limit > thresh) {
1297 limit -= (limit - thresh) >> 5;
1298 goto update;
1299 }
1300 return;
1301 update:
1302 dom->dirty_limit = limit;
1303 }
1304
1305 static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1306 unsigned long now)
1307 {
1308 struct wb_domain *dom = dtc_dom(dtc);
1309
1310 /*
1311 * check locklessly first to optimize away locking for the most time
1312 */
1313 if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1314 return;
1315
1316 spin_lock(&dom->lock);
1317 if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1318 update_dirty_limit(dtc);
1319 dom->dirty_limit_tstamp = now;
1320 }
1321 spin_unlock(&dom->lock);
1322 }
1323
1324 /*
1325 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1326 *
1327 * Normal wb tasks will be curbed at or below it in long term.
1328 * Obviously it should be around (write_bw / N) when there are N dd tasks.
1329 */
1330 static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1331 unsigned long dirtied,
1332 unsigned long elapsed)
1333 {
1334 struct bdi_writeback *wb = dtc->wb;
1335 unsigned long dirty = dtc->dirty;
1336 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1337 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1338 unsigned long setpoint = (freerun + limit) / 2;
1339 unsigned long write_bw = wb->avg_write_bandwidth;
1340 unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1341 unsigned long dirty_rate;
1342 unsigned long task_ratelimit;
1343 unsigned long balanced_dirty_ratelimit;
1344 unsigned long step;
1345 unsigned long x;
1346 unsigned long shift;
1347
1348 /*
1349 * The dirty rate will match the writeout rate in long term, except
1350 * when dirty pages are truncated by userspace or re-dirtied by FS.
1351 */
1352 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1353
1354 /*
1355 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1356 */
1357 task_ratelimit = (u64)dirty_ratelimit *
1358 dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1359 task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1360
1361 /*
1362 * A linear estimation of the "balanced" throttle rate. The theory is,
1363 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1364 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1365 * formula will yield the balanced rate limit (write_bw / N).
1366 *
1367 * Note that the expanded form is not a pure rate feedback:
1368 * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1)
1369 * but also takes pos_ratio into account:
1370 * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2)
1371 *
1372 * (1) is not realistic because pos_ratio also takes part in balancing
1373 * the dirty rate. Consider the state
1374 * pos_ratio = 0.5 (3)
1375 * rate = 2 * (write_bw / N) (4)
1376  * If (1) is used, it will get stuck in that state! Because each dd will
1377 * be throttled at
1378 * task_ratelimit = pos_ratio * rate = (write_bw / N) (5)
1379 * yielding
1380 * dirty_rate = N * task_ratelimit = write_bw (6)
1381 * put (6) into (1) we get
1382 * rate_(i+1) = rate_(i) (7)
1383 *
1384 * So we end up using (2) to always keep
1385 * rate_(i+1) ~= (write_bw / N) (8)
1386 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1387 * pos_ratio is able to drive itself to 1.0, which is not only where
1388 * the dirty count meet the setpoint, but also where the slope of
1389 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1390 */
1391 balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1392 dirty_rate | 1);
1393 /*
1394 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1395 */
1396 if (unlikely(balanced_dirty_ratelimit > write_bw))
1397 balanced_dirty_ratelimit = write_bw;
1398
1399 /*
1400 * We could safely do this and return immediately:
1401 *
1402 * wb->dirty_ratelimit = balanced_dirty_ratelimit;
1403 *
1404 * However to get a more stable dirty_ratelimit, the below elaborated
1405 * code makes use of task_ratelimit to filter out singular points and
1406 * limit the step size.
1407 *
1408 * The below code essentially only uses the relative value of
1409 *
1410 * task_ratelimit - dirty_ratelimit
1411 * = (pos_ratio - 1) * dirty_ratelimit
1412 *
1413 * which reflects the direction and size of dirty position error.
1414 */
1415
1416 /*
1417 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1418 * task_ratelimit is on the same side of dirty_ratelimit, too.
1419 * For example, when
1420 * - dirty_ratelimit > balanced_dirty_ratelimit
1421 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1422 * lowering dirty_ratelimit will help meet both the position and rate
1423 * control targets. Otherwise, don't update dirty_ratelimit if it will
1424 * only help meet the rate target. After all, what the users ultimately
1425 * feel and care are stable dirty rate and small position error.
1426 *
1427 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1428  * and filter out the singular points of balanced_dirty_ratelimit, which
1429 * keeps jumping around randomly and can even leap far away at times
1430 * due to the small 200ms estimation period of dirty_rate (we want to
1431 * keep that period small to reduce time lags).
1432 */
1433 step = 0;
1434
1435 /*
1436 * For strictlimit case, calculations above were based on wb counters
1437 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1438 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1439 * Hence, to calculate "step" properly, we have to use wb_dirty as
1440 * "dirty" and wb_setpoint as "setpoint".
1441 */
1442 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1443 dirty = dtc->wb_dirty;
1444 setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1445 }
1446
1447 if (dirty < setpoint) {
1448 x = min3(wb->balanced_dirty_ratelimit,
1449 balanced_dirty_ratelimit, task_ratelimit);
1450 if (dirty_ratelimit < x)
1451 step = x - dirty_ratelimit;
1452 } else {
1453 x = max3(wb->balanced_dirty_ratelimit,
1454 balanced_dirty_ratelimit, task_ratelimit);
1455 if (dirty_ratelimit > x)
1456 step = dirty_ratelimit - x;
1457 }
1458
1459 /*
1460 * Don't pursue 100% rate matching. It's impossible since the balanced
1461 * rate itself is constantly fluctuating. So decrease the track speed
1462 * when it gets close to the target. Helps eliminate pointless tremors.
1463 */
1464 shift = dirty_ratelimit / (2 * step + 1);
1465 if (shift < BITS_PER_LONG)
1466 step = DIV_ROUND_UP(step >> shift, 8);
1467 else
1468 step = 0;
1469
1470 if (dirty_ratelimit < balanced_dirty_ratelimit)
1471 dirty_ratelimit += step;
1472 else
1473 dirty_ratelimit -= step;
1474
1475 WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1476 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1477
1478 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1479 }
1480
1481 static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1482 struct dirty_throttle_control *mdtc,
1483 bool update_ratelimit)
1484 {
1485 struct bdi_writeback *wb = gdtc->wb;
1486 unsigned long now = jiffies;
1487 unsigned long elapsed;
1488 unsigned long dirtied;
1489 unsigned long written;
1490
1491 spin_lock(&wb->list_lock);
1492
1493 /*
1494 * Lockless checks for elapsed time are racy and delayed update after
1495 * IO completion doesn't do it at all (to make sure written pages are
1496 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
1497 * division errors.
1498 */
1499 elapsed = max(now - wb->bw_time_stamp, 1UL);
1500 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1501 written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1502
1503 if (update_ratelimit) {
1504 domain_update_dirty_limit(gdtc, now);
1505 wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1506
1507 /*
1508 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1509 * compiler has no way to figure that out. Help it.
1510 */
1511 if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1512 domain_update_dirty_limit(mdtc, now);
1513 wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1514 }
1515 }
1516 wb_update_write_bandwidth(wb, elapsed, written);
1517
1518 wb->dirtied_stamp = dirtied;
1519 wb->written_stamp = written;
1520 WRITE_ONCE(wb->bw_time_stamp, now);
1521 spin_unlock(&wb->list_lock);
1522 }
1523
1524 void wb_update_bandwidth(struct bdi_writeback *wb)
1525 {
1526 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1527
1528 __wb_update_bandwidth(&gdtc, NULL, false);
1529 }
1530
1531 /* Interval after which we consider wb idle and don't estimate bandwidth */
1532 #define WB_BANDWIDTH_IDLE_JIF (HZ)
1533
1534 static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1535 {
1536 unsigned long now = jiffies;
1537 unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1538
1539 if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1540 !atomic_read(&wb->writeback_inodes)) {
1541 spin_lock(&wb->list_lock);
1542 wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1543 wb->written_stamp = wb_stat(wb, WB_WRITTEN);
1544 WRITE_ONCE(wb->bw_time_stamp, now);
1545 spin_unlock(&wb->list_lock);
1546 }
1547 }
1548
1549 /*
1550 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1551 * will look to see if it needs to start dirty throttling.
1552 *
1553 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1554  * global_zone_page_state() too often. So scale it roughly with the square root of the safety margin
1555 * (the number of pages we may dirty without exceeding the dirty limits).
1556 */
1557 static unsigned long dirty_poll_interval(unsigned long dirty,
1558 unsigned long thresh)
1559 {
1560 if (thresh > dirty)
1561 return 1UL << (ilog2(thresh - dirty) >> 1);
1562
1563 return 1;
1564 }
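
/*
 * Example (illustrative): a safety margin of 2^20 pages (4GiB with
 * 4KiB pages) gives a poll interval of 1 << (20 >> 1) = 1024 pages,
 * while a margin of only 256 pages gives 1 << 4 = 16 pages.
 */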
1565
1566 static unsigned long wb_max_pause(struct bdi_writeback *wb,
1567 unsigned long wb_dirty)
1568 {
1569 unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1570 unsigned long t;
1571
1572 /*
1573 * Limit pause time for small memory systems. If sleeping for too long
1574 * time, a small pool of dirty/writeback pages may go empty and disk go
1575 * idle.
1576 *
1577 * 8 serves as the safety ratio.
1578 */
1579 t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1580 t++;
1581
1582 return min_t(unsigned long, t, MAX_PAUSE);
1583 }
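
/*
 * Example (illustrative, HZ == 1000): with avg_write_bandwidth of
 * 25600 pages/s (100MiB/s at 4KiB pages) and wb_dirty = 3200 pages,
 * t = 3200 / (1 + 25600 / 128) + 1 = 16 jiffies, well under the
 * 200ms MAX_PAUSE cap.
 */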
1584
1585 static long wb_min_pause(struct bdi_writeback *wb,
1586 long max_pause,
1587 unsigned long task_ratelimit,
1588 unsigned long dirty_ratelimit,
1589 int *nr_dirtied_pause)
1590 {
1591 long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
1592 long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
1593 long t; /* target pause */
1594 long pause; /* estimated next pause */
1595 int pages; /* target nr_dirtied_pause */
1596
1597 /* target for 10ms pause on 1-dd case */
1598 t = max(1, HZ / 100);
1599
1600 /*
1601 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1602 * overheads.
1603 *
1604 * (N * 10ms) on 2^N concurrent tasks.
1605 */
1606 if (hi > lo)
1607 t += (hi - lo) * (10 * HZ) / 1024;
1608
1609 /*
1610 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1611 * on the much more stable dirty_ratelimit. However the next pause time
1612 * will be computed based on task_ratelimit and the two rate limits may
1613 * depart considerably at some time. Especially if task_ratelimit goes
1614 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1615 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
1616 * result task_ratelimit won't be executed faithfully, which could
1617 * eventually bring down dirty_ratelimit.
1618 *
1619 * We apply two rules to fix it up:
1620 * 1) try to estimate the next pause time and if necessary, use a lower
1621 * nr_dirtied_pause so as not to exceed max_pause. When this happens,
1622 * nr_dirtied_pause will be "dancing" with task_ratelimit.
1623 * 2) limit the target pause time to max_pause/2, so that the normal
1624 * small fluctuations of task_ratelimit won't trigger rule (1) and
1625 * nr_dirtied_pause will remain as stable as dirty_ratelimit.
1626 */
1627 t = min(t, 1 + max_pause / 2);
1628 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1629
1630 /*
1631 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1632 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1633 * When the 16 consecutive reads are often interrupted by some dirty
1634 * throttling pause during the async writes, cfq will go into idle
1635 * (deadline is fine). So push nr_dirtied_pause as high as possible
1636 * until it reaches DIRTY_POLL_THRESH=32 pages.
1637 */
1638 if (pages < DIRTY_POLL_THRESH) {
1639 t = max_pause;
1640 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1641 if (pages > DIRTY_POLL_THRESH) {
1642 pages = DIRTY_POLL_THRESH;
1643 t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1644 }
1645 }
1646
1647 pause = HZ * pages / (task_ratelimit + 1);
1648 if (pause > max_pause) {
1649 t = max_pause;
1650 pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1651 }
1652
1653 *nr_dirtied_pause = pages;
1654 /*
1655 * The minimal pause time will normally be half the target pause time.
1656 */
1657 return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1658 }
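/*
 * Rough single-dirtier example (illustrative, HZ=1000, 4KB pages): with
 * dirty_ratelimit ~= task_ratelimit ~= 25600 pages/s the target pause is
 * t = 10 jiffies, pages = 25600 * 10 / 1024 = 250 (comfortably above
 * DIRTY_POLL_THRESH), and the estimated pause 1000 * 250 / 25601 ~= 9
 * jiffies stays below max_pause, so the task dirties ~250 pages per round
 * and the returned minimum pause is 1 + 10 / 2 = 6 jiffies.
 */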
1659
1660 static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1661 {
1662 struct bdi_writeback *wb = dtc->wb;
1663 unsigned long wb_reclaimable;
1664
1665 /*
1666 * wb_thresh is not treated as a hard limiting factor the way
1667 * dirty_thresh is, for two reasons:
1668 * - in JBOD setup, wb_thresh can fluctuate a lot
1669 * - in a system with HDD and USB key, the USB key may somehow
1670 * go into state (wb_dirty >> wb_thresh) either because
1671 * wb_dirty starts high, or because wb_thresh drops low.
1672 * In this case we don't want to hard throttle the USB key
1673 * dirtiers for 100 seconds until wb_dirty drops under
1674 * wb_thresh. Instead the auxiliary wb control line in
1675 * wb_position_ratio() will let the dirtier task progress
1676 * at some rate <= (write_bw / 2) for bringing down wb_dirty.
1677 */
1678 dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
1679 dtc->wb_bg_thresh = dtc->thresh ?
1680 div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1681
1682 /*
1683 * In order to avoid the stacked BDI deadlock we need
1684 * to ensure we accurately count the 'dirty' pages when
1685 * the threshold is low.
1686 *
1687 * Otherwise it would be possible to get thresh+n pages
1688 * reported dirty, even though there are thresh-m pages
1689 * actually dirty; with m+n sitting in the percpu
1690 * deltas.
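	 *
	 * wb_stat_error() is the maximal drift of those approximate percpu
	 * counters, so below roughly twice that margin we pay for the exact
	 * (but more expensive) wb_stat_sum() instead of the cheaper wb_stat().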
1691 */
1692 if (dtc->wb_thresh < 2 * wb_stat_error()) {
1693 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1694 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1695 } else {
1696 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1697 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1698 }
1699 }
1700
1701 static unsigned long domain_poll_intv(struct dirty_throttle_control *dtc,
1702 bool strictlimit)
1703 {
1704 unsigned long dirty, thresh;
1705
1706 if (strictlimit) {
1707 dirty = dtc->wb_dirty;
1708 thresh = dtc->wb_thresh;
1709 } else {
1710 dirty = dtc->dirty;
1711 thresh = dtc->thresh;
1712 }
1713
1714 return dirty_poll_interval(dirty, thresh);
1715 }
1716
1717 /*
1718 * Throttle it only when the background writeback cannot catch up. This avoids
1719 * (excessively) small writeouts when the wb limits are ramping up in case of
1720 * !strictlimit.
1721 *
1722 * In strictlimit case make decision based on the wb counters and limits. Small
1723 * writeouts when the wb limits are ramping up are the price we consciously pay
1724 * for strictlimit-ing.
1725 */
1726 static void domain_dirty_freerun(struct dirty_throttle_control *dtc,
1727 bool strictlimit)
1728 {
1729 unsigned long dirty, thresh, bg_thresh;
1730
1731 if (unlikely(strictlimit)) {
1732 wb_dirty_limits(dtc);
1733 dirty = dtc->wb_dirty;
1734 thresh = dtc->wb_thresh;
1735 bg_thresh = dtc->wb_bg_thresh;
1736 } else {
1737 dirty = dtc->dirty;
1738 thresh = dtc->thresh;
1739 bg_thresh = dtc->bg_thresh;
1740 }
1741 dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh);
1742 }
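/*
 * dirty_freerun_ceiling() is the midpoint of the background and hard
 * thresholds, so with the default vm.dirty_background_ratio = 10 and
 * vm.dirty_ratio = 20 a domain runs free until roughly 15% of its dirtyable
 * memory is dirty (illustrative; the exact thresholds come from
 * domain_dirty_limits()).
 */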
1743
1744 static void balance_domain_limits(struct dirty_throttle_control *dtc,
1745 bool strictlimit)
1746 {
1747 domain_dirty_avail(dtc, true);
1748 domain_dirty_limits(dtc);
1749 domain_dirty_freerun(dtc, strictlimit);
1750 }
1751
1752 static void wb_dirty_freerun(struct dirty_throttle_control *dtc,
1753 bool strictlimit)
1754 {
1755 dtc->freerun = false;
1756
1757 /* was already handled in domain_dirty_freerun */
1758 if (strictlimit)
1759 return;
1760
1761 wb_dirty_limits(dtc);
1762 /*
1763 * LOCAL_THROTTLE tasks must not be throttled when below the per-wb
1764 * freerun ceiling.
1765 */
1766 if (!(current->flags & PF_LOCAL_THROTTLE))
1767 return;
1768
1769 dtc->freerun = dtc->wb_dirty <
1770 dirty_freerun_ceiling(dtc->wb_thresh, dtc->wb_bg_thresh);
1771 }
1772
1773 static inline void wb_dirty_exceeded(struct dirty_throttle_control *dtc,
1774 bool strictlimit)
1775 {
1776 dtc->dirty_exceeded = (dtc->wb_dirty > dtc->wb_thresh) &&
1777 ((dtc->dirty > dtc->thresh) || strictlimit);
1778 }
1779
1780 /*
1781 * The limits fields dirty_exceeded and pos_ratio won't be updated if wb is
1782 * in freerun state. Please don't use these invalid fields in freerun case.
1783 */
1784 static void balance_wb_limits(struct dirty_throttle_control *dtc,
1785 bool strictlimit)
1786 {
1787 wb_dirty_freerun(dtc, strictlimit);
1788 if (dtc->freerun)
1789 return;
1790
1791 wb_dirty_exceeded(dtc, strictlimit);
1792 wb_position_ratio(dtc);
1793 }
1794
1795 /*
1796 * balance_dirty_pages() must be called by processes which are generating dirty
1797 * data. It looks at the number of dirty pages in the machine and will force
1798 * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
1799 * If we're over `background_thresh' then the writeback threads are woken to
1800 * perform some writeout.
1801 */
1802 static int balance_dirty_pages(struct bdi_writeback *wb,
1803 unsigned long pages_dirtied, unsigned int flags)
1804 {
1805 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1806 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1807 struct dirty_throttle_control * const gdtc = &gdtc_stor;
1808 struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1809 &mdtc_stor : NULL;
1810 struct dirty_throttle_control *sdtc;
1811 unsigned long nr_dirty;
1812 long period;
1813 long pause;
1814 long max_pause;
1815 long min_pause;
1816 int nr_dirtied_pause;
1817 unsigned long task_ratelimit;
1818 unsigned long dirty_ratelimit;
1819 struct backing_dev_info *bdi = wb->bdi;
1820 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1821 unsigned long start_time = jiffies;
1822 int ret = 0;
1823
1824 for (;;) {
1825 unsigned long now = jiffies;
1826
1827 nr_dirty = global_node_page_state(NR_FILE_DIRTY);
1828
1829 balance_domain_limits(gdtc, strictlimit);
1830 if (mdtc) {
1831 /*
1832 * If @wb belongs to !root memcg, repeat the same
1833 * basic calculations for the memcg domain.
1834 */
1835 balance_domain_limits(mdtc, strictlimit);
1836 }
1837
1838 if (!writeback_in_progress(wb) &&
1839 (nr_dirty > gdtc->bg_thresh ||
1840 (strictlimit && gdtc->wb_dirty > gdtc->wb_bg_thresh)))
1841 wb_start_background_writeback(wb);
1842
1843 /*
1844 * If memcg domain is in effect, @dirty should be under
1845 * both global and memcg freerun ceilings.
1846 */
1847 if (gdtc->freerun && (!mdtc || mdtc->freerun)) {
1848 unsigned long intv;
1849 unsigned long m_intv;
1850
1851 free_running:
1852 intv = domain_poll_intv(gdtc, strictlimit);
1853 m_intv = ULONG_MAX;
1854
1855 current->dirty_paused_when = now;
1856 current->nr_dirtied = 0;
1857 if (mdtc)
1858 m_intv = domain_poll_intv(mdtc, strictlimit);
1859 current->nr_dirtied_pause = min(intv, m_intv);
1860 break;
1861 }
1862
1863 /*
1864 * Unconditionally start background writeback if it's not
1865 * already in progress. We need to do this because the global
1866 * dirty threshold check above (nr_dirty > gdtc->bg_thresh)
1867 * doesn't account for the memcg-based throttling case. memcg
1868 * uses its own dirty count and thresholds and can trigger
1869 * throttling even when global nr_dirty < gdtc->bg_thresh
1870 *
1871 * Writeback needs to be started else the writer stalls in the
1872 * throttle loop waiting for dirty pages to be written back
1873 * while no writeback is running.
1874 */
1875 if (unlikely(!writeback_in_progress(wb)))
1876 wb_start_background_writeback(wb);
1877
1878 mem_cgroup_flush_foreign(wb);
1879
1880 /*
1881 * Calculate global domain's pos_ratio and select the
1882 * global dtc by default.
1883 */
1884 balance_wb_limits(gdtc, strictlimit);
1885 if (gdtc->freerun)
1886 goto free_running;
1887 sdtc = gdtc;
1888
1889 if (mdtc) {
1890 /*
1891 * If memcg domain is in effect, calculate its
1892 * pos_ratio. @wb should satisfy constraints from
1893 * both global and memcg domains. Choose the one
1894 * w/ lower pos_ratio.
1895 */
1896 balance_wb_limits(mdtc, strictlimit);
1897 if (mdtc->freerun)
1898 goto free_running;
1899 if (mdtc->pos_ratio < gdtc->pos_ratio)
1900 sdtc = mdtc;
1901 }
1902
1903 wb->dirty_exceeded = gdtc->dirty_exceeded ||
1904 (mdtc && mdtc->dirty_exceeded);
1905 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
1906 BANDWIDTH_INTERVAL))
1907 __wb_update_bandwidth(gdtc, mdtc, true);
1908
1909 /* throttle according to the chosen dtc */
1910 dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1911 task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1912 RATELIMIT_CALC_SHIFT;
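		/*
		 * pos_ratio is a fixed-point factor with 1 << RATELIMIT_CALC_SHIFT
		 * (1024) meaning 1.0, so e.g. dirty_ratelimit = 25600 pages/s at
		 * pos_ratio = 512 throttles this task to 12800 pages/s
		 * (illustrative numbers).
		 */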
1913 max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1914 min_pause = wb_min_pause(wb, max_pause,
1915 task_ratelimit, dirty_ratelimit,
1916 &nr_dirtied_pause);
1917
1918 if (unlikely(task_ratelimit == 0)) {
1919 period = max_pause;
1920 pause = max_pause;
1921 goto pause;
1922 }
1923 period = HZ * pages_dirtied / task_ratelimit;
1924 pause = period;
1925 if (current->dirty_paused_when)
1926 pause -= now - current->dirty_paused_when;
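		/*
		 * Illustrative: a task that dirtied 128 pages at a task_ratelimit
		 * of 12800 pages/s owes period = 10 jiffies (HZ=1000); if it has
		 * already spent 4 jiffies "thinking" since dirty_paused_when,
		 * only ~6 jiffies of sleep remain.
		 */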
1927 /*
1928 * For think times of less than 1s (ext3/4 may block the dirtier
1929 * for up to 800ms from time to time on a single HDD; so does xfs,
1930 * though much less frequently), try to compensate for it in
1931 * future periods by updating the virtual time; otherwise just
1932 * do a reset, as it may be a light dirtier.
1933 */
1934 if (pause < min_pause) {
1935 trace_balance_dirty_pages(wb,
1936 sdtc,
1937 dirty_ratelimit,
1938 task_ratelimit,
1939 pages_dirtied,
1940 period,
1941 min(pause, 0L),
1942 start_time);
1943 if (pause < -HZ) {
1944 current->dirty_paused_when = now;
1945 current->nr_dirtied = 0;
1946 } else if (period) {
1947 current->dirty_paused_when += period;
1948 current->nr_dirtied = 0;
1949 } else if (current->nr_dirtied_pause <= pages_dirtied)
1950 current->nr_dirtied_pause += pages_dirtied;
1951 break;
1952 }
1953 if (unlikely(pause > max_pause)) {
1954 /* for occasional dropped task_ratelimit */
1955 now += min(pause - max_pause, max_pause);
1956 pause = max_pause;
1957 }
1958
1959 pause:
1960 trace_balance_dirty_pages(wb,
1961 sdtc,
1962 dirty_ratelimit,
1963 task_ratelimit,
1964 pages_dirtied,
1965 period,
1966 pause,
1967 start_time);
1968 if (flags & BDP_ASYNC) {
1969 ret = -EAGAIN;
1970 break;
1971 }
1972 __set_current_state(TASK_KILLABLE);
1973 bdi->last_bdp_sleep = jiffies;
1974 io_schedule_timeout(pause);
1975
1976 current->dirty_paused_when = now + pause;
1977 current->nr_dirtied = 0;
1978 current->nr_dirtied_pause = nr_dirtied_pause;
1979
1980 /*
1981 * This is typically equal to (dirty < thresh) and can also
1982 * keep "1000+ dd on a slow USB stick" under control.
1983 */
1984 if (task_ratelimit)
1985 break;
1986
1987 /*
1988 * In the case of an unresponsive NFS server and the NFS dirty
1989 * pages exceeds dirty_thresh, give the other good wb's a pipe
1990 * to go through, so that tasks on them still remain responsive.
1991 *
1992 * In theory 1 page is enough to keep the consumer-producer
1993 * pipe going: the flusher cleans 1 page => the task dirties 1
1994 * more page. However wb_dirty has accounting errors. So use
1995 * the larger and more IO friendly wb_stat_error.
1996 */
1997 if (sdtc->wb_dirty <= wb_stat_error())
1998 break;
1999
2000 if (fatal_signal_pending(current))
2001 break;
2002 }
2003 return ret;
2004 }
2005
2006 static DEFINE_PER_CPU(int, bdp_ratelimits);
2007
2008 /*
2009 * Normal tasks are throttled by
2010 * loop {
2011 * dirty tsk->nr_dirtied_pause pages;
2012 * take a snap in balance_dirty_pages();
2013 * }
2014 * However there is a worst case. If every task exits immediately after dirtying
2015 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
2016 * called to throttle the page dirties. The solution is to save the not yet
2017 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
2018 * randomly into the running tasks. This works well for the above worst case,
2019 * as the new task will pick up and accumulate the old task's leaked dirty
2020 * count and eventually get throttled.
2021 */
2022 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
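/*
 * Example of the hand-off above (illustrative): a task with
 * nr_dirtied_pause = 256 that exits after dirtying 200 pages leaves those
 * 200 in this CPU's dirty_throttle_leaks; the next dirtier on the CPU picks
 * them up in balance_dirty_pages_ratelimited_flags() and therefore reaches
 * its own pause threshold correspondingly sooner.
 */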
2023
2024 /**
2025 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
2026 * @mapping: address_space which was dirtied.
2027 * @flags: BDP flags.
2028 *
2029 * Processes which are dirtying memory should call in here once for each page
2030 * which was newly dirtied. The function will periodically check the system's
2031 * dirty state and will initiate writeback if needed.
2032 *
2033 * See balance_dirty_pages_ratelimited() for details.
2034 *
2035 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
2036 * indicate that memory is out of balance and the caller must wait
2037 * for I/O to complete. Otherwise, it will return 0 to indicate
2038 * that either memory was already in balance, or it was able to sleep
2039 * until the amount of dirty memory returned to balance.
2040 */
2041 int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
2042 unsigned int flags)
2043 {
2044 struct inode *inode = mapping->host;
2045 struct backing_dev_info *bdi = inode_to_bdi(inode);
2046 struct bdi_writeback *wb = NULL;
2047 int ratelimit;
2048 int ret = 0;
2049 int *p;
2050
2051 if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
2052 return ret;
2053
2054 if (inode_cgwb_enabled(inode))
2055 wb = wb_get_create_current(bdi, GFP_KERNEL);
2056 if (!wb)
2057 wb = &bdi->wb;
2058
2059 ratelimit = current->nr_dirtied_pause;
2060 if (wb->dirty_exceeded)
2061 ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
2062
2063 preempt_disable();
2064 /*
2065 * This prevents one CPU from accumulating too many dirtied pages without
2066 * calling into balance_dirty_pages(), which can happen when there are
2067 * 1000+ tasks that all start dirtying pages at exactly the same
2068 * time, hence all honouring a too-large initial task->nr_dirtied_pause.
2069 */
2070 p = this_cpu_ptr(&bdp_ratelimits);
2071 if (unlikely(current->nr_dirtied >= ratelimit))
2072 *p = 0;
2073 else if (unlikely(*p >= ratelimit_pages)) {
2074 *p = 0;
2075 ratelimit = 0;
2076 }
2077 /*
2078 * Pick up the dirtied pages by the exited tasks. This avoids lots of
2079 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
2080 * the dirty throttling and livelocking other long-running dirtiers.
2081 */
2082 p = this_cpu_ptr(&dirty_throttle_leaks);
2083 if (*p > 0 && current->nr_dirtied < ratelimit) {
2084 unsigned long nr_pages_dirtied;
2085 nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
2086 *p -= nr_pages_dirtied;
2087 current->nr_dirtied += nr_pages_dirtied;
2088 }
2089 preempt_enable();
2090
2091 if (unlikely(current->nr_dirtied >= ratelimit))
2092 ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2093
2094 wb_put(wb);
2095 return ret;
2096 }
2097 EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
2098
2099 /**
2100 * balance_dirty_pages_ratelimited - balance dirty memory state.
2101 * @mapping: address_space which was dirtied.
2102 *
2103 * Processes which are dirtying memory should call in here once for each page
2104 * which was newly dirtied. The function will periodically check the system's
2105 * dirty state and will initiate writeback if needed.
2106 *
2107 * Once we're over the dirty memory limit we decrease the ratelimiting
2108 * by a lot, to prevent individual processes from overshooting the limit
2109 * by (ratelimit_pages) each.
2110 */
2111 void balance_dirty_pages_ratelimited(struct address_space *mapping)
2112 {
2113 balance_dirty_pages_ratelimited_flags(mapping, 0);
2114 }
2115 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
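/*
 * Typical use (sketch, not lifted from any particular filesystem): a
 * buffered write path dirties one folio at a time and then calls in here,
 * e.g.
 *
 *	folio_mark_dirty(folio);
 *	folio_unlock(folio);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * so the writer is throttled before it dirties the next folio.
 */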
2116
2117 /*
2118 * Similar to wb_dirty_limits, wb_bg_dirty_limits also calculates dirty
2119 * and thresh, but it's for background writeback.
2120 */
2121 static void wb_bg_dirty_limits(struct dirty_throttle_control *dtc)
2122 {
2123 struct bdi_writeback *wb = dtc->wb;
2124
2125 dtc->wb_bg_thresh = __wb_calc_thresh(dtc, dtc->bg_thresh);
2126 if (dtc->wb_bg_thresh < 2 * wb_stat_error())
2127 dtc->wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE);
2128 else
2129 dtc->wb_dirty = wb_stat(wb, WB_RECLAIMABLE);
2130 }
2131
2132 static bool domain_over_bg_thresh(struct dirty_throttle_control *dtc)
2133 {
2134 domain_dirty_avail(dtc, false);
2135 domain_dirty_limits(dtc);
2136 if (dtc->dirty > dtc->bg_thresh)
2137 return true;
2138
2139 wb_bg_dirty_limits(dtc);
2140 if (dtc->wb_dirty > dtc->wb_bg_thresh)
2141 return true;
2142
2143 return false;
2144 }
2145
2146 /**
2147 * wb_over_bg_thresh - does @wb need to be written back?
2148 * @wb: bdi_writeback of interest
2149 *
2150 * Determines whether background writeback should keep writing @wb or it's
2151 * clean enough.
2152 *
2153 * Return: %true if writeback should continue.
2154 */
2155 bool wb_over_bg_thresh(struct bdi_writeback *wb)
2156 {
2157 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
2158 struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
2159
2160 if (domain_over_bg_thresh(&gdtc))
2161 return true;
2162
2163 if (mdtc_valid(&mdtc))
2164 return domain_over_bg_thresh(&mdtc);
2165
2166 return false;
2167 }
2168
2169 #ifdef CONFIG_SYSCTL
2170 /*
2171 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
2172 */
2173 static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write,
2174 void *buffer, size_t *length, loff_t *ppos)
2175 {
2176 unsigned int old_interval = dirty_writeback_interval;
2177 int ret;
2178
2179 ret = proc_dointvec(table, write, buffer, length, ppos);
2180
2181 /*
2182 * Writing 0 to dirty_writeback_interval will disable periodic writeback
2183 * and a different non-zero value will wakeup the writeback threads.
2184 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2185 * iterate over all bdis and wbs.
2186 * The reason we do this is to make the change take effect immediately.
2187 */
2188 if (!ret && write && dirty_writeback_interval &&
2189 dirty_writeback_interval != old_interval)
2190 wakeup_flusher_threads(WB_REASON_PERIODIC);
2191
2192 return ret;
2193 }
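/*
 * For example (illustrative): "echo 3000 > /proc/sys/vm/dirty_writeback_centisecs"
 * changes the kupdate period to 30s and wakes the flusher threads right
 * away, while writing 0 disables periodic writeback altogether.
 */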
2194 #endif
2195
2196 /*
2197 * If ratelimit_pages is too high then we can get into dirty-data overload
2198 * if a large number of processes all perform writes at the same time.
2199 *
2200 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2201 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2202 * thresholds.
2203 */
2204
2205 void writeback_set_ratelimit(void)
2206 {
2207 struct wb_domain *dom = &global_wb_domain;
2208 unsigned long background_thresh;
2209 unsigned long dirty_thresh;
2210
2211 global_dirty_limits(&background_thresh, &dirty_thresh);
2212 dom->dirty_limit = dirty_thresh;
2213 ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2214 if (ratelimit_pages < 16)
2215 ratelimit_pages = 16;
2216 }
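/*
 * Illustrative: with dirty_thresh = 262144 pages (1GB of 4KB pages) and 8
 * online CPUs, ratelimit_pages = 262144 / (8 * 32) = 1024, i.e. each CPU may
 * dirty up to ~1024 pages between forced balance_dirty_pages() calls, which
 * bounds the collective overshoot to about 1/32 of the threshold.
 */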
2217
2218 static int page_writeback_cpu_online(unsigned int cpu)
2219 {
2220 writeback_set_ratelimit();
2221 return 0;
2222 }
2223
2224 #ifdef CONFIG_SYSCTL
2225
2226 static int laptop_mode;
2227 static int laptop_mode_handler(const struct ctl_table *table, int write,
2228 void *buffer, size_t *lenp, loff_t *ppos)
2229 {
2230 int ret = proc_dointvec_jiffies(table, write, buffer, lenp, ppos);
2231
2232 if (!ret && write)
2233 pr_warn("%s: vm.laptop_mode is deprecated. Ignoring setting.\n",
2234 current->comm);
2235
2236 return ret;
2237 }
2238
2239 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
2240 static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
2241
2242 static const struct ctl_table vm_page_writeback_sysctls[] = {
2243 {
2244 .procname = "dirty_background_ratio",
2245 .data = &dirty_background_ratio,
2246 .maxlen = sizeof(dirty_background_ratio),
2247 .mode = 0644,
2248 .proc_handler = dirty_background_ratio_handler,
2249 .extra1 = SYSCTL_ZERO,
2250 .extra2 = SYSCTL_ONE_HUNDRED,
2251 },
2252 {
2253 .procname = "dirty_background_bytes",
2254 .data = &dirty_background_bytes,
2255 .maxlen = sizeof(dirty_background_bytes),
2256 .mode = 0644,
2257 .proc_handler = dirty_background_bytes_handler,
2258 .extra1 = SYSCTL_LONG_ONE,
2259 },
2260 {
2261 .procname = "dirty_ratio",
2262 .data = &vm_dirty_ratio,
2263 .maxlen = sizeof(vm_dirty_ratio),
2264 .mode = 0644,
2265 .proc_handler = dirty_ratio_handler,
2266 .extra1 = SYSCTL_ZERO,
2267 .extra2 = SYSCTL_ONE_HUNDRED,
2268 },
2269 {
2270 .procname = "dirty_bytes",
2271 .data = &vm_dirty_bytes,
2272 .maxlen = sizeof(vm_dirty_bytes),
2273 .mode = 0644,
2274 .proc_handler = dirty_bytes_handler,
2275 .extra1 = (void *)&dirty_bytes_min,
2276 },
2277 {
2278 .procname = "dirty_writeback_centisecs",
2279 .data = &dirty_writeback_interval,
2280 .maxlen = sizeof(dirty_writeback_interval),
2281 .mode = 0644,
2282 .proc_handler = dirty_writeback_centisecs_handler,
2283 },
2284 {
2285 .procname = "dirty_expire_centisecs",
2286 .data = &dirty_expire_interval,
2287 .maxlen = sizeof(dirty_expire_interval),
2288 .mode = 0644,
2289 .proc_handler = proc_dointvec_minmax,
2290 .extra1 = SYSCTL_ZERO,
2291 },
2292 #ifdef CONFIG_HIGHMEM
2293 {
2294 .procname = "highmem_is_dirtyable",
2295 .data = &vm_highmem_is_dirtyable,
2296 .maxlen = sizeof(vm_highmem_is_dirtyable),
2297 .mode = 0644,
2298 .proc_handler = proc_dointvec_minmax,
2299 .extra1 = SYSCTL_ZERO,
2300 .extra2 = SYSCTL_ONE,
2301 },
2302 #endif
2303 {
2304 .procname = "laptop_mode",
2305 .data = &laptop_mode,
2306 .maxlen = sizeof(laptop_mode),
2307 .mode = 0644,
2308 .proc_handler = laptop_mode_handler,
2309 },
2310 };
2311 #endif
2312
2313 /*
2314 * Called early on to tune the page writeback dirty limits.
2315 *
2316 * We used to scale dirty pages according to how total memory
2317 * related to pages that could be allocated for buffers.
2318 *
2319 * However, that was when we used "dirty_ratio" to scale with
2320 * all memory, and we don't do that any more. "dirty_ratio"
2321 * is now applied to total non-HIGHPAGE memory, and as such we can't
2322 * get into the old insane situation any more where we had
2323 * large amounts of dirty pages compared to a small amount of
2324 * non-HIGHMEM memory.
2325 *
2326 * But we might still want to scale the dirty_ratio by how
2327 * much memory the box has..
2328 */
2329 void __init page_writeback_init(void)
2330 {
2331 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2332
2333 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2334 page_writeback_cpu_online, NULL);
2335 cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2336 page_writeback_cpu_online);
2337 #ifdef CONFIG_SYSCTL
2338 register_sysctl_init("vm", vm_page_writeback_sysctls);
2339 #endif
2340 }
2341
2342 /**
2343 * tag_pages_for_writeback - tag pages to be written by writeback
2344 * @mapping: address space structure to write
2345 * @start: starting page index
2346 * @end: ending page index (inclusive)
2347 *
2348 * This function scans the page range from @start to @end (inclusive) and tags
2349 * all pages that have DIRTY tag set with a special TOWRITE tag. The caller
2350 * can then use the TOWRITE tag to identify pages eligible for writeback.
2351 * This mechanism is used to avoid livelocking of writeback by a process
2352 * steadily creating new dirty pages in the file (thus it is important for this
2353 * function to be quick so that it can tag pages faster than a dirtying process
2354 * can create them).
2355 */
2356 void tag_pages_for_writeback(struct address_space *mapping,
2357 pgoff_t start, pgoff_t end)
2358 {
2359 XA_STATE(xas, &mapping->i_pages, start);
2360 unsigned int tagged = 0;
2361 void *page;
2362
2363 xas_lock_irq(&xas);
2364 xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2365 xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2366 if (++tagged % XA_CHECK_SCHED)
2367 continue;
2368
2369 xas_pause(&xas);
2370 xas_unlock_irq(&xas);
2371 cond_resched();
2372 xas_lock_irq(&xas);
2373 }
2374 xas_unlock_irq(&xas);
2375 }
2376 EXPORT_SYMBOL(tag_pages_for_writeback);
2377
2378 static bool folio_prepare_writeback(struct address_space *mapping,
2379 struct writeback_control *wbc, struct folio *folio)
2380 {
2381 /*
2382 * Folio truncated or invalidated. We can freely skip it then,
2383 * even for data integrity operations: the folio has disappeared
2384 * concurrently, so there could be no real expectation of this
2385 * data integrity operation even if there is now a new, dirty
2386 * folio at the same pagecache index.
2387 */
2388 if (unlikely(folio->mapping != mapping))
2389 return false;
2390
2391 /*
2392 * Did somebody else write it for us?
2393 */
2394 if (!folio_test_dirty(folio))
2395 return false;
2396
2397 if (folio_test_writeback(folio)) {
2398 if (wbc->sync_mode == WB_SYNC_NONE)
2399 return false;
2400 folio_wait_writeback(folio);
2401 }
2402 BUG_ON(folio_test_writeback(folio));
2403
2404 if (!folio_clear_dirty_for_io(folio))
2405 return false;
2406
2407 return true;
2408 }
2409
2410
2411 static pgoff_t wbc_end(struct writeback_control *wbc)
2412 {
2413 if (wbc->range_cyclic)
2414 return -1;
2415 return wbc->range_end >> PAGE_SHIFT;
2416 }
2417
2418 static struct folio *writeback_get_folio(struct address_space *mapping,
2419 struct writeback_control *wbc)
2420 {
2421 struct folio *folio;
2422
2423 retry:
2424 folio = folio_batch_next(&wbc->fbatch);
2425 if (!folio) {
2426 folio_batch_release(&wbc->fbatch);
2427 cond_resched();
2428 filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
2429 wbc_to_tag(wbc), &wbc->fbatch);
2430 folio = folio_batch_next(&wbc->fbatch);
2431 if (!folio)
2432 return NULL;
2433 }
2434
2435 folio_lock(folio);
2436 if (unlikely(!folio_prepare_writeback(mapping, wbc, folio))) {
2437 folio_unlock(folio);
2438 goto retry;
2439 }
2440
2441 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2442 return folio;
2443 }
2444
2445 /**
2446 * writeback_iter - iterate folios of a mapping for writeback
2447 * @mapping: address space structure to write
2448 * @wbc: writeback context
2449 * @folio: previously iterated folio (%NULL to start)
2450 * @error: in-out pointer for writeback errors (see below)
2451 *
2452 * This function returns the next folio for the writeback operation described by
2453 * @wbc on @mapping and should be called in a while loop in the ->writepages
2454 * implementation.
2455 *
2456 * To start the writeback operation, %NULL is passed in the @folio argument, and
2457 * for every subsequent iteration the folio returned previously should be passed
2458 * back in.
2459 *
2460 * If there was an error in the per-folio writeback inside the writeback_iter()
2461 * loop, @error should be set to the error value.
2462 *
2463 * Once the writeback described in @wbc has finished, this function will return
2464 * %NULL and if there was an error in any iteration restore it to @error.
2465 *
2466 * Note: callers should not manually break out of the loop using break or goto
2467 * but must keep calling writeback_iter() until it returns %NULL.
2468 *
2469 * Return: the folio to write or %NULL if the loop is done.
2470 */
2471 struct folio *writeback_iter(struct address_space *mapping,
2472 struct writeback_control *wbc, struct folio *folio, int *error)
2473 {
2474 if (!folio) {
2475 folio_batch_init(&wbc->fbatch);
2476 wbc->saved_err = *error = 0;
2477
2478 /*
2479 * For range cyclic writeback we remember where we stopped so
2480 * that we can continue where we stopped.
2481 *
2482 * For non-cyclic writeback we always start at the beginning of
2483 * the passed in range.
2484 */
2485 if (wbc->range_cyclic)
2486 wbc->index = mapping->writeback_index;
2487 else
2488 wbc->index = wbc->range_start >> PAGE_SHIFT;
2489
2490 /*
2491 * To avoid livelocks when other processes dirty new pages, we
2492 * first tag pages which should be written back and only then
2493 * start writing them.
2494 *
2495 * For data-integrity writeback we have to be careful so that we
2496 * do not miss some pages (e.g., because some other process has
2497 * cleared the TOWRITE tag we set). The rule we follow is that
2498 * TOWRITE tag can be cleared only by the process clearing the
2499 * DIRTY tag (and submitting the page for I/O).
2500 */
2501 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2502 tag_pages_for_writeback(mapping, wbc->index,
2503 wbc_end(wbc));
2504 } else {
2505 wbc->nr_to_write -= folio_nr_pages(folio);
2506
2507 WARN_ON_ONCE(*error > 0);
2508
2509 /*
2510 * For integrity writeback we have to keep going until we have
2511 * written all the folios we tagged for writeback above, even if
2512 * we run past wbc->nr_to_write or encounter errors.
2513 * We stash away the first error we encounter in wbc->saved_err
2514 * so that it can be retrieved when we're done. This is because
2515 * the file system may still have state to clear for each folio.
2516 *
2517 * For background writeback we exit as soon as we run past
2518 * wbc->nr_to_write or encounter the first error.
2519 */
2520 if (wbc->sync_mode == WB_SYNC_ALL) {
2521 if (*error && !wbc->saved_err)
2522 wbc->saved_err = *error;
2523 } else {
2524 if (*error || wbc->nr_to_write <= 0)
2525 goto done;
2526 }
2527 }
2528
2529 folio = writeback_get_folio(mapping, wbc);
2530 if (!folio) {
2531 /*
2532 * To avoid deadlocks between range_cyclic writeback and callers
2533 * that hold folios in writeback to aggregate I/O until
2534 * the writeback iteration finishes, we do not loop back to the
2535 * start of the file. Doing so causes a folio lock/folio
2536 * writeback access order inversion - we should only ever lock
2537 * multiple folios in ascending folio->index order, and looping
2538 * back to the start of the file violates that rule and causes
2539 * deadlocks.
2540 */
2541 if (wbc->range_cyclic)
2542 mapping->writeback_index = 0;
2543
2544 /*
2545 * Return the first error we encountered (if there was any) to
2546 * the caller.
2547 */
2548 *error = wbc->saved_err;
2549 }
2550 return folio;
2551
2552 done:
2553 if (wbc->range_cyclic)
2554 mapping->writeback_index = folio_next_index(folio);
2555 folio_batch_release(&wbc->fbatch);
2556 return NULL;
2557 }
2558 EXPORT_SYMBOL_GPL(writeback_iter);
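/*
 * A minimal ->writepages implementation built on writeback_iter() might
 * look like the sketch below; "write_one_folio" is a hypothetical helper
 * (not defined in this file) that writes back the locked folio and unlocks
 * it when done:
 *
 *	struct folio *folio = NULL;
 *	int error = 0;
 *
 *	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *		error = write_one_folio(folio, wbc);
 *	return error;
 */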
2559
2560 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2561 {
2562 int ret;
2563 struct bdi_writeback *wb;
2564
2565 if (wbc->nr_to_write <= 0)
2566 return 0;
2567 wb = inode_to_wb_wbc(mapping->host, wbc);
2568 wb_bandwidth_estimate_start(wb);
2569 while (1) {
2570 if (mapping->a_ops->writepages)
2571 ret = mapping->a_ops->writepages(mapping, wbc);
2572 else
2573 /* deal with chardevs and other special files */
2574 ret = 0;
2575 if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
2576 break;
2577
2578 /*
2579 * Lacking an allocation context or the locality or writeback
2580 * state of any of the inode's pages, throttle based on
2581 * writeback activity on the local node. It's as good a
2582 * guess as any.
2583 */
2584 reclaim_throttle(NODE_DATA(numa_node_id()),
2585 VMSCAN_THROTTLE_WRITEBACK);
2586 }
2587 /*
2588 * Usually few pages are written by now from those we've just submitted
2589 * but if there's constant writeback being submitted, this makes sure
2590 * writeback bandwidth is updated once in a while.
2591 */
2592 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
2593 BANDWIDTH_INTERVAL))
2594 wb_update_bandwidth(wb);
2595 return ret;
2596 }
2597
2598 /*
2599 * For address_spaces which do not use buffers nor write back.
2600 */
2601 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
2602 {
2603 if (!folio_test_dirty(folio))
2604 return !folio_test_set_dirty(folio);
2605 return false;
2606 }
2607 EXPORT_SYMBOL(noop_dirty_folio);
2608
2609 /*
2610 * Helper function for set_page_dirty family.
2611 *
2612 * NOTE: This relies on being atomic wrt interrupts.
2613 */
2614 static void folio_account_dirtied(struct folio *folio,
2615 struct address_space *mapping)
2616 {
2617 struct inode *inode = mapping->host;
2618
2619 trace_writeback_dirty_folio(folio, mapping);
2620
2621 if (mapping_can_writeback(mapping)) {
2622 struct bdi_writeback *wb;
2623 long nr = folio_nr_pages(folio);
2624
2625 inode_attach_wb(inode, folio);
2626 wb = inode_to_wb(inode);
2627
2628 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2629 __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2630 __node_stat_mod_folio(folio, NR_DIRTIED, nr);
2631 wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2632 wb_stat_mod(wb, WB_DIRTIED, nr);
2633 task_io_account_write(nr * PAGE_SIZE);
2634 current->nr_dirtied += nr;
2635 __this_cpu_add(bdp_ratelimits, nr);
2636
2637 mem_cgroup_track_foreign_dirty(folio, wb);
2638 }
2639 }
2640
2641 /*
2642 * Helper function for deaccounting a dirty folio without writeback.
2644 */
2645 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2646 {
2647 long nr = folio_nr_pages(folio);
2648
2649 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2650 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2651 wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2652 task_io_account_cancelled_write(nr * PAGE_SIZE);
2653 }
2654
2655 /*
2656 * Mark the folio dirty, and set it dirty in the page cache.
2657 *
2658 * If warn is true, then emit a warning if the folio is not uptodate and has
2659 * not been truncated.
2660 *
2661 * It is the caller's responsibility to prevent the folio from being truncated
2662 * while this function is in progress, although it may have been truncated
2663 * before this function is called. Most callers have the folio locked.
2664 * A few have the folio blocked from truncation through other means (e.g.
2665 * zap_vma() has it mapped and is holding the page table lock).
2666 * When called from mark_buffer_dirty(), the filesystem should hold a
2667 * reference to the buffer_head that is being marked dirty, which causes
2668 * try_to_free_buffers() to fail.
2669 */
2670 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
2671 int warn)
2672 {
2673 unsigned long flags;
2674
2675 /*
2676 * Shmem writeback relies on swap, and swap writeback is LRU based,
2677 * not using the dirty mark.
2678 */
2679 VM_WARN_ON_ONCE(folio_test_swapcache(folio) || shmem_mapping(mapping));
2680
2681 xa_lock_irqsave(&mapping->i_pages, flags);
2682 if (folio->mapping) { /* Race with truncate? */
2683 WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2684 folio_account_dirtied(folio, mapping);
2685 __xa_set_mark(&mapping->i_pages, folio->index,
2686 PAGECACHE_TAG_DIRTY);
2687 }
2688 xa_unlock_irqrestore(&mapping->i_pages, flags);
2689 }
2690
2691 /**
2692 * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2693 * @mapping: Address space this folio belongs to.
2694 * @folio: Folio to be marked as dirty.
2695 *
2696 * Filesystems which do not use buffer heads should call this function
2697 * from their dirty_folio address space operation. It ignores the
2698 * contents of folio_get_private(), so if the filesystem marks individual
2699 * blocks as dirty, the filesystem should handle that itself.
2700 *
2701 * This is also sometimes used by filesystems which use buffer_heads when
2702 * a single buffer is being dirtied: we want to set the folio dirty in
2703 * that case, but not all the buffers. This is a "bottom-up" dirtying,
2704 * whereas block_dirty_folio() is a "top-down" dirtying.
2705 *
2706 * The caller must ensure this doesn't race with truncation. Most will
2707 * simply hold the folio lock, but e.g. zap_pte_range() calls with the
2708 * folio mapped and the pte lock held, which also locks out truncation.
2709 */
2710 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
2711 {
2712 if (folio_test_set_dirty(folio))
2713 return false;
2714
2715 __folio_mark_dirty(folio, mapping, !folio_test_private(folio));
2716
2717 if (mapping->host) {
2718 /* !PageAnon && !swapper_space */
2719 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2720 }
2721 return true;
2722 }
2723 EXPORT_SYMBOL(filemap_dirty_folio);
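/*
 * For instance, a filesystem that does not use buffer_heads will typically
 * just point its address_space_operations at this helper:
 *
 *	.dirty_folio	= filemap_dirty_folio,
 */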
2724
2725 /**
2726 * folio_redirty_for_writepage - Decline to write a dirty folio.
2727 * @wbc: The writeback control.
2728 * @folio: The folio.
2729 *
2730 * When a writepage implementation decides that it doesn't want to write
2731 * @folio for some reason, it should call this function, unlock @folio and
2732 * return 0.
2733 *
2734 * Return: True if we redirtied the folio. False if someone else dirtied
2735 * it first.
2736 */
2737 bool folio_redirty_for_writepage(struct writeback_control *wbc,
2738 struct folio *folio)
2739 {
2740 struct address_space *mapping = folio->mapping;
2741 long nr = folio_nr_pages(folio);
2742 bool ret;
2743
2744 wbc->pages_skipped += nr;
2745 ret = filemap_dirty_folio(mapping, folio);
2746 if (mapping && mapping_can_writeback(mapping)) {
2747 struct inode *inode = mapping->host;
2748 struct bdi_writeback *wb;
2749 struct wb_lock_cookie cookie = {};
2750
2751 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2752 current->nr_dirtied -= nr;
2753 node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2754 wb_stat_mod(wb, WB_DIRTIED, -nr);
2755 unlocked_inode_to_wb_end(inode, &cookie);
2756 }
2757 return ret;
2758 }
2759 EXPORT_SYMBOL(folio_redirty_for_writepage);
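/*
 * Sketch of the pattern described above (the condition is hypothetical, not
 * from this file): a writepage implementation that cannot write the folio
 * right now does
 *
 *	if (cannot_write_now) {
 *		folio_redirty_for_writepage(wbc, folio);
 *		folio_unlock(folio);
 *		return 0;
 *	}
 */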
2760
2761 /**
2762 * folio_mark_dirty - Mark a folio as being modified.
2763 * @folio: The folio.
2764 *
2765 * The folio may not be truncated while this function is running.
2766 * Holding the folio lock is sufficient to prevent truncation, but some
2767 * callers cannot acquire a sleeping lock. These callers instead hold
2768 * the page table lock for a page table which contains at least one page
2769 * in this folio. Truncation will block on the page table lock as it
2770 * unmaps pages before removing the folio from its mapping.
2771 *
2772 * Return: True if the folio was newly dirtied, false if it was already dirty.
2773 */
2774 bool folio_mark_dirty(struct folio *folio)
2775 {
2776 struct address_space *mapping = folio_mapping(folio);
2777
2778 if (likely(mapping)) {
2779 /*
2780 * readahead/folio_deactivate could remain
2781 * PG_readahead/PG_reclaim due to race with folio_end_writeback
2782 * About readahead, if the folio is written, the flags would be
2783 * reset. So no problem.
2784 * About folio_deactivate, if the folio is redirtied,
2785 * the flag will be reset. So no problem. But if the
2786 * folio is used by readahead it will confuse readahead
2787 * and make it restart the size rampup process. But it's
2788 * a trivial problem.
2789 */
2790 if (folio_test_reclaim(folio))
2791 folio_clear_reclaim(folio);
2792 return mapping->a_ops->dirty_folio(mapping, folio);
2793 }
2794
2795 return noop_dirty_folio(mapping, folio);
2796 }
2797 EXPORT_SYMBOL(folio_mark_dirty);
2798
2799 /*
2800 * folio_mark_dirty() is racy if the caller has no reference against
2801 * folio->mapping->host, and if the folio is unlocked. This is because another
2802 * CPU could truncate the folio off the mapping and then free the mapping.
2803 *
2804 * Usually, the folio _is_ locked, or the caller is a user-space process which
2805 * holds a reference on the inode by having an open file.
2806 *
2807 * In other cases, the folio should be locked before running folio_mark_dirty().
2808 */
2809 bool folio_mark_dirty_lock(struct folio *folio)
2810 {
2811 bool ret;
2812
2813 folio_lock(folio);
2814 ret = folio_mark_dirty(folio);
2815 folio_unlock(folio);
2816 return ret;
2817 }
2818 EXPORT_SYMBOL(folio_mark_dirty_lock);
2819
2820 /*
2821 * This cancels just the dirty bit on the kernel page itself, it does NOT
2822 * actually remove dirty bits on any mmap's that may be around. It also
2823 * leaves the page tagged dirty, so any sync activity will still find it on
2824 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2825 * look at the dirty bits in the VM.
2826 *
2827 * Doing this should *normally* only ever be done when a page is truncated,
2828 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2829 * this when it notices that somebody has cleaned out all the buffers on a
2830 * page without actually doing it through the VM. Can you say "ext3 is
2831 * horribly ugly"? Thought you could.
2832 */
2833 void __folio_cancel_dirty(struct folio *folio)
2834 {
2835 struct address_space *mapping = folio_mapping(folio);
2836
2837 if (mapping_can_writeback(mapping)) {
2838 struct inode *inode = mapping->host;
2839 struct bdi_writeback *wb;
2840 struct wb_lock_cookie cookie = {};
2841
2842 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2843
2844 if (folio_test_clear_dirty(folio))
2845 folio_account_cleaned(folio, wb);
2846
2847 unlocked_inode_to_wb_end(inode, &cookie);
2848 } else {
2849 folio_clear_dirty(folio);
2850 }
2851 }
2852 EXPORT_SYMBOL(__folio_cancel_dirty);
2853
2854 /*
2855 * Clear a folio's dirty flag, while caring for dirty memory accounting.
2856 * Returns true if the folio was previously dirty.
2857 *
2858 * This is for preparing to put the folio under writeout. We leave
2859 * the folio tagged as dirty in the xarray so that a concurrent
2860 * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
2861 * The ->writepage implementation will run either folio_start_writeback()
2862 * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
2863 * and xarray dirty tag back into sync.
2864 *
2865 * This incoherency between the folio's dirty flag and xarray tag is
2866 * unfortunate, but it only exists while the folio is locked.
2867 */
2868 bool folio_clear_dirty_for_io(struct folio *folio)
2869 {
2870 struct address_space *mapping = folio_mapping(folio);
2871 bool ret = false;
2872
2873 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2874
2875 if (mapping && mapping_can_writeback(mapping)) {
2876 struct inode *inode = mapping->host;
2877 struct bdi_writeback *wb;
2878 struct wb_lock_cookie cookie = {};
2879
2880 /*
2881 * Yes, Virginia, this is indeed insane.
2882 *
2883 * We use this sequence to make sure that
2884 * (a) we account for dirty stats properly
2885 * (b) we tell the low-level filesystem to
2886 * mark the whole folio dirty if it was
2887 * dirty in a pagetable. Only to then
2888 * (c) clean the folio again and return 1 to
2889 * cause the writeback.
2890 *
2891 * This way we avoid all nasty races with the
2892 * dirty bit in multiple places and clearing
2893 * them concurrently from different threads.
2894 *
2895 * Note! Normally the "folio_mark_dirty(folio)"
2896 * has no effect on the actual dirty bit - since
2897 * that will already usually be set. But we
2898 * need the side effects, and it can help us
2899 * avoid races.
2900 *
2901 * We basically use the folio "master dirty bit"
2902 * as a serialization point for all the different
2903 * threads doing their things.
2904 */
2905 if (folio_mkclean(folio))
2906 folio_mark_dirty(folio);
2907 /*
2908 * We carefully synchronise fault handlers against
2909 * installing a dirty pte and marking the folio dirty
2910 * at this point. We do this by having them hold the
2911 * page lock while dirtying the folio, and folios are
2912 * always locked coming in here, so we get the desired
2913 * exclusion.
2914 */
2915 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2916 if (folio_test_clear_dirty(folio)) {
2917 long nr = folio_nr_pages(folio);
2918 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2919 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2920 wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2921 ret = true;
2922 }
2923 unlocked_inode_to_wb_end(inode, &cookie);
2924 return ret;
2925 }
2926 return folio_test_clear_dirty(folio);
2927 }
2928 EXPORT_SYMBOL(folio_clear_dirty_for_io);
2929
2930 static void wb_inode_writeback_start(struct bdi_writeback *wb)
2931 {
2932 atomic_inc(&wb->writeback_inodes);
2933 }
2934
2935 static void wb_inode_writeback_end(struct bdi_writeback *wb)
2936 {
2937 unsigned long flags;
2938 atomic_dec(&wb->writeback_inodes);
2939 /*
2940 * Make sure estimate of writeback throughput gets updated after
2941 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
2942 * (which is the interval other bandwidth updates use for batching) so
2943 * that if multiple inodes end writeback at a similar time, they get
2944 * batched into one bandwidth update.
2945 */
2946 spin_lock_irqsave(&wb->work_lock, flags);
2947 if (test_bit(WB_registered, &wb->state))
2948 queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2949 spin_unlock_irqrestore(&wb->work_lock, flags);
2950 }
2951
2952 bool __folio_end_writeback(struct folio *folio)
2953 {
2954 long nr = folio_nr_pages(folio);
2955 struct address_space *mapping = folio_mapping(folio);
2956 bool ret;
2957
2958 if (mapping && mapping_use_writeback_tags(mapping)) {
2959 struct inode *inode = mapping->host;
2960 struct bdi_writeback *wb;
2961 unsigned long flags;
2962
2963 xa_lock_irqsave(&mapping->i_pages, flags);
2964 ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2965 __xa_clear_mark(&mapping->i_pages, folio->index,
2966 PAGECACHE_TAG_WRITEBACK);
2967
2968 wb = inode_to_wb(inode);
2969 wb_stat_mod(wb, WB_WRITEBACK, -nr);
2970 __wb_writeout_add(wb, nr);
2971 if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
2972 wb_inode_writeback_end(wb);
2973 if (mapping->host)
2974 sb_clear_inode_writeback(mapping->host);
2975 }
2976
2977 xa_unlock_irqrestore(&mapping->i_pages, flags);
2978 } else {
2979 ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2980 }
2981
2982 lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
2983 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2984 node_stat_mod_folio(folio, NR_WRITTEN, nr);
2985
2986 return ret;
2987 }
2988
2989 void __folio_start_writeback(struct folio *folio, bool keep_write)
2990 {
2991 long nr = folio_nr_pages(folio);
2992 struct address_space *mapping = folio_mapping(folio);
2993 int access_ret;
2994
2995 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2996 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2997
2998 if (mapping && mapping_use_writeback_tags(mapping)) {
2999 XA_STATE(xas, &mapping->i_pages, folio->index);
3000 struct inode *inode = mapping->host;
3001 struct bdi_writeback *wb;
3002 unsigned long flags;
3003 bool on_wblist;
3004
3005 xas_lock_irqsave(&xas, flags);
3006 xas_load(&xas);
3007 folio_test_set_writeback(folio);
3008
3009 on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
3010
3011 xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
3012 wb = inode_to_wb(inode);
3013 wb_stat_mod(wb, WB_WRITEBACK, nr);
3014 if (!on_wblist) {
3015 wb_inode_writeback_start(wb);
3016 /*
3017 * We can come through here when swapping anonymous
3018 * folios, so we don't necessarily have an inode to
3019 * track for sync.
3020 */
3021 if (mapping->host)
3022 sb_mark_inode_writeback(mapping->host);
3023 }
3024
3025 if (!folio_test_dirty(folio))
3026 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
3027 if (!keep_write)
3028 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3029 xas_unlock_irqrestore(&xas, flags);
3030 } else {
3031 folio_test_set_writeback(folio);
3032 }
3033
3034 lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3035 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
3036
3037 access_ret = arch_make_folio_accessible(folio);
3038 /*
3039 * If writeback has been triggered on a page that cannot be made
3040 * accessible, it is too late to recover here.
3041 */
3042 VM_BUG_ON_FOLIO(access_ret != 0, folio);
3043 }
3044 EXPORT_SYMBOL(__folio_start_writeback);
3045
3046 /**
3047 * folio_wait_writeback - Wait for a folio to finish writeback.
3048 * @folio: The folio to wait for.
3049 *
3050 * If the folio is currently being written back to storage, wait for the
3051 * I/O to complete.
3052 *
3053 * Context: Sleeps. Must be called in process context and with
3054 * no spinlocks held. Caller should hold a reference on the folio.
3055 * If the folio is not locked, writeback may start again after writeback
3056 * has finished.
3057 */
3058 void folio_wait_writeback(struct folio *folio)
3059 {
3060 while (folio_test_writeback(folio)) {
3061 trace_folio_wait_writeback(folio, folio_mapping(folio));
3062 folio_wait_bit(folio, PG_writeback);
3063 }
3064 }
3065 EXPORT_SYMBOL_GPL(folio_wait_writeback);
3066
3067 /**
3068 * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3069 * @folio: The folio to wait for.
3070 *
3071 * If the folio is currently being written back to storage, wait for the
3072 * I/O to complete or a fatal signal to arrive.
3073 *
3074 * Context: Sleeps. Must be called in process context and with
3075 * no spinlocks held. Caller should hold a reference on the folio.
3076 * If the folio is not locked, writeback may start again after writeback
3077 * has finished.
3078 * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3079 */
3080 int folio_wait_writeback_killable(struct folio *folio)
3081 {
3082 while (folio_test_writeback(folio)) {
3083 trace_folio_wait_writeback(folio, folio_mapping(folio));
3084 if (folio_wait_bit_killable(folio, PG_writeback))
3085 return -EINTR;
3086 }
3087
3088 return 0;
3089 }
3090 EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
3091
3092 /**
3093 * folio_wait_stable() - wait for writeback to finish, if necessary.
3094 * @folio: The folio to wait on.
3095 *
3096 * This function determines if the given folio is related to a backing
3097 * device that requires folio contents to be held stable during writeback.
3098 * If so, then it will wait for any pending writeback to complete.
3099 *
3100 * Context: Sleeps. Must be called in process context and with
3101 * no spinlocks held. Caller should hold a reference on the folio.
3102 * If the folio is not locked, writeback may start again after writeback
3103 * has finished.
3104 */
3105 void folio_wait_stable(struct folio *folio)
3106 {
3107 if (mapping_stable_writes(folio_mapping(folio)))
3108 folio_wait_writeback(folio);
3109 }
3110 EXPORT_SYMBOL_GPL(folio_wait_stable);
3111