xref: /linux/mm/page-writeback.c (revision 64dd89ae01f2708a508e028c28b7906e4702a9a7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/page-writeback.c
4  *
5  * Copyright (C) 2002, Linus Torvalds.
6  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
7  *
8  * Contains functions related to writing back dirty pages at the
9  * address_space level.
10  *
11  * 10Apr2002	Andrew Morton
12  *		Initial version
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/math64.h>
17 #include <linux/export.h>
18 #include <linux/spinlock.h>
19 #include <linux/fs.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/slab.h>
23 #include <linux/pagemap.h>
24 #include <linux/writeback.h>
25 #include <linux/init.h>
26 #include <linux/backing-dev.h>
27 #include <linux/task_io_accounting_ops.h>
28 #include <linux/blkdev.h>
29 #include <linux/mpage.h>
30 #include <linux/rmap.h>
31 #include <linux/percpu.h>
32 #include <linux/smp.h>
33 #include <linux/sysctl.h>
34 #include <linux/cpu.h>
35 #include <linux/syscalls.h>
36 #include <linux/pagevec.h>
37 #include <linux/timer.h>
38 #include <linux/sched/rt.h>
39 #include <linux/sched/signal.h>
40 #include <linux/mm_inline.h>
41 #include <linux/shmem_fs.h>
42 #include <trace/events/writeback.h>
43 
44 #include "internal.h"
45 
46 /*
47  * Sleep at most 200ms at a time in balance_dirty_pages().
48  */
49 #define MAX_PAUSE		max(HZ/5, 1)
50 
51 /*
52  * Try to keep the number of pages dirtied between balance_dirty_pages() calls
53  * above this threshold by raising the pause time to max_pause when it falls below it.
54  */
55 #define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
56 
57 /*
58  * Estimate write bandwidth or update dirty limit at 200ms intervals.
59  */
60 #define BANDWIDTH_INTERVAL	max(HZ/5, 1)
61 
62 #define RATELIMIT_CALC_SHIFT	10
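/*
 * For illustration, assuming HZ=1000 and PAGE_SIZE=4096 (PAGE_SHIFT=12), the
 * constants above work out to:
 *   MAX_PAUSE          = max(1000/5, 1)   = 200 jiffies (200ms)
 *   DIRTY_POLL_THRESH  = 128 >> (12 - 10) = 32 pages    (128KB)
 *   BANDWIDTH_INTERVAL = max(1000/5, 1)   = 200 jiffies (200ms)
 * RATELIMIT_CALC_SHIFT of 10 means ratios are handled in 1/1024 fixed point.
 */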
63 
64 /*
65  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
66  * will look to see if it needs to force writeback or throttling.
67  */
68 static long ratelimit_pages = 32;
69 
70 /* The following parameters are exported via /proc/sys/vm */
71 
72 /*
73  * Start background writeback (via writeback threads) at this percentage
74  */
75 static int dirty_background_ratio = 10;
76 
77 /*
78  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
79  * dirty_background_ratio * the amount of dirtyable memory
80  */
81 static unsigned long dirty_background_bytes;
82 
83 /*
84  * free highmem will not be subtracted from the total free memory
85  * for calculating free ratios if vm_highmem_is_dirtyable is true
86  */
87 static int vm_highmem_is_dirtyable;
88 
89 /*
90  * The generator of dirty data starts writeback at this percentage
91  */
92 static int vm_dirty_ratio = 20;
93 
94 /*
95  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
96  * vm_dirty_ratio * the amount of dirtyable memory
97  */
98 static unsigned long vm_dirty_bytes;
99 
100 /*
101  * The interval between `kupdate'-style writebacks
102  */
103 unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
104 
105 EXPORT_SYMBOL_GPL(dirty_writeback_interval);
106 
107 /*
108  * The longest time for which data is allowed to remain dirty
109  */
110 unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
111 
112 /* End of sysctl-exported parameters */
113 
114 struct wb_domain global_wb_domain;
115 
116 /*
117  * Length of period for aging writeout fractions of bdis. This is an
118  * arbitrarily chosen number. The longer the period, the more slowly the
119  * fractions will reflect changes in the current writeout rate.
120  */
121 #define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
122 
123 #ifdef CONFIG_CGROUP_WRITEBACK
124 
125 #define GDTC_INIT(__wb)		.wb = (__wb),				\
126 				.dom = &global_wb_domain,		\
127 				.wb_completions = &(__wb)->completions
128 
129 #define GDTC_INIT_NO_WB		.dom = &global_wb_domain
130 
131 #define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
132 				.dom = mem_cgroup_wb_domain(__wb),	\
133 				.wb_completions = &(__wb)->memcg_completions, \
134 				.gdtc = __gdtc
135 
136 static bool mdtc_valid(struct dirty_throttle_control *dtc)
137 {
138 	return dtc->dom;
139 }
140 
141 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
142 {
143 	return dtc->dom;
144 }
145 
146 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
147 {
148 	return mdtc->gdtc;
149 }
150 
151 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
152 {
153 	return &wb->memcg_completions;
154 }
155 
156 static void wb_min_max_ratio(struct bdi_writeback *wb,
157 			     unsigned long *minp, unsigned long *maxp)
158 {
159 	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
160 	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
161 	unsigned long long min = wb->bdi->min_ratio;
162 	unsigned long long max = wb->bdi->max_ratio;
163 
164 	/*
165 	 * @wb may already be clean by the time control reaches here and
166 	 * the total may not include its bw.
167 	 */
168 	if (this_bw < tot_bw) {
169 		if (min) {
170 			min *= this_bw;
171 			min = div64_ul(min, tot_bw);
172 		}
173 		if (max < 100 * BDI_RATIO_SCALE) {
174 			max *= this_bw;
175 			max = div64_ul(max, tot_bw);
176 		}
177 	}
178 
179 	*minp = min;
180 	*maxp = max;
181 }
182 
183 #else	/* CONFIG_CGROUP_WRITEBACK */
184 
185 #define GDTC_INIT(__wb)		.wb = (__wb),                           \
186 				.wb_completions = &(__wb)->completions
187 #define GDTC_INIT_NO_WB
188 #define MDTC_INIT(__wb, __gdtc)
189 
190 static bool mdtc_valid(struct dirty_throttle_control *dtc)
191 {
192 	return false;
193 }
194 
195 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
196 {
197 	return &global_wb_domain;
198 }
199 
200 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
201 {
202 	return NULL;
203 }
204 
205 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
206 {
207 	return NULL;
208 }
209 
210 static void wb_min_max_ratio(struct bdi_writeback *wb,
211 			     unsigned long *minp, unsigned long *maxp)
212 {
213 	*minp = wb->bdi->min_ratio;
214 	*maxp = wb->bdi->max_ratio;
215 }
216 
217 #endif	/* CONFIG_CGROUP_WRITEBACK */
218 
219 /*
220  * In a memory zone, there is a certain amount of pages we consider
221  * available for the page cache, which is essentially the number of
222  * free and reclaimable pages, minus some zone reserves to protect
223  * lowmem and the ability to uphold the zone's watermarks without
224  * requiring writeback.
225  *
226  * This number of dirtyable pages is the base value; the user-configurable
227  * dirty ratio determines, as a fraction of it, the effective number of pages
228  * that are allowed to be actually dirtied.  This applies per individual
229  * zone, or globally by using the sum of dirtyable pages over all zones.
230  *
231  * Because the user is allowed to specify the dirty limit globally as
232  * absolute number of bytes, calculating the per-zone dirty limit can
233  * require translating the configured limit into a percentage of
234  * global dirtyable memory first.
235  */
236 
237 /**
238  * node_dirtyable_memory - number of dirtyable pages in a node
239  * @pgdat: the node
240  *
241  * Return: the node's number of pages potentially available for dirty
242  * page cache.  This is the base value for the per-node dirty limits.
243  */
244 static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
245 {
246 	unsigned long nr_pages = 0;
247 	int z;
248 
249 	for (z = 0; z < MAX_NR_ZONES; z++) {
250 		struct zone *zone = pgdat->node_zones + z;
251 
252 		if (!populated_zone(zone))
253 			continue;
254 
255 		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
256 	}
257 
258 	/*
259 	 * Pages reserved for the kernel should not be considered
260 	 * dirtyable, to prevent a situation where reclaim has to
261 	 * clean pages in order to balance the zones.
262 	 */
263 	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
264 
265 	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
266 	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
267 
268 	return nr_pages;
269 }
270 
271 static unsigned long highmem_dirtyable_memory(unsigned long total)
272 {
273 #ifdef CONFIG_HIGHMEM
274 	int node;
275 	unsigned long x = 0;
276 	int i;
277 
278 	for_each_node_state(node, N_HIGH_MEMORY) {
279 		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
280 			struct zone *z;
281 			unsigned long nr_pages;
282 
283 			if (!is_highmem_idx(i))
284 				continue;
285 
286 			z = &NODE_DATA(node)->node_zones[i];
287 			if (!populated_zone(z))
288 				continue;
289 
290 			nr_pages = zone_page_state(z, NR_FREE_PAGES);
291 			/* watch for underflows */
292 			nr_pages -= min(nr_pages, high_wmark_pages(z));
293 			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
294 			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
295 			x += nr_pages;
296 		}
297 	}
298 
299 	/*
300 	 * Make sure that the number of highmem pages is never larger
301 	 * than the number of the total dirtyable memory. This can only
302 	 * occur in very strange VM situations but we want to make sure
303 	 * that this does not occur.
304 	 */
305 	return min(x, total);
306 #else
307 	return 0;
308 #endif
309 }
310 
311 /**
312  * global_dirtyable_memory - number of globally dirtyable pages
313  *
314  * Return: the global number of pages potentially available for dirty
315  * page cache.  This is the base value for the global dirty limits.
316  */
317 static unsigned long global_dirtyable_memory(void)
318 {
319 	unsigned long x;
320 
321 	x = global_zone_page_state(NR_FREE_PAGES);
322 	/*
323 	 * Pages reserved for the kernel should not be considered
324 	 * dirtyable, to prevent a situation where reclaim has to
325 	 * clean pages in order to balance the zones.
326 	 */
327 	x -= min(x, totalreserve_pages);
328 
329 	x += global_node_page_state(NR_INACTIVE_FILE);
330 	x += global_node_page_state(NR_ACTIVE_FILE);
331 
332 	if (!vm_highmem_is_dirtyable)
333 		x -= highmem_dirtyable_memory(x);
334 
335 	return x + 1;	/* Ensure that we never return 0 */
336 }
337 
338 /**
339  * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
340  * @dtc: dirty_throttle_control of interest
341  *
342  * Calculate @dtc->thresh and ->bg_thresh considering
343  * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
344  * must ensure that @dtc->avail is set before calling this function.  The
345  * dirty limits will be lifted by 1/4 for real-time tasks.
346  */
347 static void domain_dirty_limits(struct dirty_throttle_control *dtc)
348 {
349 	const unsigned long available_memory = dtc->avail;
350 	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
351 	unsigned long bytes = vm_dirty_bytes;
352 	unsigned long bg_bytes = dirty_background_bytes;
353 	/* convert ratios to per-PAGE_SIZE for higher precision */
354 	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
355 	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
356 	unsigned long thresh;
357 	unsigned long bg_thresh;
358 	struct task_struct *tsk;
359 
360 	/* gdtc is !NULL iff @dtc is for memcg domain */
361 	if (gdtc) {
362 		unsigned long global_avail = gdtc->avail;
363 
364 		/*
365 		 * The byte settings can't be applied directly to memcg
366 		 * domains.  Convert them to ratios by scaling against
367 		 * globally available memory.  As the ratios are in
368 		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
369 		 * number of pages.
370 		 */
371 		if (bytes)
372 			ratio = min(DIV_ROUND_UP(bytes, global_avail),
373 				    PAGE_SIZE);
374 		if (bg_bytes)
375 			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
376 				       PAGE_SIZE);
377 		bytes = bg_bytes = 0;
378 	}
379 
380 	if (bytes)
381 		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
382 	else
383 		thresh = (ratio * available_memory) / PAGE_SIZE;
384 
385 	if (bg_bytes)
386 		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
387 	else
388 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
389 
390 	tsk = current;
391 	if (rt_or_dl_task(tsk)) {
392 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
393 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
394 	}
395 	/*
396 	 * Dirty throttling logic assumes the limits in page units fit into
397 	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
398 	 */
399 	if (thresh > UINT_MAX)
400 		thresh = UINT_MAX;
401 	/* This makes sure bg_thresh is within 32-bits as well */
402 	if (bg_thresh >= thresh)
403 		bg_thresh = thresh / 2;
404 	dtc->thresh = thresh;
405 	dtc->bg_thresh = bg_thresh;
406 
407 	/* we should eventually report the domain in the TP */
408 	if (!gdtc)
409 		trace_global_dirty_state(bg_thresh, thresh);
410 }
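/*
 * A worked example, assuming the default vm_dirty_ratio=20 and
 * dirty_background_ratio=10, PAGE_SIZE=4096 and dtc->avail=1048576 pages (4GB):
 *   ratio     = (20 * 4096) / 100     = 819 (per-PAGE_SIZE fixed point)
 *   thresh    = 819 * 1048576 / 4096  = 209664 pages (~819MB)
 *   bg_ratio  = (10 * 4096) / 100     = 409
 *   bg_thresh = 409 * 1048576 / 4096  = 104704 pages (~409MB)
 * An rt/deadline task additionally gets both thresholds lifted by 1/4 plus
 * global_wb_domain.dirty_limit / 32.
 */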
411 
412 /**
413  * global_dirty_limits - background-writeback and dirty-throttling thresholds
414  * @pbackground: out parameter for bg_thresh
415  * @pdirty: out parameter for thresh
416  *
417  * Calculate bg_thresh and thresh for global_wb_domain.  See
418  * domain_dirty_limits() for details.
419  */
420 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
421 {
422 	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
423 
424 	gdtc.avail = global_dirtyable_memory();
425 	domain_dirty_limits(&gdtc);
426 
427 	*pbackground = gdtc.bg_thresh;
428 	*pdirty = gdtc.thresh;
429 }
430 
431 /**
432  * node_dirty_limit - maximum number of dirty pages allowed in a node
433  * @pgdat: the node
434  *
435  * Return: the maximum number of dirty pages allowed in a node, based
436  * on the node's dirtyable memory.
437  */
438 static unsigned long node_dirty_limit(struct pglist_data *pgdat)
439 {
440 	unsigned long node_memory = node_dirtyable_memory(pgdat);
441 	struct task_struct *tsk = current;
442 	unsigned long dirty;
443 
444 	if (vm_dirty_bytes)
445 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
446 			node_memory / global_dirtyable_memory();
447 	else
448 		dirty = vm_dirty_ratio * node_memory / 100;
449 
450 	if (rt_or_dl_task(tsk))
451 		dirty += dirty / 4;
452 
453 	/*
454 	 * Dirty throttling logic assumes the limits in page units fit into
455 	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
456 	 */
457 	return min_t(unsigned long, dirty, UINT_MAX);
458 }
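/*
 * For instance, a node with 262144 dirtyable pages (1GB at 4K pages) and the
 * default vm_dirty_ratio=20 gets a limit of 52428 pages (~205MB), raised by
 * another 25% for rt/deadline tasks.
 */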
459 
460 /**
461  * node_dirty_ok - tells whether a node is within its dirty limits
462  * @pgdat: the node to check
463  *
464  * Return: %true when the dirty pages in @pgdat are within the node's
465  * dirty limit, %false if the limit is exceeded.
466  */
467 bool node_dirty_ok(struct pglist_data *pgdat)
468 {
469 	unsigned long limit = node_dirty_limit(pgdat);
470 	unsigned long nr_pages = 0;
471 
472 	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
473 	nr_pages += node_page_state(pgdat, NR_WRITEBACK);
474 
475 	return nr_pages <= limit;
476 }
477 
478 #ifdef CONFIG_SYSCTL
479 static int dirty_background_ratio_handler(const struct ctl_table *table, int write,
480 		void *buffer, size_t *lenp, loff_t *ppos)
481 {
482 	int ret;
483 
484 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
485 	if (ret == 0 && write)
486 		dirty_background_bytes = 0;
487 	return ret;
488 }
489 
490 static int dirty_background_bytes_handler(const struct ctl_table *table, int write,
491 		void *buffer, size_t *lenp, loff_t *ppos)
492 {
493 	int ret;
494 	unsigned long old_bytes = dirty_background_bytes;
495 
496 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
497 	if (ret == 0 && write) {
498 		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
499 								UINT_MAX) {
500 			dirty_background_bytes = old_bytes;
501 			return -ERANGE;
502 		}
503 		dirty_background_ratio = 0;
504 	}
505 	return ret;
506 }
507 
508 static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer,
509 		size_t *lenp, loff_t *ppos)
510 {
511 	int old_ratio = vm_dirty_ratio;
512 	int ret;
513 
514 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
515 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
516 		vm_dirty_bytes = 0;
517 		writeback_set_ratelimit();
518 	}
519 	return ret;
520 }
521 
522 static int dirty_bytes_handler(const struct ctl_table *table, int write,
523 		void *buffer, size_t *lenp, loff_t *ppos)
524 {
525 	unsigned long old_bytes = vm_dirty_bytes;
526 	int ret;
527 
528 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
529 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
530 		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
531 			vm_dirty_bytes = old_bytes;
532 			return -ERANGE;
533 		}
534 		writeback_set_ratelimit();
535 		vm_dirty_ratio = 0;
536 	}
537 	return ret;
538 }
539 #endif
540 
541 static unsigned long wp_next_time(unsigned long cur_time)
542 {
543 	cur_time += VM_COMPLETIONS_PERIOD_LEN;
544 	/* 0 has a special meaning... */
545 	if (!cur_time)
546 		return 1;
547 	return cur_time;
548 }
549 
550 static void wb_domain_writeout_add(struct wb_domain *dom,
551 				   struct fprop_local_percpu *completions,
552 				   unsigned int max_prop_frac, long nr)
553 {
554 	__fprop_add_percpu_max(&dom->completions, completions,
555 			       max_prop_frac, nr);
556 	/* First event after period switching was turned off? */
557 	if (unlikely(!dom->period_time)) {
558 		/*
559 		 * We can race with other wb_domain_writeout_add calls here but
560 		 * it does not cause any harm since the resulting time when the
561 		 * timer will fire and the value in writeout_period_time will be
562 		 * roughly the same.
563 		 */
564 		dom->period_time = wp_next_time(jiffies);
565 		mod_timer(&dom->period_timer, dom->period_time);
566 	}
567 }
568 
569 /*
570  * Increment @wb's writeout completion count and the global writeout
571  * completion count. Called from __folio_end_writeback().
572  */
573 static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
574 {
575 	struct wb_domain *cgdom;
576 
577 	wb_stat_mod(wb, WB_WRITTEN, nr);
578 	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
579 			       wb->bdi->max_prop_frac, nr);
580 
581 	cgdom = mem_cgroup_wb_domain(wb);
582 	if (cgdom)
583 		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
584 				       wb->bdi->max_prop_frac, nr);
585 }
586 
587 void wb_writeout_inc(struct bdi_writeback *wb)
588 {
589 	unsigned long flags;
590 
591 	local_irq_save(flags);
592 	__wb_writeout_add(wb, 1);
593 	local_irq_restore(flags);
594 }
595 EXPORT_SYMBOL_GPL(wb_writeout_inc);
596 
597 /*
598  * On idle system, we can be called long after we scheduled because we use
599  * deferred timers so count with missed periods.
600  */
601 static void writeout_period(struct timer_list *t)
602 {
603 	struct wb_domain *dom = timer_container_of(dom, t, period_timer);
604 	int miss_periods = (jiffies - dom->period_time) /
605 						 VM_COMPLETIONS_PERIOD_LEN;
606 
607 	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
608 		dom->period_time = wp_next_time(dom->period_time +
609 				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
610 		mod_timer(&dom->period_timer, dom->period_time);
611 	} else {
612 		/*
613 		 * Aging has zeroed all fractions. Stop wasting CPU on period
614 		 * updates.
615 		 */
616 		dom->period_time = 0;
617 	}
618 }
619 
620 int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
621 {
622 	memset(dom, 0, sizeof(*dom));
623 
624 	spin_lock_init(&dom->lock);
625 
626 	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
627 
628 	dom->dirty_limit_tstamp = jiffies;
629 
630 	return fprop_global_init(&dom->completions, gfp);
631 }
632 
633 #ifdef CONFIG_CGROUP_WRITEBACK
634 void wb_domain_exit(struct wb_domain *dom)
635 {
636 	timer_delete_sync(&dom->period_timer);
637 	fprop_global_destroy(&dom->completions);
638 }
639 #endif
640 
641 /*
642  * bdi_min_ratio keeps the sum of the minimum dirty shares of all
643  * registered backing devices, which, for obvious reasons, can not
644  * exceed 100%.
645  */
646 static unsigned int bdi_min_ratio;
647 
648 static int bdi_check_pages_limit(unsigned long pages)
649 {
650 	unsigned long max_dirty_pages = global_dirtyable_memory();
651 
652 	if (pages > max_dirty_pages)
653 		return -EINVAL;
654 
655 	return 0;
656 }
657 
658 static unsigned long bdi_ratio_from_pages(unsigned long pages)
659 {
660 	unsigned long background_thresh;
661 	unsigned long dirty_thresh;
662 	unsigned long ratio;
663 
664 	global_dirty_limits(&background_thresh, &dirty_thresh);
665 	if (!dirty_thresh)
666 		return -EINVAL;
667 	ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);
668 
669 	return ratio;
670 }
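/*
 * For example, pages equal to one tenth of dirty_thresh map to
 * 10 * BDI_RATIO_SCALE, i.e. 10% expressed in the same scaled units that
 * bdi->min_ratio and bdi->max_ratio are stored in.
 */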
671 
672 static u64 bdi_get_bytes(unsigned int ratio)
673 {
674 	unsigned long background_thresh;
675 	unsigned long dirty_thresh;
676 	u64 bytes;
677 
678 	global_dirty_limits(&background_thresh, &dirty_thresh);
679 	bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;
680 
681 	return bytes;
682 }
683 
684 static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
685 {
686 	unsigned int delta;
687 	int ret = 0;
688 
689 	if (min_ratio > 100 * BDI_RATIO_SCALE)
690 		return -EINVAL;
691 
692 	spin_lock_bh(&bdi_lock);
693 	if (min_ratio > bdi->max_ratio) {
694 		ret = -EINVAL;
695 	} else {
696 		if (min_ratio < bdi->min_ratio) {
697 			delta = bdi->min_ratio - min_ratio;
698 			bdi_min_ratio -= delta;
699 			bdi->min_ratio = min_ratio;
700 		} else {
701 			delta = min_ratio - bdi->min_ratio;
702 			if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
703 				bdi_min_ratio += delta;
704 				bdi->min_ratio = min_ratio;
705 			} else {
706 				ret = -EINVAL;
707 			}
708 		}
709 	}
710 	spin_unlock_bh(&bdi_lock);
711 
712 	return ret;
713 }
714 
715 static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
716 {
717 	int ret = 0;
718 
719 	if (max_ratio > 100 * BDI_RATIO_SCALE)
720 		return -EINVAL;
721 
722 	spin_lock_bh(&bdi_lock);
723 	if (bdi->min_ratio > max_ratio) {
724 		ret = -EINVAL;
725 	} else {
726 		bdi->max_ratio = max_ratio;
727 		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
728 						(100 * BDI_RATIO_SCALE);
729 	}
730 	spin_unlock_bh(&bdi_lock);
731 
732 	return ret;
733 }
734 
735 int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
736 {
737 	return __bdi_set_min_ratio(bdi, min_ratio);
738 }
739 
740 int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
741 {
742 	return __bdi_set_max_ratio(bdi, max_ratio);
743 }
744 
745 int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
746 {
747 	return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
748 }
749 
750 int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
751 {
752 	return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
753 }
754 EXPORT_SYMBOL(bdi_set_max_ratio);
755 
756 u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
757 {
758 	return bdi_get_bytes(bdi->min_ratio);
759 }
760 
761 int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
762 {
763 	int ret;
764 	unsigned long pages = min_bytes >> PAGE_SHIFT;
765 	long min_ratio;
766 
767 	ret = bdi_check_pages_limit(pages);
768 	if (ret)
769 		return ret;
770 
771 	min_ratio = bdi_ratio_from_pages(pages);
772 	if (min_ratio < 0)
773 		return min_ratio;
774 	return __bdi_set_min_ratio(bdi, min_ratio);
775 }
776 
777 u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
778 {
779 	return bdi_get_bytes(bdi->max_ratio);
780 }
781 
782 int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
783 {
784 	int ret;
785 	unsigned long pages = max_bytes >> PAGE_SHIFT;
786 	long max_ratio;
787 
788 	ret = bdi_check_pages_limit(pages);
789 	if (ret)
790 		return ret;
791 
792 	max_ratio = bdi_ratio_from_pages(pages);
793 	if (max_ratio < 0)
794 		return max_ratio;
795 	return __bdi_set_max_ratio(bdi, max_ratio);
796 }
797 
798 int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
799 {
800 	if (strict_limit > 1)
801 		return -EINVAL;
802 
803 	spin_lock_bh(&bdi_lock);
804 	if (strict_limit)
805 		bdi->capabilities |= BDI_CAP_STRICTLIMIT;
806 	else
807 		bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
808 	spin_unlock_bh(&bdi_lock);
809 
810 	return 0;
811 }
812 
813 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
814 					   unsigned long bg_thresh)
815 {
816 	return (thresh + bg_thresh) / 2;
817 }
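/*
 * E.g. with the default 10% background and 20% dirty ratios, tasks are not
 * throttled at all until the dirty pages cross the 15% midpoint.
 */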
818 
819 static unsigned long hard_dirty_limit(struct wb_domain *dom,
820 				      unsigned long thresh)
821 {
822 	return max(thresh, dom->dirty_limit);
823 }
824 
825 /*
826  * Memory which can be further allocated to a memcg domain is capped by
827  * system-wide clean memory excluding the amount being used in the domain.
828  */
829 static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
830 			    unsigned long filepages, unsigned long headroom)
831 {
832 	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
833 	unsigned long clean = filepages - min(filepages, mdtc->dirty);
834 	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
835 	unsigned long other_clean = global_clean - min(global_clean, clean);
836 
837 	mdtc->avail = filepages + min(headroom, other_clean);
838 }
839 
840 static inline bool dtc_is_global(struct dirty_throttle_control *dtc)
841 {
842 	return mdtc_gdtc(dtc) == NULL;
843 }
844 
845 /*
846  * The dirty background check ignores pages already under writeback, since
847  * we are deciding whether to put more pages under writeback.
848  */
849 static void domain_dirty_avail(struct dirty_throttle_control *dtc,
850 			       bool include_writeback)
851 {
852 	if (dtc_is_global(dtc)) {
853 		dtc->avail = global_dirtyable_memory();
854 		dtc->dirty = global_node_page_state(NR_FILE_DIRTY);
855 		if (include_writeback)
856 			dtc->dirty += global_node_page_state(NR_WRITEBACK);
857 	} else {
858 		unsigned long filepages = 0, headroom = 0, writeback = 0;
859 
860 		mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty,
861 				    &writeback);
862 		if (include_writeback)
863 			dtc->dirty += writeback;
864 		mdtc_calc_avail(dtc, filepages, headroom);
865 	}
866 }
867 
868 /**
869  * __wb_calc_thresh - @wb's share of dirty threshold
870  * @dtc: dirty_throttle_control of interest
871  * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
872  *
873  * Note that balance_dirty_pages() only seriously takes the dirty throttling
874  * threshold as a hard limit when sleeping max_pause per page is not enough
875  * to keep the dirty pages under control. For example, when the device is
876  * completely stalled due to some error conditions, or when there are 1000
877  * dd tasks writing to a slow 10MB/s USB key.
878  * In the other normal situations, it acts more gently by throttling the tasks
879  * more (rather than completely block them) when the wb dirty pages go high.
880  *
881  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
882  * - starving fast devices
883  * - piling up dirty pages (that will take long time to sync) on slow devices
884  *
885  * The wb's share of dirty limit will be adapting to its throughput and
886  * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
887  *
888  * Return: @wb's dirty limit in pages. For the dirty throttling limit, the term
889  * "dirty" in the context of dirty balancing includes all PG_dirty and
890  * PG_writeback pages.
891  */
892 static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
893 				      unsigned long thresh)
894 {
895 	struct wb_domain *dom = dtc_dom(dtc);
896 	struct bdi_writeback *wb = dtc->wb;
897 	u64 wb_thresh;
898 	u64 wb_max_thresh;
899 	unsigned long numerator, denominator;
900 	unsigned long wb_min_ratio, wb_max_ratio;
901 
902 	/*
903 	 * Calculate this wb's share of the thresh ratio.
904 	 */
905 	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
906 			      &numerator, &denominator);
907 
908 	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
909 	wb_thresh *= numerator;
910 	wb_thresh = div64_ul(wb_thresh, denominator);
911 
912 	wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);
913 
914 	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
915 
916 	/*
917 	 * It's very possible that wb_thresh is close to 0 not because the
918 	 * device is slow, but because it has remained inactive for a long time.
919 	 * Grant such devices a reasonably good (hopefully IO efficient)
920 	 * threshold, so that occasional writes won't be blocked and active
921 	 * writes can ramp up the threshold quickly.
922 	 */
923 	if (thresh > dtc->dirty) {
924 		if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT))
925 			wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100);
926 		else
927 			wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8);
928 	}
929 
930 	wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
931 	if (wb_thresh > wb_max_thresh)
932 		wb_thresh = wb_max_thresh;
933 
934 	return wb_thresh;
935 }
936 
937 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
938 {
939 	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
940 
941 	domain_dirty_avail(&gdtc, true);
942 	return __wb_calc_thresh(&gdtc, thresh);
943 }
944 
945 unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
946 {
947 	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
948 	struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
949 
950 	domain_dirty_avail(&gdtc, true);
951 	domain_dirty_avail(&mdtc, true);
952 	domain_dirty_limits(&mdtc);
953 
954 	return __wb_calc_thresh(&mdtc, mdtc.thresh);
955 }
956 
957 /*
958  *                           setpoint - dirty 3
959  *        f(dirty) := 1.0 + (----------------)
960  *                           limit - setpoint
961  *
962  * It is a 3rd-order polynomial, subject to
963  *
964  * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
965  * (2) f(setpoint) = 1.0 => the balance point
966  * (3) f(limit)    = 0   => the hard limit
967  * (4) df/dx      <= 0	 => negative feedback control
968  * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
969  *     => fast response on large errors; small oscillation near setpoint
970  */
971 static long long pos_ratio_polynom(unsigned long setpoint,
972 					  unsigned long dirty,
973 					  unsigned long limit)
974 {
975 	long long pos_ratio;
976 	long x;
977 
978 	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
979 		      (limit - setpoint) | 1);
980 	pos_ratio = x;
981 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
982 	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
983 	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
984 
985 	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
986 }
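/*
 * A quick sanity check of the curve, assuming setpoint=100000 pages and
 * limit=200000 pages:
 *   dirty = setpoint            -> f = 1.0   (pos_ratio ~ 1 << RATELIMIT_CALC_SHIFT)
 *   dirty = 150000 (halfway up) -> f = 1 - 0.5^3 = 0.875
 *   dirty =  50000              -> f = 1 + 0.5^3 = 1.125
 *   dirty = limit               -> f = 0.0   (hard limit)
 * with the result clamped to [0, 2.0], matching properties (1)-(4) above.
 */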
987 
988 /*
989  * Dirty position control.
990  *
991  * (o) global/bdi setpoints
992  *
993  * We want the dirty pages be balanced around the global/wb setpoints.
994  * When the number of dirty pages is higher/lower than the setpoint, the
995  * dirty position control ratio (and hence task dirty ratelimit) will be
996  * decreased/increased to bring the dirty pages back to the setpoint.
997  *
998  *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
999  *
1000  *     if (dirty < setpoint) scale up   pos_ratio
1001  *     if (dirty > setpoint) scale down pos_ratio
1002  *
1003  *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
1004  *     if (wb_dirty > wb_setpoint) scale down pos_ratio
1005  *
1006  *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
1007  *
1008  * (o) global control line
1009  *
1010  *     ^ pos_ratio
1011  *     |
1012  *     |            |<===== global dirty control scope ======>|
1013  * 2.0  * * * * * * *
1014  *     |            .*
1015  *     |            . *
1016  *     |            .   *
1017  *     |            .     *
1018  *     |            .        *
1019  *     |            .            *
1020  * 1.0 ................................*
1021  *     |            .                  .     *
1022  *     |            .                  .          *
1023  *     |            .                  .              *
1024  *     |            .                  .                 *
1025  *     |            .                  .                    *
1026  *   0 +------------.------------------.----------------------*------------->
1027  *           freerun^          setpoint^                 limit^   dirty pages
1028  *
1029  * (o) wb control line
1030  *
1031  *     ^ pos_ratio
1032  *     |
1033  *     |            *
1034  *     |              *
1035  *     |                *
1036  *     |                  *
1037  *     |                    * |<=========== span ============>|
1038  * 1.0 .......................*
1039  *     |                      . *
1040  *     |                      .   *
1041  *     |                      .     *
1042  *     |                      .       *
1043  *     |                      .         *
1044  *     |                      .           *
1045  *     |                      .             *
1046  *     |                      .               *
1047  *     |                      .                 *
1048  *     |                      .                   *
1049  *     |                      .                     *
1050  * 1/4 ...............................................* * * * * * * * * * * *
1051  *     |                      .                         .
1052  *     |                      .                           .
1053  *     |                      .                             .
1054  *   0 +----------------------.-------------------------------.------------->
1055  *                wb_setpoint^                    x_intercept^
1056  *
1057  * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
1058  * be smoothly throttled down to normal if it starts high in situations like
1059  * - start writing to a slow SD card and a fast disk at the same time. The SD
1060  *   card's wb_dirty may rush to many times higher than wb_setpoint.
1061  * - the wb dirty thresh drops quickly due to a change of JBOD workload
1062  */
1063 static void wb_position_ratio(struct dirty_throttle_control *dtc)
1064 {
1065 	struct bdi_writeback *wb = dtc->wb;
1066 	unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
1067 	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1068 	unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1069 	unsigned long wb_thresh = dtc->wb_thresh;
1070 	unsigned long x_intercept;
1071 	unsigned long setpoint;		/* dirty pages' target balance point */
1072 	unsigned long wb_setpoint;
1073 	unsigned long span;
1074 	long long pos_ratio;		/* for scaling up/down the rate limit */
1075 	long x;
1076 
1077 	dtc->pos_ratio = 0;
1078 
1079 	if (unlikely(dtc->dirty >= limit))
1080 		return;
1081 
1082 	/*
1083 	 * global setpoint
1084 	 *
1085 	 * See comment for pos_ratio_polynom().
1086 	 */
1087 	setpoint = (freerun + limit) / 2;
1088 	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
1089 
1090 	/*
1091 	 * The strictlimit feature is a tool preventing mistrusted filesystems
1092 	 * from growing a large number of dirty pages before throttling. For
1093 	 * such filesystems balance_dirty_pages always checks wb counters
1094 	 * against wb limits. Even if global "nr_dirty" is under "freerun".
1095 	 * This is especially important for fuse which sets bdi->max_ratio to
1096 	 * 1% by default.
1097 	 *
1098 	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
1099 	 * two values: wb_dirty and wb_thresh. Let's consider an example:
1100 	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
1101 	 * limits are set by default to 10% and 20% (background and throttle).
1102 	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
1103 	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
1104 	 * about ~6K pages (as the average of background and throttle wb
1105 	 * limits). The 3rd order polynomial will provide positive feedback if
1106 	 * wb_dirty is under wb_setpoint and vice versa.
1107 	 *
1108 	 * Note that we cannot use global counters in these calculations
1109 	 * because we want to throttle a process writing to a strictlimit wb
1110 	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
1111 	 * in the example above).
1112 	 */
1113 	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1114 		long long wb_pos_ratio;
1115 
1116 		if (dtc->wb_dirty >= wb_thresh)
1117 			return;
1118 
1119 		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
1120 						    dtc->wb_bg_thresh);
1121 
1122 		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
1123 			return;
1124 
1125 		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
1126 						 wb_thresh);
1127 
1128 		/*
1129 		 * Typically, for the strictlimit case, wb_setpoint << setpoint
1130 		 * and pos_ratio >> wb_pos_ratio. In other words, the global
1131 		 * state ("dirty") is not the limiting factor and we have to
1132 		 * make decision based on wb counters. But there is an
1133 		 * important case when global pos_ratio should get precedence:
1134 		 * global limits are exceeded (e.g. due to activities on other
1135 		 * wb's) while given strictlimit wb is below limit.
1136 		 *
1137 		 * "pos_ratio * wb_pos_ratio" would work for the case above,
1138 		 * but it would look too non-natural for the case of all
1139 		 * activity in the system coming from a single strictlimit wb
1140 		 * with bdi->max_ratio == 100%.
1141 		 *
1142 		 * Note that min() below somewhat changes the dynamics of the
1143 		 * control system. Normally, pos_ratio value can be well over 3
1144 		 * (when globally we are at freerun and wb is well below wb
1145 		 * setpoint). Now the maximum pos_ratio in the same situation
1146 		 * is 2. We might want to tweak this if we observe the control
1147 		 * system is too slow to adapt.
1148 		 */
1149 		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1150 		return;
1151 	}
1152 
1153 	/*
1154 	 * We have computed basic pos_ratio above based on global situation. If
1155 	 * the wb is over/under its share of dirty pages, we want to scale
1156 	 * pos_ratio further down/up. That is done by the following mechanism.
1157 	 */
1158 
1159 	/*
1160 	 * wb setpoint
1161 	 *
1162 	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
1163 	 *
1164 	 *                        x_intercept - wb_dirty
1165 	 *                     := --------------------------
1166 	 *                        x_intercept - wb_setpoint
1167 	 *
1168 	 * The main wb control line is a linear function, subject to
1169 	 *
1170 	 * (1) f(wb_setpoint) = 1.0
1171 	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
1172 	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
1173 	 *
1174 	 * For single wb case, the dirty pages are observed to fluctuate
1175 	 * regularly within range
1176 	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
1177 	 * for various filesystems, where (2) can yield a reasonable 12.5%
1178 	 * fluctuation range for pos_ratio.
1179 	 *
1180 	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
1181 	 * own size, so move the slope over accordingly and choose a slope that
1182 	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
1183 	 */
1184 	if (unlikely(wb_thresh > dtc->thresh))
1185 		wb_thresh = dtc->thresh;
1186 	/*
1187 	 * scale global setpoint to wb's:
1188 	 *	wb_setpoint = setpoint * wb_thresh / thresh
1189 	 */
1190 	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1191 	wb_setpoint = setpoint * (u64)x >> 16;
1192 	/*
1193 	 * Use span=(8*write_bw) in single wb case as indicated by
1194 	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1195 	 *
1196 	 *        wb_thresh                    thresh - wb_thresh
1197 	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1198 	 *         thresh                           thresh
1199 	 */
1200 	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1201 	x_intercept = wb_setpoint + span;
1202 
1203 	if (dtc->wb_dirty < x_intercept - span / 4) {
1204 		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1205 				      (x_intercept - wb_setpoint) | 1);
1206 	} else
1207 		pos_ratio /= 4;
1208 
1209 	/*
1210 	 * wb reserve area, safeguard against dirty pool underrun and disk idle
1211 	 * It may push the desired control point of global dirty pages higher
1212 	 * than setpoint.
1213 	 */
1214 	x_intercept = wb_thresh / 2;
1215 	if (dtc->wb_dirty < x_intercept) {
1216 		if (dtc->wb_dirty > x_intercept / 8)
1217 			pos_ratio = div_u64(pos_ratio * x_intercept,
1218 					    dtc->wb_dirty);
1219 		else
1220 			pos_ratio *= 8;
1221 	}
1222 
1223 	dtc->pos_ratio = pos_ratio;
1224 }
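/*
 * To put the wb control line in concrete terms, assume a single wb with
 * write_bw = 25600 pages/s (100MB/s at 4K pages) and wb_thresh ~= thresh:
 *   span        ~= 8 * write_bw = 204800 pages (~800MB)
 *   x_intercept  = wb_setpoint + span
 * so while wb_dirty stays within wb_setpoint +- write_bw/2, the linear factor
 * (x_intercept - wb_dirty) / (x_intercept - wb_setpoint) only moves by about
 * +-1/16, i.e. the ~12.5% pos_ratio fluctuation range mentioned above.
 */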
1225 
1226 static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1227 				      unsigned long elapsed,
1228 				      unsigned long written)
1229 {
1230 	const unsigned long period = roundup_pow_of_two(3 * HZ);
1231 	unsigned long avg = wb->avg_write_bandwidth;
1232 	unsigned long old = wb->write_bandwidth;
1233 	u64 bw;
1234 
1235 	/*
1236 	 * bw = written * HZ / elapsed
1237 	 *
1238 	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
1239 	 * write_bandwidth = ---------------------------------------------------
1240 	 *                                          period
1241 	 *
1242 	 * @written may have decreased due to folio_redirty_for_writepage().
1243 	 * Avoid underflowing @bw calculation.
1244 	 */
1245 	bw = written - min(written, wb->written_stamp);
1246 	bw *= HZ;
1247 	if (unlikely(elapsed > period)) {
1248 		bw = div64_ul(bw, elapsed);
1249 		avg = bw;
1250 		goto out;
1251 	}
1252 	bw += (u64)wb->write_bandwidth * (period - elapsed);
1253 	bw >>= ilog2(period);
1254 
1255 	/*
1256 	 * one more level of smoothing, for filtering out sudden spikes
1257 	 */
1258 	if (avg > old && old >= (unsigned long)bw)
1259 		avg -= (avg - old) >> 3;
1260 
1261 	if (avg < old && old <= (unsigned long)bw)
1262 		avg += (old - avg) >> 3;
1263 
1264 out:
1265 	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1266 	avg = max(avg, 1LU);
1267 	if (wb_has_dirty_io(wb)) {
1268 		long delta = avg - wb->avg_write_bandwidth;
1269 		WARN_ON_ONCE(atomic_long_add_return(delta,
1270 					&wb->bdi->tot_write_bandwidth) <= 0);
1271 	}
1272 	wb->write_bandwidth = bw;
1273 	WRITE_ONCE(wb->avg_write_bandwidth, avg);
1274 }
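/*
 * Example of the averaging, assuming HZ=1000 (so period = 4096 jiffies): if
 * 5120 pages (20MB) were written during elapsed = 200 jiffies, the
 * instantaneous rate is 5120 * 1000 / 200 = 25600 pages/s (100MB/s), but it
 * only gets an elapsed/period ~= 5% weight.  Starting from an old
 * write_bandwidth of 12800 pages/s (50MB/s):
 *   bw = (5120*1000 + 12800*(4096-200)) >> 12 ~= 13425 pages/s (~52MB/s)
 * and the extra smoothing then nudges avg by 1/8 of its distance from the
 * previous write_bandwidth.
 */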
1275 
1276 static void update_dirty_limit(struct dirty_throttle_control *dtc)
1277 {
1278 	struct wb_domain *dom = dtc_dom(dtc);
1279 	unsigned long thresh = dtc->thresh;
1280 	unsigned long limit = dom->dirty_limit;
1281 
1282 	/*
1283 	 * Follow up in one step.
1284 	 */
1285 	if (limit < thresh) {
1286 		limit = thresh;
1287 		goto update;
1288 	}
1289 
1290 	/*
1291 	 * Follow down slowly. Use the higher one as the target, because thresh
1292 	 * may drop below dirty. This is exactly the reason to introduce
1293 	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1294 	 */
1295 	thresh = max(thresh, dtc->dirty);
1296 	if (limit > thresh) {
1297 		limit -= (limit - thresh) >> 5;
1298 		goto update;
1299 	}
1300 	return;
1301 update:
1302 	dom->dirty_limit = limit;
1303 }
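/*
 * update_dirty_limit() runs at most once per BANDWIDTH_INTERVAL (200ms, see
 * domain_update_dirty_limit() below), and the "follow down slowly" path
 * closes (limit - thresh) / 32, i.e. about 3% of the gap, per update.
 * Halving the gap therefore takes roughly 22 updates, on the order of 4-5
 * seconds.
 */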
1304 
1305 static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1306 				      unsigned long now)
1307 {
1308 	struct wb_domain *dom = dtc_dom(dtc);
1309 
1310 	/*
1311 	 * check locklessly first to optimize away locking for the most time
1312 	 */
1313 	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
1314 		return;
1315 
1316 	spin_lock(&dom->lock);
1317 	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1318 		update_dirty_limit(dtc);
1319 		dom->dirty_limit_tstamp = now;
1320 	}
1321 	spin_unlock(&dom->lock);
1322 }
1323 
1324 /*
1325  * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1326  *
1327  * Normal wb tasks will be curbed at or below it in long term.
1328  * Obviously it should be around (write_bw / N) when there are N dd tasks.
1329  */
1330 static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1331 				      unsigned long dirtied,
1332 				      unsigned long elapsed)
1333 {
1334 	struct bdi_writeback *wb = dtc->wb;
1335 	unsigned long dirty = dtc->dirty;
1336 	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1337 	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1338 	unsigned long setpoint = (freerun + limit) / 2;
1339 	unsigned long write_bw = wb->avg_write_bandwidth;
1340 	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1341 	unsigned long dirty_rate;
1342 	unsigned long task_ratelimit;
1343 	unsigned long balanced_dirty_ratelimit;
1344 	unsigned long step;
1345 	unsigned long x;
1346 	unsigned long shift;
1347 
1348 	/*
1349 	 * The dirty rate will match the writeout rate in long term, except
1350 	 * when dirty pages are truncated by userspace or re-dirtied by FS.
1351 	 */
1352 	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1353 
1354 	/*
1355 	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1356 	 */
1357 	task_ratelimit = (u64)dirty_ratelimit *
1358 					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1359 	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1360 	task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */
1361 	/*
1362 	 * A linear estimation of the "balanced" throttle rate. The theory is,
1363 	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1364 	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1365 	 * formula will yield the balanced rate limit (write_bw / N).
1366 	 *
1367 	 * Note that the expanded form is not a pure rate feedback:
1368 	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
1369 	 * but also takes pos_ratio into account:
1370 	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
1371 	 *
1372 	 * (1) is not realistic because pos_ratio also takes part in balancing
1373 	 * the dirty rate.  Consider the state
1374 	 *	pos_ratio = 0.5						     (3)
1375 	 *	rate = 2 * (write_bw / N)				     (4)
1376 	 * If (1) is used, it will get stuck in that state, because each dd will
1377 	 * be throttled at
1378 	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
1379 	 * yielding
1380 	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
1381 	 * put (6) into (1) we get
1382 	 *	rate_(i+1) = rate_(i)					     (7)
1383 	 *
1384 	 * So we end up using (2) to always keep
1385 	 *	rate_(i+1) ~= (write_bw / N)				     (8)
1386 	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1387 	 * pos_ratio is able to drive itself to 1.0, which is not only where
1388 	 * the dirty count meet the setpoint, but also where the slope of
1389 	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1390 	 */
1391 	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1392 					   dirty_rate | 1);
1393 	/*
1394 	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1395 	 */
1396 	if (unlikely(balanced_dirty_ratelimit > write_bw))
1397 		balanced_dirty_ratelimit = write_bw;
1398 
1399 	/*
1400 	 * We could safely do this and return immediately:
1401 	 *
1402 	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
1403 	 *
1404 	 * However to get a more stable dirty_ratelimit, the below elaborated
1405 	 * code makes use of task_ratelimit to filter out singular points and
1406 	 * limit the step size.
1407 	 *
1408 	 * The below code essentially only uses the relative value of
1409 	 *
1410 	 *	task_ratelimit - dirty_ratelimit
1411 	 *	= (pos_ratio - 1) * dirty_ratelimit
1412 	 *
1413 	 * which reflects the direction and size of dirty position error.
1414 	 */
1415 
1416 	/*
1417 	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1418 	 * task_ratelimit is on the same side of dirty_ratelimit, too.
1419 	 * For example, when
1420 	 * - dirty_ratelimit > balanced_dirty_ratelimit
1421 	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1422 	 * lowering dirty_ratelimit will help meet both the position and rate
1423 	 * control targets. Otherwise, don't update dirty_ratelimit if it will
1424 	 * only help meet the rate target. After all, what the users ultimately
1425 	 * feel and care are stable dirty rate and small position error.
1426 	 *
1427 	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1428 	 * and filter out the singular points of balanced_dirty_ratelimit, which
1429 	 * keeps jumping around randomly and can even leap far away at times
1430 	 * due to the small 200ms estimation period of dirty_rate (we want to
1431 	 * keep that period small to reduce time lags).
1432 	 */
1433 	step = 0;
1434 
1435 	/*
1436 	 * For the strictlimit case, the calculations above were based on wb counters
1437 	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1438 	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1439 	 * Hence, to calculate "step" properly, we have to use wb_dirty as
1440 	 * "dirty" and wb_setpoint as "setpoint".
1441 	 */
1442 	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1443 		dirty = dtc->wb_dirty;
1444 		setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1445 	}
1446 
1447 	if (dirty < setpoint) {
1448 		x = min3(wb->balanced_dirty_ratelimit,
1449 			 balanced_dirty_ratelimit, task_ratelimit);
1450 		if (dirty_ratelimit < x)
1451 			step = x - dirty_ratelimit;
1452 	} else {
1453 		x = max3(wb->balanced_dirty_ratelimit,
1454 			 balanced_dirty_ratelimit, task_ratelimit);
1455 		if (dirty_ratelimit > x)
1456 			step = dirty_ratelimit - x;
1457 	}
1458 
1459 	/*
1460 	 * Don't pursue 100% rate matching. It's impossible since the balanced
1461 	 * rate itself is constantly fluctuating. So decrease the track speed
1462 	 * when it gets close to the target. Helps eliminate pointless tremors.
1463 	 */
1464 	shift = dirty_ratelimit / (2 * step + 1);
1465 	if (shift < BITS_PER_LONG)
1466 		step = DIV_ROUND_UP(step >> shift, 8);
1467 	else
1468 		step = 0;
1469 
1470 	if (dirty_ratelimit < balanced_dirty_ratelimit)
1471 		dirty_ratelimit += step;
1472 	else
1473 		dirty_ratelimit -= step;
1474 
1475 	WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1476 	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1477 
1478 	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1479 }
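/*
 * Numerically, with write_bw = 25600 pages/s (100MB/s) and 4 dd tasks each
 * currently throttled at task_ratelimit, the measured dirty_rate is about
 * 4 * task_ratelimit, so
 *   balanced_dirty_ratelimit ~= task_ratelimit * 25600 / (4 * task_ratelimit)
 *                             = 6400 pages/s = write_bw / 4 (~25MB/s),
 * and dirty_ratelimit is then stepped toward that value, subject to the
 * filtering and step-size limiting above.
 */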
1480 
1481 static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1482 				  struct dirty_throttle_control *mdtc,
1483 				  bool update_ratelimit)
1484 {
1485 	struct bdi_writeback *wb = gdtc->wb;
1486 	unsigned long now = jiffies;
1487 	unsigned long elapsed;
1488 	unsigned long dirtied;
1489 	unsigned long written;
1490 
1491 	spin_lock(&wb->list_lock);
1492 
1493 	/*
1494 	 * Lockless checks for elapsed time are racy and delayed update after
1495 	 * IO completion doesn't do it at all (to make sure written pages are
1496 	 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
1497 	 * division errors.
1498 	 */
1499 	elapsed = max(now - wb->bw_time_stamp, 1UL);
1500 	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1501 	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1502 
1503 	if (update_ratelimit) {
1504 		domain_update_dirty_limit(gdtc, now);
1505 		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1506 
1507 		/*
1508 		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1509 		 * compiler has no way to figure that out.  Help it.
1510 		 */
1511 		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1512 			domain_update_dirty_limit(mdtc, now);
1513 			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1514 		}
1515 	}
1516 	wb_update_write_bandwidth(wb, elapsed, written);
1517 
1518 	wb->dirtied_stamp = dirtied;
1519 	wb->written_stamp = written;
1520 	WRITE_ONCE(wb->bw_time_stamp, now);
1521 	spin_unlock(&wb->list_lock);
1522 }
1523 
1524 void wb_update_bandwidth(struct bdi_writeback *wb)
1525 {
1526 	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1527 
1528 	__wb_update_bandwidth(&gdtc, NULL, false);
1529 }
1530 
1531 /* Interval after which we consider wb idle and don't estimate bandwidth */
1532 #define WB_BANDWIDTH_IDLE_JIF (HZ)
1533 
1534 static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1535 {
1536 	unsigned long now = jiffies;
1537 	unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1538 
1539 	if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1540 	    !atomic_read(&wb->writeback_inodes)) {
1541 		spin_lock(&wb->list_lock);
1542 		wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1543 		wb->written_stamp = wb_stat(wb, WB_WRITTEN);
1544 		WRITE_ONCE(wb->bw_time_stamp, now);
1545 		spin_unlock(&wb->list_lock);
1546 	}
1547 }
1548 
1549 /*
1550  * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1551  * will look to see if it needs to start dirty throttling.
1552  *
1553  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1554  * global_zone_page_state() too often. So scale it roughly as the square root of
1555  * the safety margin (the number of pages we may dirty without exceeding the dirty limits).
1556  */
1557 static unsigned long dirty_poll_interval(unsigned long dirty,
1558 					 unsigned long thresh)
1559 {
1560 	if (thresh > dirty)
1561 		return 1UL << (ilog2(thresh - dirty) >> 1);
1562 
1563 	return 1;
1564 }
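/*
 * For example, a safety margin of thresh - dirty = 10000 pages returns
 * 1 << (ilog2(10000) >> 1) = 1 << 6 = 64 pages, a power-of-two
 * approximation of sqrt(10000) = 100.
 */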
1565 
1566 static unsigned long wb_max_pause(struct bdi_writeback *wb,
1567 				  unsigned long wb_dirty)
1568 {
1569 	unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1570 	unsigned long t;
1571 
1572 	/*
1573 	 * Limit pause time for small memory systems. If we sleep for too long,
1574 	 * a small pool of dirty/writeback pages may go empty and the disk may go
1575 	 * idle.
1576 	 *
1577 	 * 8 serves as the safety ratio.
1578 	 */
1579 	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1580 	t++;
1581 
1582 	return min_t(unsigned long, t, MAX_PAUSE);
1583 }
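/*
 * For example, with HZ=1000 and bw = 25600 pages/s (100MB/s) the divisor is
 * 1 + 25600 / roundup_pow_of_two(1 + 125) = 1 + 25600/128 = 201, so
 * wb_dirty = 10000 pages gives t = 10000/201 + 1 = 50 jiffies (50ms), well
 * below MAX_PAUSE.
 */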
1584 
1585 static long wb_min_pause(struct bdi_writeback *wb,
1586 			 long max_pause,
1587 			 unsigned long task_ratelimit,
1588 			 unsigned long dirty_ratelimit,
1589 			 int *nr_dirtied_pause)
1590 {
1591 	long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
1592 	long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
1593 	long t;		/* target pause */
1594 	long pause;	/* estimated next pause */
1595 	int pages;	/* target nr_dirtied_pause */
1596 
1597 	/* target for 10ms pause on 1-dd case */
1598 	t = max(1, HZ / 100);
1599 
1600 	/*
1601 	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1602 	 * overheads.
1603 	 *
1604 	 * (N * 10ms) on 2^N concurrent tasks.
1605 	 */
1606 	if (hi > lo)
1607 		t += (hi - lo) * (10 * HZ) / 1024;
1608 
1609 	/*
1610 	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1611 	 * on the much more stable dirty_ratelimit. However the next pause time
1612 	 * will be computed based on task_ratelimit and the two rate limits may
1613 	 * depart considerably at some time. Especially if task_ratelimit goes
1614 	 * diverge considerably at times. Especially if task_ratelimit goes
1615 	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
1616 	 * result task_ratelimit won't be executed faithfully, which could
1617 	 * eventually bring down dirty_ratelimit.
1618 	 *
1619 	 * We apply two rules to fix it up:
1620 	 * 1) try to estimate the next pause time and if necessary, use a lower
1621 	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
1622 	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
1623 	 * 2) limit the target pause time to max_pause/2, so that the normal
1624 	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
1625 	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
1626 	 */
1627 	t = min(t, 1 + max_pause / 2);
1628 	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1629 
1630 	/*
1631 	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1632 	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1633 	 * When the 16 consecutive reads are often interrupted by some dirty
1634 	 * throttling pause during the async writes, cfq will go idle
1635 	 * (deadline is fine). So push nr_dirtied_pause as high as possible
1636 	 * until it reaches DIRTY_POLL_THRESH=32 pages.
1637 	 */
1638 	if (pages < DIRTY_POLL_THRESH) {
1639 		t = max_pause;
1640 		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1641 		if (pages > DIRTY_POLL_THRESH) {
1642 			pages = DIRTY_POLL_THRESH;
1643 			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1644 		}
1645 	}
1646 
1647 	pause = HZ * pages / (task_ratelimit + 1);
1648 	if (pause > max_pause) {
1649 		t = max_pause;
1650 		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1651 	}
1652 
1653 	*nr_dirtied_pause = pages;
1654 	/*
1655 	 * The minimal pause time will normally be half the target pause time.
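	 *
	 * E.g. assuming HZ=1000, max_pause=200 and a single dirtier with
	 * task_ratelimit == dirty_ratelimit == 16000 pages/s: t = 10,
	 * pages = 16000 * 10 / 1024 = 156, so the task dirties ~156 pages
	 * between pauses and the minimal pause is 1 + 10/2 = 6 jiffies.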
1656 	 */
1657 	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1658 }
1659 
1660 static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1661 {
1662 	struct bdi_writeback *wb = dtc->wb;
1663 	unsigned long wb_reclaimable;
1664 
1665 	/*
1666 	 * wb_thresh is not treated as a hard limiting factor the way
1667 	 * dirty_thresh is, for two reasons:
1668 	 * - in JBOD setup, wb_thresh can fluctuate a lot
1669 	 * - in a system with HDD and USB key, the USB key may somehow
1670 	 *   go into state (wb_dirty >> wb_thresh) either because
1671 	 *   wb_dirty starts high, or because wb_thresh drops low.
1672 	 *   In this case we don't want to hard throttle the USB key
1673 	 *   dirtiers for 100 seconds until wb_dirty drops under
1674 	 *   wb_thresh. Instead the auxiliary wb control line in
1675 	 *   wb_position_ratio() will let the dirtier task progress
1676 	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
1677 	 */
1678 	dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
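	/*
	 * Keep wb_bg_thresh in the same proportion to wb_thresh as
	 * bg_thresh is to thresh.
	 */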
1679 	dtc->wb_bg_thresh = dtc->thresh ?
1680 		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1681 
1682 	/*
1683 	 * In order to avoid the stacked BDI deadlock we need
1684 	 * to ensure we accurately count the 'dirty' pages when
1685 	 * the threshold is low.
1686 	 *
1687 	 * Otherwise it would be possible to get thresh+n pages
1688 	 * reported dirty, even though there are thresh-m pages
1689 	 * actually dirty; with m+n sitting in the percpu
1690 	 * deltas.
1691 	 */
1692 	if (dtc->wb_thresh < 2 * wb_stat_error()) {
1693 		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1694 		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1695 	} else {
1696 		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1697 		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1698 	}
1699 }
1700 
1701 static unsigned long domain_poll_intv(struct dirty_throttle_control *dtc,
1702 				      bool strictlimit)
1703 {
1704 	unsigned long dirty, thresh;
1705 
1706 	if (strictlimit) {
1707 		dirty = dtc->wb_dirty;
1708 		thresh = dtc->wb_thresh;
1709 	} else {
1710 		dirty = dtc->dirty;
1711 		thresh = dtc->thresh;
1712 	}
1713 
1714 	return dirty_poll_interval(dirty, thresh);
1715 }
1716 
1717 /*
1718  * Throttle only when the background writeback cannot catch up. This avoids
1719  * (excessively) small writeouts when the wb limits are ramping up in the
1720  * !strictlimit case.
1721  *
1722  * In the strictlimit case, make the decision based on the wb counters and
1723  * limits. Small writeouts when the wb limits are ramping up are the price we
1724  * consciously pay for strictlimit-ing.
1725  */
1726 static void domain_dirty_freerun(struct dirty_throttle_control *dtc,
1727 				 bool strictlimit)
1728 {
1729 	unsigned long dirty, thresh, bg_thresh;
1730 
1731 	if (unlikely(strictlimit)) {
1732 		wb_dirty_limits(dtc);
1733 		dirty = dtc->wb_dirty;
1734 		thresh = dtc->wb_thresh;
1735 		bg_thresh = dtc->wb_bg_thresh;
1736 	} else {
1737 		dirty = dtc->dirty;
1738 		thresh = dtc->thresh;
1739 		bg_thresh = dtc->bg_thresh;
1740 	}
1741 	dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh);
1742 }
1743 
1744 static void balance_domain_limits(struct dirty_throttle_control *dtc,
1745 				  bool strictlimit)
1746 {
1747 	domain_dirty_avail(dtc, true);
1748 	domain_dirty_limits(dtc);
1749 	domain_dirty_freerun(dtc, strictlimit);
1750 }
1751 
1752 static void wb_dirty_freerun(struct dirty_throttle_control *dtc,
1753 			     bool strictlimit)
1754 {
1755 	dtc->freerun = false;
1756 
1757 	/* was already handled in domain_dirty_freerun */
1758 	if (strictlimit)
1759 		return;
1760 
1761 	wb_dirty_limits(dtc);
1762 	/*
1763 	 * LOCAL_THROTTLE tasks must not be throttled when below the per-wb
1764 	 * freerun ceiling.
1765 	 */
1766 	if (!(current->flags & PF_LOCAL_THROTTLE))
1767 		return;
1768 
1769 	dtc->freerun = dtc->wb_dirty <
1770 		       dirty_freerun_ceiling(dtc->wb_thresh, dtc->wb_bg_thresh);
1771 }
1772 
1773 static inline void wb_dirty_exceeded(struct dirty_throttle_control *dtc,
1774 				     bool strictlimit)
1775 {
1776 	dtc->dirty_exceeded = (dtc->wb_dirty > dtc->wb_thresh) &&
1777 		((dtc->dirty > dtc->thresh) || strictlimit);
1778 }
1779 
1780 /*
1781  * The limit fields dirty_exceeded and pos_ratio won't be updated if wb is
1782  * in the freerun state. Don't use these invalid fields in the freerun case.
1783  */
1784 static void balance_wb_limits(struct dirty_throttle_control *dtc,
1785 			      bool strictlimit)
1786 {
1787 	wb_dirty_freerun(dtc, strictlimit);
1788 	if (dtc->freerun)
1789 		return;
1790 
1791 	wb_dirty_exceeded(dtc, strictlimit);
1792 	wb_position_ratio(dtc);
1793 }
1794 
1795 /*
1796  * balance_dirty_pages() must be called by processes which are generating dirty
1797  * data.  It looks at the number of dirty pages in the machine and will force
1798  * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
1799  * If we're over `background_thresh' then the writeback threads are woken to
1800  * perform some writeout.
1801  */
1802 static int balance_dirty_pages(struct bdi_writeback *wb,
1803 			       unsigned long pages_dirtied, unsigned int flags)
1804 {
1805 	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1806 	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1807 	struct dirty_throttle_control * const gdtc = &gdtc_stor;
1808 	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1809 						     &mdtc_stor : NULL;
1810 	struct dirty_throttle_control *sdtc;
1811 	unsigned long nr_dirty;
1812 	long period;
1813 	long pause;
1814 	long max_pause;
1815 	long min_pause;
1816 	int nr_dirtied_pause;
1817 	unsigned long task_ratelimit;
1818 	unsigned long dirty_ratelimit;
1819 	struct backing_dev_info *bdi = wb->bdi;
1820 	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1821 	unsigned long start_time = jiffies;
1822 	int ret = 0;
1823 
1824 	for (;;) {
1825 		unsigned long now = jiffies;
1826 
1827 		nr_dirty = global_node_page_state(NR_FILE_DIRTY);
1828 
1829 		balance_domain_limits(gdtc, strictlimit);
1830 		if (mdtc) {
1831 			/*
1832 			 * If @wb belongs to !root memcg, repeat the same
1833 			 * basic calculations for the memcg domain.
1834 			 */
1835 			balance_domain_limits(mdtc, strictlimit);
1836 		}
1837 
1838 		if (nr_dirty > gdtc->bg_thresh && !writeback_in_progress(wb))
1839 			wb_start_background_writeback(wb);
1840 
1841 		/*
1842 		 * If memcg domain is in effect, @dirty should be under
1843 		 * both global and memcg freerun ceilings.
1844 		 */
1845 		if (gdtc->freerun && (!mdtc || mdtc->freerun)) {
1846 			unsigned long intv;
1847 			unsigned long m_intv;
1848 
1849 free_running:
1850 			intv = domain_poll_intv(gdtc, strictlimit);
1851 			m_intv = ULONG_MAX;
1852 
1853 			current->dirty_paused_when = now;
1854 			current->nr_dirtied = 0;
1855 			if (mdtc)
1856 				m_intv = domain_poll_intv(mdtc, strictlimit);
1857 			current->nr_dirtied_pause = min(intv, m_intv);
1858 			break;
1859 		}
1860 
1861 		mem_cgroup_flush_foreign(wb);
1862 
1863 		/*
1864 		 * Calculate global domain's pos_ratio and select the
1865 		 * global dtc by default.
1866 		 */
1867 		balance_wb_limits(gdtc, strictlimit);
1868 		if (gdtc->freerun)
1869 			goto free_running;
1870 		sdtc = gdtc;
1871 
1872 		if (mdtc) {
1873 			/*
1874 			 * If memcg domain is in effect, calculate its
1875 			 * pos_ratio.  @wb should satisfy constraints from
1876 			 * both global and memcg domains.  Choose the one
1877 			 * w/ lower pos_ratio.
1878 			 */
1879 			balance_wb_limits(mdtc, strictlimit);
1880 			if (mdtc->freerun)
1881 				goto free_running;
1882 			if (mdtc->pos_ratio < gdtc->pos_ratio)
1883 				sdtc = mdtc;
1884 		}
1885 
1886 		wb->dirty_exceeded = gdtc->dirty_exceeded ||
1887 				     (mdtc && mdtc->dirty_exceeded);
1888 		if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
1889 					   BANDWIDTH_INTERVAL))
1890 			__wb_update_bandwidth(gdtc, mdtc, true);
1891 
1892 		/* throttle according to the chosen dtc */
1893 		dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1894 		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1895 							RATELIMIT_CALC_SHIFT;
1896 		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1897 		min_pause = wb_min_pause(wb, max_pause,
1898 					 task_ratelimit, dirty_ratelimit,
1899 					 &nr_dirtied_pause);
1900 
1901 		if (unlikely(task_ratelimit == 0)) {
1902 			period = max_pause;
1903 			pause = max_pause;
1904 			goto pause;
1905 		}
1906 		period = HZ * pages_dirtied / task_ratelimit;
1907 		pause = period;
1908 		if (current->dirty_paused_when)
1909 			pause -= now - current->dirty_paused_when;
1910 		/*
1911 		 * For less than 1s think time (ext3/4 may block the dirtier
1912 		 * for up to 800ms from time to time on a single HDD; so does
1913 		 * xfs, though much less frequently), try to compensate for it
1914 		 * in future periods by updating the virtual time; otherwise
1915 		 * just do a reset, as it may be a light dirtier.
1916 		 */
1917 		if (pause < min_pause) {
1918 			trace_balance_dirty_pages(wb,
1919 						  sdtc,
1920 						  dirty_ratelimit,
1921 						  task_ratelimit,
1922 						  pages_dirtied,
1923 						  period,
1924 						  min(pause, 0L),
1925 						  start_time);
1926 			if (pause < -HZ) {
1927 				current->dirty_paused_when = now;
1928 				current->nr_dirtied = 0;
1929 			} else if (period) {
1930 				current->dirty_paused_when += period;
1931 				current->nr_dirtied = 0;
1932 			} else if (current->nr_dirtied_pause <= pages_dirtied)
1933 				current->nr_dirtied_pause += pages_dirtied;
1934 			break;
1935 		}
1936 		if (unlikely(pause > max_pause)) {
1937 			/* for occasional dropped task_ratelimit */
1938 			now += min(pause - max_pause, max_pause);
1939 			pause = max_pause;
1940 		}
1941 
1942 pause:
1943 		trace_balance_dirty_pages(wb,
1944 					  sdtc,
1945 					  dirty_ratelimit,
1946 					  task_ratelimit,
1947 					  pages_dirtied,
1948 					  period,
1949 					  pause,
1950 					  start_time);
1951 		if (flags & BDP_ASYNC) {
1952 			ret = -EAGAIN;
1953 			break;
1954 		}
1955 		__set_current_state(TASK_KILLABLE);
1956 		bdi->last_bdp_sleep = jiffies;
1957 		io_schedule_timeout(pause);
1958 
1959 		current->dirty_paused_when = now + pause;
1960 		current->nr_dirtied = 0;
1961 		current->nr_dirtied_pause = nr_dirtied_pause;
1962 
1963 		/*
1964 		 * This is typically equal to (dirty < thresh) and can also
1965 		 * keep "1000+ dd on a slow USB stick" under control.
1966 		 */
1967 		if (task_ratelimit)
1968 			break;
1969 
1970 		/*
1971 		 * When an NFS server is unresponsive and its dirty pages
1972 		 * exceed dirty_thresh, give the other, healthy wb's a pipe
1973 		 * to go through, so that tasks on them remain responsive.
1974 		 *
1975 		 * In theory 1 page is enough to keep the consumer-producer
1976 		 * pipe going: the flusher cleans 1 page => the task dirties 1
1977 		 * more page. However wb_dirty has accounting errors.  So use
1978 		 * the larger and more IO friendly wb_stat_error.
1979 		 */
1980 		if (sdtc->wb_dirty <= wb_stat_error())
1981 			break;
1982 
1983 		if (fatal_signal_pending(current))
1984 			break;
1985 	}
1986 	return ret;
1987 }
1988 
1989 static DEFINE_PER_CPU(int, bdp_ratelimits);
1990 
1991 /*
1992  * Normal tasks are throttled by
1993  *	loop {
1994  *		dirty tsk->nr_dirtied_pause pages;
1995  *		take a nap in balance_dirty_pages();
1996  *	}
1997  * However there is a worst case: if every task exits immediately after dirtying
1998  * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1999  * called to throttle the page dirties. The solution is to save the not yet
2000  * throttled page dirties in dirty_throttle_leaks on task exit and charge them
2001  * randomly into the running tasks. This works well for the above worst case,
2002  * as the new task will pick up and accumulate the old task's leaked dirty
2003  * count and eventually get throttled.
2004  */
2005 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
2006 
2007 /**
2008  * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
2009  * @mapping: address_space which was dirtied.
2010  * @flags: BDP flags.
2011  *
2012  * Processes which are dirtying memory should call in here once for each page
2013  * which was newly dirtied.  The function will periodically check the system's
2014  * dirty state and will initiate writeback if needed.
2015  *
2016  * See balance_dirty_pages_ratelimited() for details.
2017  *
2018  * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
2019  * indicate that memory is out of balance and the caller must wait
2020  * for I/O to complete.  Otherwise, it will return 0 to indicate
2021  * that either memory was already in balance, or it was able to sleep
2022  * until the amount of dirty memory returned to balance.
2023  */
2024 int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
2025 					unsigned int flags)
2026 {
2027 	struct inode *inode = mapping->host;
2028 	struct backing_dev_info *bdi = inode_to_bdi(inode);
2029 	struct bdi_writeback *wb = NULL;
2030 	int ratelimit;
2031 	int ret = 0;
2032 	int *p;
2033 
2034 	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
2035 		return ret;
2036 
2037 	if (inode_cgwb_enabled(inode))
2038 		wb = wb_get_create_current(bdi, GFP_KERNEL);
2039 	if (!wb)
2040 		wb = &bdi->wb;
2041 
2042 	ratelimit = current->nr_dirtied_pause;
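	/*
	 * When over the dirty limits, check back in after at most 32KB
	 * (8 pages with 4KiB pages) rather than the normal interval.
	 */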
2043 	if (wb->dirty_exceeded)
2044 		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
2045 
2046 	preempt_disable();
2047 	/*
2048 	 * This prevents one CPU from accumulating too many dirtied pages
2049 	 * without calling into balance_dirty_pages(), which can happen when
2050 	 * there are 1000+ tasks that all start dirtying pages at exactly the
2051 	 * same time and hence all honour a too-large initial task->nr_dirtied_pause.
2052 	 */
2053 	p = this_cpu_ptr(&bdp_ratelimits);
2054 	if (unlikely(current->nr_dirtied >= ratelimit))
2055 		*p = 0;
2056 	else if (unlikely(*p >= ratelimit_pages)) {
2057 		*p = 0;
2058 		ratelimit = 0;
2059 	}
2060 	/*
2061 	 * Pick up the pages dirtied by exited tasks. This avoids lots of
2062 	 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
2063 	 * dirty throttling and livelocking other long-running dirtiers.
2064 	 */
2065 	p = this_cpu_ptr(&dirty_throttle_leaks);
2066 	if (*p > 0 && current->nr_dirtied < ratelimit) {
2067 		unsigned long nr_pages_dirtied;
2068 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
2069 		*p -= nr_pages_dirtied;
2070 		current->nr_dirtied += nr_pages_dirtied;
2071 	}
2072 	preempt_enable();
2073 
2074 	if (unlikely(current->nr_dirtied >= ratelimit))
2075 		ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2076 
2077 	wb_put(wb);
2078 	return ret;
2079 }
2080 EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
2081 
2082 /**
2083  * balance_dirty_pages_ratelimited - balance dirty memory state.
2084  * @mapping: address_space which was dirtied.
2085  *
2086  * Processes which are dirtying memory should call in here once for each page
2087  * which was newly dirtied.  The function will periodically check the system's
2088  * dirty state and will initiate writeback if needed.
2089  *
2090  * Once we're over the dirty memory limit we decrease the ratelimiting
2091  * by a lot, to prevent individual processes from overshooting the limit
2092  * by (ratelimit_pages) each.
2093  */
2094 void balance_dirty_pages_ratelimited(struct address_space *mapping)
2095 {
2096 	balance_dirty_pages_ratelimited_flags(mapping, 0);
2097 }
2098 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
2099 
2100 /*
2101  * Similar to wb_dirty_limits, wb_bg_dirty_limits also calculates dirty
2102  * and thresh, but it's for background writeback.
2103  */
2104 static void wb_bg_dirty_limits(struct dirty_throttle_control *dtc)
2105 {
2106 	struct bdi_writeback *wb = dtc->wb;
2107 
2108 	dtc->wb_bg_thresh = __wb_calc_thresh(dtc, dtc->bg_thresh);
2109 	if (dtc->wb_bg_thresh < 2 * wb_stat_error())
2110 		dtc->wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE);
2111 	else
2112 		dtc->wb_dirty = wb_stat(wb, WB_RECLAIMABLE);
2113 }
2114 
2115 static bool domain_over_bg_thresh(struct dirty_throttle_control *dtc)
2116 {
2117 	domain_dirty_avail(dtc, false);
2118 	domain_dirty_limits(dtc);
2119 	if (dtc->dirty > dtc->bg_thresh)
2120 		return true;
2121 
2122 	wb_bg_dirty_limits(dtc);
2123 	if (dtc->wb_dirty > dtc->wb_bg_thresh)
2124 		return true;
2125 
2126 	return false;
2127 }
2128 
2129 /**
2130  * wb_over_bg_thresh - does @wb need to be written back?
2131  * @wb: bdi_writeback of interest
2132  *
2133  * Determines whether background writeback should keep writing @wb or whether
2134  * it is already clean enough.
2135  *
2136  * Return: %true if writeback should continue.
2137  */
2138 bool wb_over_bg_thresh(struct bdi_writeback *wb)
2139 {
2140 	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
2141 	struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
2142 
2143 	if (domain_over_bg_thresh(&gdtc))
2144 		return true;
2145 
2146 	if (mdtc_valid(&mdtc))
2147 		return domain_over_bg_thresh(&mdtc);
2148 
2149 	return false;
2150 }
2151 
2152 #ifdef CONFIG_SYSCTL
2153 /*
2154  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
2155  */
2156 static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write,
2157 		void *buffer, size_t *length, loff_t *ppos)
2158 {
2159 	unsigned int old_interval = dirty_writeback_interval;
2160 	int ret;
2161 
2162 	ret = proc_dointvec(table, write, buffer, length, ppos);
2163 
2164 	/*
2165 	 * Writing 0 to dirty_writeback_interval will disable periodic writeback
2166 	 * and a different non-zero value will wake up the writeback threads.
2167 	 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2168 	 * iterate over all bdis and wbs.
2169 	 * The reason we do this is to make the change take effect immediately.
2170 	 */
2171 	if (!ret && write && dirty_writeback_interval &&
2172 		dirty_writeback_interval != old_interval)
2173 		wakeup_flusher_threads(WB_REASON_PERIODIC);
2174 
2175 	return ret;
2176 }
2177 #endif
2178 
2179 /*
2180  * If ratelimit_pages is too high then we can get into dirty-data overload
2181  * if a large number of processes all perform writes at the same time.
2182  *
2183  * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2184  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
2185  * thresholds.
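 *
 * E.g. with dirty_thresh = 262144 pages (1GB with 4KiB pages) and 8 online
 * CPUs, ratelimit_pages = 262144 / (8 * 32) = 1024 pages per CPU.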
2186  */
2187 
2188 void writeback_set_ratelimit(void)
2189 {
2190 	struct wb_domain *dom = &global_wb_domain;
2191 	unsigned long background_thresh;
2192 	unsigned long dirty_thresh;
2193 
2194 	global_dirty_limits(&background_thresh, &dirty_thresh);
2195 	dom->dirty_limit = dirty_thresh;
2196 	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2197 	if (ratelimit_pages < 16)
2198 		ratelimit_pages = 16;
2199 }
2200 
2201 static int page_writeback_cpu_online(unsigned int cpu)
2202 {
2203 	writeback_set_ratelimit();
2204 	return 0;
2205 }
2206 
2207 #ifdef CONFIG_SYSCTL
2208 
2209 static int laptop_mode;
2210 static int laptop_mode_handler(const struct ctl_table *table, int write,
2211 			       void *buffer, size_t *lenp, loff_t *ppos)
2212 {
2213 	int ret = proc_dointvec_jiffies(table, write, buffer, lenp, ppos);
2214 
2215 	if (!ret && write)
2216 		pr_warn("%s: vm.laptop_mode is deprecated. Ignoring setting.\n",
2217 			current->comm);
2218 
2219 	return ret;
2220 }
2221 
2222 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
2223 static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
2224 
2225 static const struct ctl_table vm_page_writeback_sysctls[] = {
2226 	{
2227 		.procname   = "dirty_background_ratio",
2228 		.data       = &dirty_background_ratio,
2229 		.maxlen     = sizeof(dirty_background_ratio),
2230 		.mode       = 0644,
2231 		.proc_handler   = dirty_background_ratio_handler,
2232 		.extra1     = SYSCTL_ZERO,
2233 		.extra2     = SYSCTL_ONE_HUNDRED,
2234 	},
2235 	{
2236 		.procname   = "dirty_background_bytes",
2237 		.data       = &dirty_background_bytes,
2238 		.maxlen     = sizeof(dirty_background_bytes),
2239 		.mode       = 0644,
2240 		.proc_handler   = dirty_background_bytes_handler,
2241 		.extra1     = SYSCTL_LONG_ONE,
2242 	},
2243 	{
2244 		.procname   = "dirty_ratio",
2245 		.data       = &vm_dirty_ratio,
2246 		.maxlen     = sizeof(vm_dirty_ratio),
2247 		.mode       = 0644,
2248 		.proc_handler   = dirty_ratio_handler,
2249 		.extra1     = SYSCTL_ZERO,
2250 		.extra2     = SYSCTL_ONE_HUNDRED,
2251 	},
2252 	{
2253 		.procname   = "dirty_bytes",
2254 		.data       = &vm_dirty_bytes,
2255 		.maxlen     = sizeof(vm_dirty_bytes),
2256 		.mode       = 0644,
2257 		.proc_handler   = dirty_bytes_handler,
2258 		.extra1     = (void *)&dirty_bytes_min,
2259 	},
2260 	{
2261 		.procname   = "dirty_writeback_centisecs",
2262 		.data       = &dirty_writeback_interval,
2263 		.maxlen     = sizeof(dirty_writeback_interval),
2264 		.mode       = 0644,
2265 		.proc_handler   = dirty_writeback_centisecs_handler,
2266 	},
2267 	{
2268 		.procname   = "dirty_expire_centisecs",
2269 		.data       = &dirty_expire_interval,
2270 		.maxlen     = sizeof(dirty_expire_interval),
2271 		.mode       = 0644,
2272 		.proc_handler   = proc_dointvec_minmax,
2273 		.extra1     = SYSCTL_ZERO,
2274 	},
2275 #ifdef CONFIG_HIGHMEM
2276 	{
2277 		.procname	= "highmem_is_dirtyable",
2278 		.data		= &vm_highmem_is_dirtyable,
2279 		.maxlen		= sizeof(vm_highmem_is_dirtyable),
2280 		.mode		= 0644,
2281 		.proc_handler	= proc_dointvec_minmax,
2282 		.extra1		= SYSCTL_ZERO,
2283 		.extra2		= SYSCTL_ONE,
2284 	},
2285 #endif
2286 	{
2287 		.procname	= "laptop_mode",
2288 		.data		= &laptop_mode,
2289 		.maxlen		= sizeof(laptop_mode),
2290 		.mode		= 0644,
2291 		.proc_handler	= laptop_mode_handler,
2292 	},
2293 };
2294 #endif
2295 
2296 /*
2297  * Called early on to tune the page writeback dirty limits.
2298  *
2299  * We used to scale dirty pages according to how total memory
2300  * related to pages that could be allocated for buffers.
2301  *
2302  * However, that was when we used "dirty_ratio" to scale with
2303  * all memory, and we don't do that any more. "dirty_ratio"
2304  * is now applied to total non-HIGHPAGE memory, and as such we can't
2305  * get into the old insane situation any more where we had
2306  * large amounts of dirty pages compared to a small amount of
2307  * non-HIGHMEM memory.
2308  *
2309  * But we might still want to scale the dirty_ratio by how
2310  * much memory the box has..
2311  * much memory the box has.
2312 void __init page_writeback_init(void)
2313 {
2314 	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2315 
2316 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2317 			  page_writeback_cpu_online, NULL);
2318 	cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2319 			  page_writeback_cpu_online);
2320 #ifdef CONFIG_SYSCTL
2321 	register_sysctl_init("vm", vm_page_writeback_sysctls);
2322 #endif
2323 }
2324 
2325 /**
2326  * tag_pages_for_writeback - tag pages to be written by writeback
2327  * @mapping: address space structure to write
2328  * @start: starting page index
2329  * @end: ending page index (inclusive)
2330  *
2331  * This function scans the page range from @start to @end (inclusive) and tags
2332  * all pages that have DIRTY tag set with a special TOWRITE tag.  The caller
2333  * can then use the TOWRITE tag to identify pages eligible for writeback.
2334  * This mechanism is used to avoid livelocking of writeback by a process
2335  * steadily creating new dirty pages in the file (thus it is important for this
2336  * function to be quick so that it can tag pages faster than a dirtying process
2337  * can create them).
2338  */
2339 void tag_pages_for_writeback(struct address_space *mapping,
2340 			     pgoff_t start, pgoff_t end)
2341 {
2342 	XA_STATE(xas, &mapping->i_pages, start);
2343 	unsigned int tagged = 0;
2344 	void *page;
2345 
2346 	xas_lock_irq(&xas);
2347 	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2348 		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2349 		if (++tagged % XA_CHECK_SCHED)
2350 			continue;
2351 
2352 		xas_pause(&xas);
2353 		xas_unlock_irq(&xas);
2354 		cond_resched();
2355 		xas_lock_irq(&xas);
2356 	}
2357 	xas_unlock_irq(&xas);
2358 }
2359 EXPORT_SYMBOL(tag_pages_for_writeback);
2360 
2361 static bool folio_prepare_writeback(struct address_space *mapping,
2362 		struct writeback_control *wbc, struct folio *folio)
2363 {
2364 	/*
2365 	 * Folio truncated or invalidated. We can freely skip it then,
2366 	 * even for data integrity operations: the folio has disappeared
2367 	 * concurrently, so there could be no real expectation of this
2368 	 * data integrity operation even if there is now a new, dirty
2369 	 * folio at the same pagecache index.
2370 	 */
2371 	if (unlikely(folio->mapping != mapping))
2372 		return false;
2373 
2374 	/*
2375 	 * Did somebody else write it for us?
2376 	 */
2377 	if (!folio_test_dirty(folio))
2378 		return false;
2379 
2380 	if (folio_test_writeback(folio)) {
2381 		if (wbc->sync_mode == WB_SYNC_NONE)
2382 			return false;
2383 		folio_wait_writeback(folio);
2384 	}
2385 	BUG_ON(folio_test_writeback(folio));
2386 
2387 	if (!folio_clear_dirty_for_io(folio))
2388 		return false;
2389 
2390 	return true;
2391 }
2392 
2394 static pgoff_t wbc_end(struct writeback_control *wbc)
2395 {
2396 	if (wbc->range_cyclic)
2397 		return -1;
2398 	return wbc->range_end >> PAGE_SHIFT;
2399 }
2400 
2401 static struct folio *writeback_get_folio(struct address_space *mapping,
2402 		struct writeback_control *wbc)
2403 {
2404 	struct folio *folio;
2405 
2406 retry:
2407 	folio = folio_batch_next(&wbc->fbatch);
2408 	if (!folio) {
2409 		folio_batch_release(&wbc->fbatch);
2410 		cond_resched();
2411 		filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
2412 				wbc_to_tag(wbc), &wbc->fbatch);
2413 		folio = folio_batch_next(&wbc->fbatch);
2414 		if (!folio)
2415 			return NULL;
2416 	}
2417 
2418 	folio_lock(folio);
2419 	if (unlikely(!folio_prepare_writeback(mapping, wbc, folio))) {
2420 		folio_unlock(folio);
2421 		goto retry;
2422 	}
2423 
2424 	trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2425 	return folio;
2426 }
2427 
2428 /**
2429  * writeback_iter - iterate folios of a mapping for writeback
2430  * @mapping: address space structure to write
2431  * @wbc: writeback context
2432  * @folio: previously iterated folio (%NULL to start)
2433  * @error: in-out pointer for writeback errors (see below)
2434  *
2435  * This function returns the next folio for the writeback operation described by
2436  * @wbc on @mapping and should be called in a while loop in the ->writepages
2437  * implementation.
2438  *
2439  * To start the writeback operation, %NULL is passed in the @folio argument, and
2440  * for every subsequent iteration the folio returned previously should be passed
2441  * back in.
2442  *
2443  * If there was an error in the per-folio writeback inside the writeback_iter()
2444  * loop, @error should be set to the error value.
2445  *
2446  * Once the writeback described in @wbc has finished, this function will return
2447  * %NULL and if there was an error in any iteration restore it to @error.
2448  * %NULL and, if there was an error in any iteration, restore it to @error.
2449  * Note: callers should not manually break out of the loop using break or goto
2450  * but must keep calling writeback_iter() until it returns %NULL.
2451  *
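 *
 * A typical ->writepages implementation drives the loop like this (sketch;
 * the per-folio step is filesystem specific):
 *
 *	struct folio *folio = NULL;
 *	int error = 0;
 *
 *	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *		error = <write out and unlock folio>;
 *	return error;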
2452  * Return: the folio to write or %NULL if the loop is done.
2453  */
2454 struct folio *writeback_iter(struct address_space *mapping,
2455 		struct writeback_control *wbc, struct folio *folio, int *error)
2456 {
2457 	if (!folio) {
2458 		folio_batch_init(&wbc->fbatch);
2459 		wbc->saved_err = *error = 0;
2460 
2461 		/*
2462 		 * For range cyclic writeback we remember where we stopped so
2463 		 * that we can continue from there next time.
2464 		 *
2465 		 * For non-cyclic writeback we always start at the beginning of
2466 		 * the passed in range.
2467 		 */
2468 		if (wbc->range_cyclic)
2469 			wbc->index = mapping->writeback_index;
2470 		else
2471 			wbc->index = wbc->range_start >> PAGE_SHIFT;
2472 
2473 		/*
2474 		 * To avoid livelocks when other processes dirty new pages, we
2475 		 * first tag pages which should be written back and only then
2476 		 * start writing them.
2477 		 *
2478 		 * For data-integrity writeback we have to be careful so that we
2479 		 * do not miss some pages (e.g., because some other process has
2480 		 * cleared the TOWRITE tag we set).  The rule we follow is that
2481 		 * TOWRITE tag can be cleared only by the process clearing the
2482 		 * DIRTY tag (and submitting the page for I/O).
2483 		 */
2484 		if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2485 			tag_pages_for_writeback(mapping, wbc->index,
2486 					wbc_end(wbc));
2487 	} else {
2488 		wbc->nr_to_write -= folio_nr_pages(folio);
2489 
2490 		WARN_ON_ONCE(*error > 0);
2491 
2492 		/*
2493 		 * For integrity writeback we have to keep going until we have
2494 		 * written all the folios we tagged for writeback above, even if
2495 		 * we run past wbc->nr_to_write or encounter errors.
2496 		 * We stash away the first error we encounter in wbc->saved_err
2497 		 * so that it can be retrieved when we're done.  This is because
2498 		 * the file system may still have state to clear for each folio.
2499 		 *
2500 		 * For background writeback we exit as soon as we run past
2501 		 * wbc->nr_to_write or encounter the first error.
2502 		 */
2503 		if (wbc->sync_mode == WB_SYNC_ALL) {
2504 			if (*error && !wbc->saved_err)
2505 				wbc->saved_err = *error;
2506 		} else {
2507 			if (*error || wbc->nr_to_write <= 0)
2508 				goto done;
2509 		}
2510 	}
2511 
2512 	folio = writeback_get_folio(mapping, wbc);
2513 	if (!folio) {
2514 		/*
2515 		 * To avoid deadlocks between range_cyclic writeback and callers
2516 		 * that hold folios in writeback to aggregate I/O until
2517 		 * the writeback iteration finishes, we do not loop back to the
2518 		 * start of the file.  Doing so causes a folio lock/folio
2519 		 * writeback access order inversion - we should only ever lock
2520 		 * multiple folios in ascending folio->index order, and looping
2521 		 * back to the start of the file violates that rule and causes
2522 		 * deadlocks.
2523 		 */
2524 		if (wbc->range_cyclic)
2525 			mapping->writeback_index = 0;
2526 
2527 		/*
2528 		 * Return the first error we encountered (if there was any) to
2529 		 * the caller.
2530 		 */
2531 		*error = wbc->saved_err;
2532 	}
2533 	return folio;
2534 
2535 done:
2536 	if (wbc->range_cyclic)
2537 		mapping->writeback_index = folio_next_index(folio);
2538 	folio_batch_release(&wbc->fbatch);
2539 	return NULL;
2540 }
2541 EXPORT_SYMBOL_GPL(writeback_iter);
2542 
2543 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2544 {
2545 	int ret;
2546 	struct bdi_writeback *wb;
2547 
2548 	if (wbc->nr_to_write <= 0)
2549 		return 0;
2550 	wb = inode_to_wb_wbc(mapping->host, wbc);
2551 	wb_bandwidth_estimate_start(wb);
2552 	while (1) {
2553 		if (mapping->a_ops->writepages)
2554 			ret = mapping->a_ops->writepages(mapping, wbc);
2555 		else
2556 			/* deal with chardevs and other special files */
2557 			ret = 0;
2558 		if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
2559 			break;
2560 
2561 		/*
2562 		 * Lacking an allocation context or the locality or writeback
2563 		 * state of any of the inode's pages, throttle based on
2564 		 * writeback activity on the local node. It's as good a
2565 		 * guess as any.
2566 		 */
2567 		reclaim_throttle(NODE_DATA(numa_node_id()),
2568 			VMSCAN_THROTTLE_WRITEBACK);
2569 	}
2570 	/*
2571 	 * Usually few pages are written by now from those we've just submitted
2572 	 * but if there's constant writeback being submitted, this makes sure
2573 	 * writeback bandwidth is updated once in a while.
2574 	 */
2575 	if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
2576 				   BANDWIDTH_INTERVAL))
2577 		wb_update_bandwidth(wb);
2578 	return ret;
2579 }
2580 
2581 /*
2582  * For address_spaces which do not use buffers nor write back.
2583  */
2584 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
2585 {
2586 	if (!folio_test_dirty(folio))
2587 		return !folio_test_set_dirty(folio);
2588 	return false;
2589 }
2590 EXPORT_SYMBOL(noop_dirty_folio);
2591 
2592 /*
2593  * Helper function for the folio_mark_dirty family.
2594  *
2595  * NOTE: This relies on being atomic wrt interrupts.
2596  */
2597 static void folio_account_dirtied(struct folio *folio,
2598 		struct address_space *mapping)
2599 {
2600 	struct inode *inode = mapping->host;
2601 
2602 	trace_writeback_dirty_folio(folio, mapping);
2603 
2604 	if (mapping_can_writeback(mapping)) {
2605 		struct bdi_writeback *wb;
2606 		long nr = folio_nr_pages(folio);
2607 
2608 		inode_attach_wb(inode, folio);
2609 		wb = inode_to_wb(inode);
2610 
2611 		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2612 		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2613 		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
2614 		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2615 		wb_stat_mod(wb, WB_DIRTIED, nr);
2616 		task_io_account_write(nr * PAGE_SIZE);
2617 		current->nr_dirtied += nr;
2618 		__this_cpu_add(bdp_ratelimits, nr);
2619 
2620 		mem_cgroup_track_foreign_dirty(folio, wb);
2621 	}
2622 }
2623 
2624 /*
2625  * Helper function for deaccounting a dirty folio without writeback.
2626  */
2628 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2629 {
2630 	long nr = folio_nr_pages(folio);
2631 
2632 	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2633 	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2634 	wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2635 	task_io_account_cancelled_write(nr * PAGE_SIZE);
2636 }
2637 
2638 /*
2639  * Mark the folio dirty, and set it dirty in the page cache.
2640  *
2641  * If warn is true, then emit a warning if the folio is not uptodate and has
2642  * not been truncated.
2643  *
2644  * It is the caller's responsibility to prevent the folio from being truncated
2645  * while this function is in progress, although it may have been truncated
2646  * before this function is called.  Most callers have the folio locked.
2647  * A few have the folio blocked from truncation through other means (e.g.
2648  * zap_vma_pages() has it mapped and is holding the page table lock).
2649  * When called from mark_buffer_dirty(), the filesystem should hold a
2650  * reference to the buffer_head that is being marked dirty, which causes
2651  * try_to_free_buffers() to fail.
2652  */
2653 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
2654 			     int warn)
2655 {
2656 	unsigned long flags;
2657 
2658 	/*
2659 	 * Shmem writeback relies on swap, and swap writeback is LRU based,
2660 	 * not using the dirty mark.
2661 	 */
2662 	VM_WARN_ON_ONCE(folio_test_swapcache(folio) || shmem_mapping(mapping));
2663 
2664 	xa_lock_irqsave(&mapping->i_pages, flags);
2665 	if (folio->mapping) {	/* Race with truncate? */
2666 		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2667 		folio_account_dirtied(folio, mapping);
2668 		__xa_set_mark(&mapping->i_pages, folio->index,
2669 			      PAGECACHE_TAG_DIRTY);
2670 	}
2671 	xa_unlock_irqrestore(&mapping->i_pages, flags);
2672 }
2673 
2674 /**
2675  * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2676  * @mapping: Address space this folio belongs to.
2677  * @folio: Folio to be marked as dirty.
2678  *
2679  * Filesystems which do not use buffer heads should call this function
2680  * from their dirty_folio address space operation.  It ignores the
2681  * contents of folio_get_private(), so if the filesystem marks individual
2682  * blocks as dirty, the filesystem should handle that itself.
2683  *
2684  * This is also sometimes used by filesystems which use buffer_heads when
2685  * a single buffer is being dirtied: we want to set the folio dirty in
2686  * that case, but not all the buffers.  This is a "bottom-up" dirtying,
2687  * whereas block_dirty_folio() is a "top-down" dirtying.
2688  *
2689  * The caller must ensure this doesn't race with truncation.  Most will
2690  * simply hold the folio lock, but e.g. zap_pte_range() calls with the
2691  * folio mapped and the pte lock held, which also locks out truncation.
2692  */
2693 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
2694 {
2695 	if (folio_test_set_dirty(folio))
2696 		return false;
2697 
2698 	__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
2699 
2700 	if (mapping->host) {
2701 		/* !PageAnon && !swapper_space */
2702 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2703 	}
2704 	return true;
2705 }
2706 EXPORT_SYMBOL(filemap_dirty_folio);
2707 
2708 /**
2709  * folio_redirty_for_writepage - Decline to write a dirty folio.
2710  * @wbc: The writeback control.
2711  * @folio: The folio.
2712  *
2713  * When a writepage implementation decides that it doesn't want to write
2714  * @folio for some reason, it should call this function, unlock @folio and
2715  * return 0.
2716  *
2717  * Return: True if we redirtied the folio.  False if someone else dirtied
2718  * it first.
2719  */
2720 bool folio_redirty_for_writepage(struct writeback_control *wbc,
2721 		struct folio *folio)
2722 {
2723 	struct address_space *mapping = folio->mapping;
2724 	long nr = folio_nr_pages(folio);
2725 	bool ret;
2726 
2727 	wbc->pages_skipped += nr;
2728 	ret = filemap_dirty_folio(mapping, folio);
2729 	if (mapping && mapping_can_writeback(mapping)) {
2730 		struct inode *inode = mapping->host;
2731 		struct bdi_writeback *wb;
2732 		struct wb_lock_cookie cookie = {};
2733 
2734 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2735 		current->nr_dirtied -= nr;
2736 		node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2737 		wb_stat_mod(wb, WB_DIRTIED, -nr);
2738 		unlocked_inode_to_wb_end(inode, &cookie);
2739 	}
2740 	return ret;
2741 }
2742 EXPORT_SYMBOL(folio_redirty_for_writepage);
2743 
2744 /**
2745  * folio_mark_dirty - Mark a folio as being modified.
2746  * @folio: The folio.
2747  *
2748  * The folio may not be truncated while this function is running.
2749  * Holding the folio lock is sufficient to prevent truncation, but some
2750  * callers cannot acquire a sleeping lock.  These callers instead hold
2751  * the page table lock for a page table which contains at least one page
2752  * in this folio.  Truncation will block on the page table lock as it
2753  * unmaps pages before removing the folio from its mapping.
2754  *
2755  * Return: True if the folio was newly dirtied, false if it was already dirty.
2756  */
2757 bool folio_mark_dirty(struct folio *folio)
2758 {
2759 	struct address_space *mapping = folio_mapping(folio);
2760 
2761 	if (likely(mapping)) {
2762 		/*
2763 		 * The PG_readahead/PG_reclaim flag set by readahead or
2764 		 * folio_deactivate could remain set due to a race with
2765 		 * folio_end_writeback().
2766 		 * For readahead: if the folio is written, the flag will be
2767 		 * reset, so there is no problem.
2768 		 * For folio_deactivate: if the folio is redirtied, the flag
2769 		 * will be reset as well; but if the folio is picked up by
2770 		 * readahead, it will confuse readahead and make it restart
2771 		 * the size ramp-up process. That is only a trivial problem.
2772 		 */
2773 		if (folio_test_reclaim(folio))
2774 			folio_clear_reclaim(folio);
2775 		return mapping->a_ops->dirty_folio(mapping, folio);
2776 	}
2777 
2778 	return noop_dirty_folio(mapping, folio);
2779 }
2780 EXPORT_SYMBOL(folio_mark_dirty);
2781 
2782 /*
2783  * folio_mark_dirty() is racy if the caller has no reference against
2784  * folio->mapping->host, and if the folio is unlocked.  This is because another
2785  * CPU could truncate the folio off the mapping and then free the mapping.
2786  *
2787  * Usually, the folio _is_ locked, or the caller is a user-space process which
2788  * holds a reference on the inode by having an open file.
2789  *
2790  * In other cases, the folio should be locked before running folio_mark_dirty().
2791  */
2792 bool folio_mark_dirty_lock(struct folio *folio)
2793 {
2794 	bool ret;
2795 
2796 	folio_lock(folio);
2797 	ret = folio_mark_dirty(folio);
2798 	folio_unlock(folio);
2799 	return ret;
2800 }
2801 EXPORT_SYMBOL(folio_mark_dirty_lock);
2802 
2803 /*
2804  * This cancels just the dirty bit on the kernel page itself, it does NOT
2805  * actually remove dirty bits on any mmap's that may be around. It also
2806  * leaves the page tagged dirty, so any sync activity will still find it on
2807  * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2808  * look at the dirty bits in the VM.
2809  *
2810  * Doing this should *normally* only ever be done when a page is truncated,
2811  * and is not actually mapped anywhere at all. However, fs/buffer.c does
2812  * this when it notices that somebody has cleaned out all the buffers on a
2813  * page without actually doing it through the VM. Can you say "ext3 is
2814  * horribly ugly"? Thought you could.
2815  */
2816 void __folio_cancel_dirty(struct folio *folio)
2817 {
2818 	struct address_space *mapping = folio_mapping(folio);
2819 
2820 	if (mapping_can_writeback(mapping)) {
2821 		struct inode *inode = mapping->host;
2822 		struct bdi_writeback *wb;
2823 		struct wb_lock_cookie cookie = {};
2824 
2825 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2826 
2827 		if (folio_test_clear_dirty(folio))
2828 			folio_account_cleaned(folio, wb);
2829 
2830 		unlocked_inode_to_wb_end(inode, &cookie);
2831 	} else {
2832 		folio_clear_dirty(folio);
2833 	}
2834 }
2835 EXPORT_SYMBOL(__folio_cancel_dirty);
2836 
2837 /*
2838  * Clear a folio's dirty flag, while caring for dirty memory accounting.
2839  * Returns true if the folio was previously dirty.
2840  *
2841  * This is for preparing to put the folio under writeout.  We leave
2842  * the folio tagged as dirty in the xarray so that a concurrent
2843  * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
2844  * The ->writepage implementation will run either folio_start_writeback()
2845  * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
2846  * and xarray dirty tag back into sync.
2847  *
2848  * This incoherency between the folio's dirty flag and xarray tag is
2849  * unfortunate, but it only exists while the folio is locked.
2850  */
2851 bool folio_clear_dirty_for_io(struct folio *folio)
2852 {
2853 	struct address_space *mapping = folio_mapping(folio);
2854 	bool ret = false;
2855 
2856 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2857 
2858 	if (mapping && mapping_can_writeback(mapping)) {
2859 		struct inode *inode = mapping->host;
2860 		struct bdi_writeback *wb;
2861 		struct wb_lock_cookie cookie = {};
2862 
2863 		/*
2864 		 * Yes, Virginia, this is indeed insane.
2865 		 *
2866 		 * We use this sequence to make sure that
2867 		 *  (a) we account for dirty stats properly
2868 		 *  (b) we tell the low-level filesystem to
2869 		 *      mark the whole folio dirty if it was
2870 		 *      dirty in a pagetable. Only to then
2871 		 *  (c) clean the folio again and return 1 to
2872 		 *      cause the writeback.
2873 		 *
2874 		 * This way we avoid all nasty races with the
2875 		 * dirty bit in multiple places and clearing
2876 		 * them concurrently from different threads.
2877 		 *
2878 		 * Note! Normally the "folio_mark_dirty(folio)"
2879 		 * has no effect on the actual dirty bit - since
2880 		 * that will already usually be set. But we
2881 		 * need the side effects, and it can help us
2882 		 * avoid races.
2883 		 *
2884 		 * We basically use the folio "master dirty bit"
2885 		 * as a serialization point for all the different
2886 		 * threads doing their things.
2887 		 */
2888 		if (folio_mkclean(folio))
2889 			folio_mark_dirty(folio);
2890 		/*
2891 		 * We carefully synchronise fault handlers against
2892 		 * installing a dirty pte and marking the folio dirty
2893 		 * at this point.  We do this by having them hold the
2894 		 * page lock while dirtying the folio, and folios are
2895 		 * always locked coming in here, so we get the desired
2896 		 * exclusion.
2897 		 */
2898 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
2899 		if (folio_test_clear_dirty(folio)) {
2900 			long nr = folio_nr_pages(folio);
2901 			lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2902 			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2903 			wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2904 			ret = true;
2905 		}
2906 		unlocked_inode_to_wb_end(inode, &cookie);
2907 		return ret;
2908 	}
2909 	return folio_test_clear_dirty(folio);
2910 }
2911 EXPORT_SYMBOL(folio_clear_dirty_for_io);
2912 
2913 static void wb_inode_writeback_start(struct bdi_writeback *wb)
2914 {
2915 	atomic_inc(&wb->writeback_inodes);
2916 }
2917 
2918 static void wb_inode_writeback_end(struct bdi_writeback *wb)
2919 {
2920 	unsigned long flags;
2921 	atomic_dec(&wb->writeback_inodes);
2922 	/*
2923 	 * Make sure estimate of writeback throughput gets updated after
2924 	 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
2925 	 * (which is the interval other bandwidth updates use for batching) so
2926 	 * that if multiple inodes end writeback at a similar time, they get
2927 	 * batched into one bandwidth update.
2928 	 */
2929 	spin_lock_irqsave(&wb->work_lock, flags);
2930 	if (test_bit(WB_registered, &wb->state))
2931 		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2932 	spin_unlock_irqrestore(&wb->work_lock, flags);
2933 }
2934 
2935 bool __folio_end_writeback(struct folio *folio)
2936 {
2937 	long nr = folio_nr_pages(folio);
2938 	struct address_space *mapping = folio_mapping(folio);
2939 	bool ret;
2940 
2941 	if (mapping && mapping_use_writeback_tags(mapping)) {
2942 		struct inode *inode = mapping->host;
2943 		struct bdi_writeback *wb;
2944 		unsigned long flags;
2945 
2946 		xa_lock_irqsave(&mapping->i_pages, flags);
2947 		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2948 		__xa_clear_mark(&mapping->i_pages, folio->index,
2949 					PAGECACHE_TAG_WRITEBACK);
2950 
2951 		wb = inode_to_wb(inode);
2952 		wb_stat_mod(wb, WB_WRITEBACK, -nr);
2953 		__wb_writeout_add(wb, nr);
2954 		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
2955 			wb_inode_writeback_end(wb);
2956 			if (mapping->host)
2957 				sb_clear_inode_writeback(mapping->host);
2958 		}
2959 
2960 		xa_unlock_irqrestore(&mapping->i_pages, flags);
2961 	} else {
2962 		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
2963 	}
2964 
2965 	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
2966 	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2967 	node_stat_mod_folio(folio, NR_WRITTEN, nr);
2968 
2969 	return ret;
2970 }
2971 
2972 void __folio_start_writeback(struct folio *folio, bool keep_write)
2973 {
2974 	long nr = folio_nr_pages(folio);
2975 	struct address_space *mapping = folio_mapping(folio);
2976 	int access_ret;
2977 
2978 	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2979 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2980 
2981 	if (mapping && mapping_use_writeback_tags(mapping)) {
2982 		XA_STATE(xas, &mapping->i_pages, folio->index);
2983 		struct inode *inode = mapping->host;
2984 		struct bdi_writeback *wb;
2985 		unsigned long flags;
2986 		bool on_wblist;
2987 
2988 		xas_lock_irqsave(&xas, flags);
2989 		xas_load(&xas);
2990 		folio_test_set_writeback(folio);
2991 
2992 		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
2993 
2994 		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
2995 		wb = inode_to_wb(inode);
2996 		wb_stat_mod(wb, WB_WRITEBACK, nr);
2997 		if (!on_wblist) {
2998 			wb_inode_writeback_start(wb);
2999 			/*
3000 			 * We can come through here when swapping anonymous
3001 			 * folios, so we don't necessarily have an inode to
3002 			 * track for sync.
3003 			 */
3004 			if (mapping->host)
3005 				sb_mark_inode_writeback(mapping->host);
3006 		}
3007 
3008 		if (!folio_test_dirty(folio))
3009 			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
3010 		if (!keep_write)
3011 			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3012 		xas_unlock_irqrestore(&xas, flags);
3013 	} else {
3014 		folio_test_set_writeback(folio);
3015 	}
3016 
3017 	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3018 	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
3019 
3020 	access_ret = arch_make_folio_accessible(folio);
3021 	/*
3022 	 * If writeback has been triggered on a page that cannot be made
3023 	 * accessible, it is too late to recover here.
3024 	 */
3025 	VM_BUG_ON_FOLIO(access_ret != 0, folio);
3026 }
3027 EXPORT_SYMBOL(__folio_start_writeback);
3028 
3029 /**
3030  * folio_wait_writeback - Wait for a folio to finish writeback.
3031  * @folio: The folio to wait for.
3032  *
3033  * If the folio is currently being written back to storage, wait for the
3034  * I/O to complete.
3035  *
3036  * Context: Sleeps.  Must be called in process context and with
3037  * no spinlocks held.  Caller should hold a reference on the folio.
3038  * If the folio is not locked, writeback may start again after writeback
3039  * has finished.
3040  */
3041 void folio_wait_writeback(struct folio *folio)
3042 {
3043 	while (folio_test_writeback(folio)) {
3044 		trace_folio_wait_writeback(folio, folio_mapping(folio));
3045 		folio_wait_bit(folio, PG_writeback);
3046 	}
3047 }
3048 EXPORT_SYMBOL_GPL(folio_wait_writeback);
3049 
3050 /**
3051  * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3052  * @folio: The folio to wait for.
3053  *
3054  * If the folio is currently being written back to storage, wait for the
3055  * I/O to complete or a fatal signal to arrive.
3056  *
3057  * Context: Sleeps.  Must be called in process context and with
3058  * no spinlocks held.  Caller should hold a reference on the folio.
3059  * If the folio is not locked, writeback may start again after writeback
3060  * has finished.
3061  * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3062  */
3063 int folio_wait_writeback_killable(struct folio *folio)
3064 {
3065 	while (folio_test_writeback(folio)) {
3066 		trace_folio_wait_writeback(folio, folio_mapping(folio));
3067 		if (folio_wait_bit_killable(folio, PG_writeback))
3068 			return -EINTR;
3069 	}
3070 
3071 	return 0;
3072 }
3073 EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
3074 
3075 /**
3076  * folio_wait_stable() - wait for writeback to finish, if necessary.
3077  * @folio: The folio to wait on.
3078  *
3079  * This function determines if the given folio is related to a backing
3080  * device that requires folio contents to be held stable during writeback.
3081  * If so, then it will wait for any pending writeback to complete.
3082  *
3083  * Context: Sleeps.  Must be called in process context and with
3084  * no spinlocks held.  Caller should hold a reference on the folio.
3085  * If the folio is not locked, writeback may start again after writeback
3086  * has finished.
3087  */
3088 void folio_wait_stable(struct folio *folio)
3089 {
3090 	if (mapping_stable_writes(folio_mapping(folio)))
3091 		folio_wait_writeback(folio);
3092 }
3093 EXPORT_SYMBOL_GPL(folio_wait_stable);
3094