Lines in mm/page-writeback.c matching the query +full:delta +full:- +full:x +full:- +full:threshold
1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/page-writeback.c
26 #include <linux/backing-dev.h>
55 #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
101 * The interval between `kupdate'-style writebacks
120 /* End of sysctl-exported parameters */
135 .wb_completions = &(__wb)->completions
141 .wb_completions = &(__wb)->memcg_completions, \
146 return dtc->dom; in mdtc_valid()
151 return dtc->dom; in dtc_dom()
156 return mdtc->gdtc; in mdtc_gdtc()
161 return &wb->memcg_completions; in wb_memcg_completions()
167 unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth); in wb_min_max_ratio()
168 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_min_max_ratio()
169 unsigned long long min = wb->bdi->min_ratio; in wb_min_max_ratio()
170 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio()
194 .wb_completions = &(__wb)->completions
221 *minp = wb->bdi->min_ratio; in wb_min_max_ratio()
222 *maxp = wb->bdi->max_ratio; in wb_min_max_ratio()
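The two blocks above are the same function under different configs: with CONFIG_CGROUP_WRITEBACK (lines 167-170) a wb only receives the share of bdi->min_ratio/max_ratio proportional to its recent write bandwidth, while the fallback (lines 221-222) hands back the bdi values directly. A minimal userspace sketch of the scaling; BDI_RATIO_SCALE = 10000 is an assumption taken from the kernel headers:

#include <stdio.h>

#define BDI_RATIO_SCALE 10000	/* assumed, as in the kernel headers */

/* sketch: scale a bdi's min/max ratio by this wb's share of the bdi's
 * total write bandwidth, as the cgroup-writeback variant above does */
static void wb_min_max_ratio(unsigned long this_bw, unsigned long tot_bw,
			     unsigned long long min, unsigned long long max,
			     unsigned long *minp, unsigned long *maxp)
{
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			min /= tot_bw;
		}
		if (max < 100 * BDI_RATIO_SCALE) {
			max *= this_bw;
			max /= tot_bw;
		}
	}
	*minp = min;
	*maxp = max;
}

int main(void)
{
	unsigned long min, max;

	/* a wb doing 25% of the device's writeback gets 25% of each ratio */
	wb_min_max_ratio(25, 100, 20 * BDI_RATIO_SCALE, 80 * BDI_RATIO_SCALE,
			 &min, &max);
	printf("min=%lu max=%lu of %d\n", min, max, 100 * BDI_RATIO_SCALE);
	return 0;
}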
235 * user-configurable dirty ratio is the effective number of pages that
240 * absolute number of bytes, calculating the per-zone dirty limit can
246 * node_dirtyable_memory - number of dirtyable pages in a node
250 * page cache. This is the base value for the per-node dirty limits.
258 struct zone *zone = pgdat->node_zones + z; in node_dirtyable_memory()
271 nr_pages -= min(nr_pages, pgdat->totalreserve_pages); in node_dirtyable_memory()
283 unsigned long x = 0; in highmem_dirtyable_memory() local
294 z = &NODE_DATA(node)->node_zones[i]; in highmem_dirtyable_memory()
300 nr_pages -= min(nr_pages, high_wmark_pages(z)); in highmem_dirtyable_memory()
303 x += nr_pages; in highmem_dirtyable_memory()
313 return min(x, total); in highmem_dirtyable_memory()
320 * global_dirtyable_memory - number of globally dirtyable pages
327 unsigned long x; in global_dirtyable_memory() local
329 x = global_zone_page_state(NR_FREE_PAGES); in global_dirtyable_memory()
335 x -= min(x, totalreserve_pages); in global_dirtyable_memory()
337 x += global_node_page_state(NR_INACTIVE_FILE); in global_dirtyable_memory()
338 x += global_node_page_state(NR_ACTIVE_FILE); in global_dirtyable_memory()
341 x -= highmem_dirtyable_memory(x); in global_dirtyable_memory()
343 return x + 1; /* Ensure that we never return 0 */ in global_dirtyable_memory()
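A userspace transcription of the arithmetic in global_dirtyable_memory() (lines 327-343): free pages plus the file LRU, minus the kernel's reserve, never returning 0. The highmem correction on line 341 is omitted here; page counts are passed in instead of read from vmstat:

/* sketch only: all quantities are page counts */
unsigned long global_dirtyable_memory_sketch(unsigned long nr_free,
		unsigned long totalreserve_pages,
		unsigned long nr_inactive_file, unsigned long nr_active_file)
{
	unsigned long x = nr_free;

	x -= (x < totalreserve_pages ? x : totalreserve_pages);
	x += nr_inactive_file;
	x += nr_active_file;
	return x + 1;	/* ensure we never return 0 */
}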
347 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
350 * Calculate @dtc->thresh and ->bg_thresh considering
352 * must ensure that @dtc->avail is set before calling this function. The
353 * dirty limits will be lifted by 1/4 for real-time tasks.
357 const unsigned long available_memory = dtc->avail; in domain_dirty_limits()
361 /* convert ratios to per-PAGE_SIZE for higher precision */ in domain_dirty_limits()
370 unsigned long global_avail = gdtc->avail; in domain_dirty_limits()
376 * per-PAGE_SIZE, they can be obtained by dividing bytes by in domain_dirty_limits()
405 * 32-bits. This gives 16TB dirty limits max which is hopefully enough. in domain_dirty_limits()
409 /* This makes sure bg_thresh is within 32-bits as well */ in domain_dirty_limits()
412 dtc->thresh = thresh; in domain_dirty_limits()
413 dtc->bg_thresh = bg_thresh; in domain_dirty_limits()
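Line 361's "per-PAGE_SIZE" conversion is what lets sub-percent ratios survive integer math: the percentage is scaled up by PAGE_SIZE, multiplied by the available pages, then scaled back down. A sketch of that conversion, with PAGE_SIZE = 4096 assumed for the demo; the real function also handles byte-based limits and the 1/4 real-time boost mentioned at line 353:

#define PAGE_SIZE 4096UL	/* assumed for the demo */

/* thresh in pages for a percentage-based limit, keeping ~1/PAGE_SIZE
 * granularity instead of whole percents */
unsigned long ratio_to_thresh(unsigned long dirty_ratio,	/* percent */
			      unsigned long available_memory)	/* pages */
{
	unsigned long ratio = (dirty_ratio * PAGE_SIZE) / 100;

	return (ratio * available_memory) / PAGE_SIZE;
}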
421 * global_dirty_limits - background-writeback and dirty-throttling thresholds
440 * node_dirty_limit - maximum number of dirty pages allowed in a node
463 * 32-bits. This gives 16TB dirty limits max which is hopefully enough. in node_dirty_limit()
469 * node_dirty_ok - tells whether a node is within its dirty limits
509 return -ERANGE; in dirty_background_bytes_handler()
540 return -ERANGE; in dirty_bytes_handler()
562 __fprop_add_percpu_max(&dom->completions, completions, in wb_domain_writeout_add()
565 if (unlikely(!dom->period_time)) { in wb_domain_writeout_add()
572 dom->period_time = wp_next_time(jiffies); in wb_domain_writeout_add()
573 mod_timer(&dom->period_timer, dom->period_time); in wb_domain_writeout_add()
586 wb_domain_writeout_add(&global_wb_domain, &wb->completions, in __wb_writeout_add()
587 wb->bdi->max_prop_frac, nr); in __wb_writeout_add()
592 wb->bdi->max_prop_frac, nr); in __wb_writeout_add()
612 int miss_periods = (jiffies - dom->period_time) / in writeout_period()
615 if (fprop_new_period(&dom->completions, miss_periods + 1)) { in writeout_period()
616 dom->period_time = wp_next_time(dom->period_time + in writeout_period()
618 mod_timer(&dom->period_timer, dom->period_time); in writeout_period()
624 dom->period_time = 0; in writeout_period()
632 spin_lock_init(&dom->lock); in wb_domain_init()
634 timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE); in wb_domain_init()
636 dom->dirty_limit_tstamp = jiffies; in wb_domain_init()
638 return fprop_global_init(&dom->completions, gfp); in wb_domain_init()
644 timer_delete_sync(&dom->period_timer); in wb_domain_exit()
645 fprop_global_destroy(&dom->completions); in wb_domain_exit()
661 return -EINVAL; in bdi_check_pages_limit()
674 return -EINVAL; in bdi_ratio_from_pages()
694 unsigned int delta; in __bdi_set_min_ratio() local
698 return -EINVAL; in __bdi_set_min_ratio()
701 if (min_ratio > bdi->max_ratio) { in __bdi_set_min_ratio()
702 ret = -EINVAL; in __bdi_set_min_ratio()
704 if (min_ratio < bdi->min_ratio) { in __bdi_set_min_ratio()
705 delta = bdi->min_ratio - min_ratio; in __bdi_set_min_ratio()
706 bdi_min_ratio -= delta; in __bdi_set_min_ratio()
707 bdi->min_ratio = min_ratio; in __bdi_set_min_ratio()
709 delta = min_ratio - bdi->min_ratio; in __bdi_set_min_ratio()
710 if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) { in __bdi_set_min_ratio()
711 bdi_min_ratio += delta; in __bdi_set_min_ratio()
712 bdi->min_ratio = min_ratio; in __bdi_set_min_ratio()
714 ret = -EINVAL; in __bdi_set_min_ratio()
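Lines 694-714 maintain an invariant: bdi_min_ratio tracks the sum of all per-bdi minimum ratios, and that sum must stay below 100%, so raising a bdi's minimum is accepted only if the global budget allows the delta, while lowering it always succeeds. A self-contained sketch of the bookkeeping, again assuming BDI_RATIO_SCALE = 10000:

#include <errno.h>

#define BDI_RATIO_SCALE 10000		/* assumed, as in the kernel */

static unsigned long bdi_min_ratio;	/* sum over all bdis */

int set_min_ratio(unsigned int *bdi_min, unsigned int new_min,
		  unsigned int bdi_max)
{
	unsigned int delta;

	if (new_min > bdi_max)
		return -EINVAL;

	if (new_min < *bdi_min) {		/* shrinking: always fits */
		delta = *bdi_min - new_min;
		bdi_min_ratio -= delta;
		*bdi_min = new_min;
	} else {				/* growing: check the sum */
		delta = new_min - *bdi_min;
		if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
			bdi_min_ratio += delta;
			*bdi_min = new_min;
		} else {
			return -EINVAL;
		}
	}
	return 0;
}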
728 return -EINVAL; in __bdi_set_max_ratio()
731 if (bdi->min_ratio > max_ratio) { in __bdi_set_max_ratio()
732 ret = -EINVAL; in __bdi_set_max_ratio()
734 bdi->max_ratio = max_ratio; in __bdi_set_max_ratio()
735 bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / in __bdi_set_max_ratio()
766 return bdi_get_bytes(bdi->min_ratio); in bdi_get_min_bytes()
787 return bdi_get_bytes(bdi->max_ratio); in bdi_get_max_bytes()
809 return -EINVAL; in bdi_set_strict_limit()
813 bdi->capabilities |= BDI_CAP_STRICTLIMIT; in bdi_set_strict_limit()
815 bdi->capabilities &= ~BDI_CAP_STRICTLIMIT; in bdi_set_strict_limit()
830 return max(thresh, dom->dirty_limit); in hard_dirty_limit()
835 * system-wide clean memory excluding the amount being used in the domain.
841 unsigned long clean = filepages - min(filepages, mdtc->dirty); in mdtc_calc_avail()
842 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); in mdtc_calc_avail()
843 unsigned long other_clean = global_clean - min(global_clean, clean); in mdtc_calc_avail()
845 mdtc->avail = filepages + min(headroom, other_clean); in mdtc_calc_avail()
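mdtc_calc_avail() (lines 841-845) gives a memcg domain its own file pages plus whatever clean memory the rest of the system is not using, capped by the memcg's headroom. The same three min() steps as a standalone function:

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* all quantities are page counts */
unsigned long mdtc_avail(unsigned long filepages, unsigned long headroom,
			 unsigned long mdtc_dirty,
			 unsigned long global_avail, unsigned long global_dirty)
{
	unsigned long clean = filepages - min_ul(filepages, mdtc_dirty);
	unsigned long global_clean = global_avail - min_ul(global_avail, global_dirty);
	unsigned long other_clean = global_clean - min_ul(global_clean, clean);

	return filepages + min_ul(headroom, other_clean);
}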
861 dtc->avail = global_dirtyable_memory(); in domain_dirty_avail()
862 dtc->dirty = global_node_page_state(NR_FILE_DIRTY); in domain_dirty_avail()
864 dtc->dirty += global_node_page_state(NR_WRITEBACK); in domain_dirty_avail()
868 mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty, in domain_dirty_avail()
871 dtc->dirty += writeback; in domain_dirty_avail()
877 * __wb_calc_thresh - @wb's share of dirty threshold
879 * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
882 * threshold as a hard limit when sleeping max_pause per page is not enough
890 * - starving fast devices
891 * - piling up dirty pages (that will take long time to sync) on slow devices
894 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
904 struct bdi_writeback *wb = dtc->wb; in __wb_calc_thresh()
913 fprop_fraction_percpu(&dom->completions, dtc->wb_completions, in __wb_calc_thresh()
916 wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE); in __wb_calc_thresh()
928 * threshold, so that the occasional writes won't be blocked and active in __wb_calc_thresh()
929 * writes can rampup the threshold quickly. in __wb_calc_thresh()
931 if (thresh > dtc->dirty) { in __wb_calc_thresh()
932 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) in __wb_calc_thresh()
933 wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100); in __wb_calc_thresh()
935 wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8); in __wb_calc_thresh()
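Lines 931-935 keep a mostly-idle wb from being starved: while the domain is below its threshold, wb_thresh is floored at a fraction of the remaining budget (1/100 under BDI_CAP_STRICTLIMIT per line 933, 1/8 otherwise per line 935), so occasional writes are not blocked and can ramp the wb's proportion back up. As a sketch:

unsigned long wb_thresh_rampup_floor(unsigned long wb_thresh,
		unsigned long thresh, unsigned long dirty, int strictlimit)
{
	if (thresh > dirty) {
		unsigned long floor =
			(thresh - dirty) / (strictlimit ? 100 : 8);

		if (wb_thresh < floor)
			wb_thresh = floor;
	}
	return wb_thresh;
}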
966-968 *        f(dirty) := 1.0 + ((setpoint - dirty) / (limit - setpoint))^3
984 long x; in pos_ratio_polynom() local
986 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, in pos_ratio_polynom()
987 (limit - setpoint) | 1); in pos_ratio_polynom()
988 pos_ratio = x; in pos_ratio_polynom()
989 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; in pos_ratio_polynom()
990 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; in pos_ratio_polynom()
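Lines 984-990 evaluate the cubic control curve from lines 966-968 in fixed point, with "| 1" guarding the divide against a zero divisor. A runnable userspace transcription; RATELIMIT_CALC_SHIFT = 10 and the final clamp to [0, 2.0] are taken from the kernel source but sit outside the matched lines:

#include <stdio.h>

#define RATELIMIT_CALC_SHIFT 10		/* 1.0 == 1 << 10, as in the kernel */

/* f(dirty) = 1.0 + ((setpoint - dirty) / (limit - setpoint))^3 */
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty, unsigned long limit)
{
	long long pos_ratio;
	long long x;

	x = ((long long)setpoint - (long long)dirty) << RATELIMIT_CALC_SHIFT;
	x /= (long long)((limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	/* the kernel clamps the result to [0, 2.0]; assumed here */
	if (pos_ratio < 0)
		pos_ratio = 0;
	if (pos_ratio > 2 << RATELIMIT_CALC_SHIFT)
		pos_ratio = 2 << RATELIMIT_CALC_SHIFT;
	return pos_ratio;
}

int main(void)
{
	/* at the setpoint f() == 1.0; below it throttling eases, above it bites */
	printf("%lld\n", pos_ratio_polynom(1000, 1000, 2000));	/* 1024 == 1.0 */
	printf("%lld\n", pos_ratio_polynom(1000,  500, 2000));	/* 1152  > 1.0 */
	printf("%lld\n", pos_ratio_polynom(1000, 1500, 2000));	/*  896  < 1.0 */
	return 0;
}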
1034,1062 * (x-axis baselines of the two ASCII plots sketching the global and wb dirty control lines)
1067 * - start writing to a slow SD card and a fast disk at the same time. The SD
1069 * - the wb dirty thresh drops quickly due to change of JBOD workload
1073 struct bdi_writeback *wb = dtc->wb; in wb_position_ratio()
1074 unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); in wb_position_ratio()
1075 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); in wb_position_ratio()
1076 unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); in wb_position_ratio()
1077 unsigned long wb_thresh = dtc->wb_thresh; in wb_position_ratio()
1083 long x; in wb_position_ratio() local
1085 dtc->pos_ratio = 0; in wb_position_ratio()
1087 if (unlikely(dtc->dirty >= limit)) in wb_position_ratio()
1096 pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit); in wb_position_ratio()
1103 * This is especially important for fuse which sets bdi->max_ratio to in wb_position_ratio()
1108 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global in wb_position_ratio()
1121 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_position_ratio()
1124 if (dtc->wb_dirty >= wb_thresh) in wb_position_ratio()
1128 dtc->wb_bg_thresh); in wb_position_ratio()
1133 wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty, in wb_position_ratio()
1146 * but it would look too non-natural for the case of all in wb_position_ratio()
1148 * with bdi->max_ratio == 100%. in wb_position_ratio()
1157 dtc->pos_ratio = min(pos_ratio, wb_pos_ratio); in wb_position_ratio()
1170 * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint) in wb_position_ratio()
1172-1174 *              := (x_intercept - wb_dirty) / (x_intercept - wb_setpoint) in wb_position_ratio()
1179 * (2) k = - 1 / (8 * write_bw) (in single wb case) in wb_position_ratio()
1184 * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2] in wb_position_ratio()
1192 if (unlikely(wb_thresh > dtc->thresh)) in wb_position_ratio()
1193 wb_thresh = dtc->thresh; in wb_position_ratio()
1198 x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1); in wb_position_ratio()
1199 wb_setpoint = setpoint * (u64)x >> 16; in wb_position_ratio()
1202 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case. in wb_position_ratio()
1204-1205 * span = (wb_thresh / thresh) * (8 * write_bw) + ((thresh - wb_thresh) / thresh) * wb_thresh in wb_position_ratio()
1208 span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16; in wb_position_ratio()
1211 if (dtc->wb_dirty < x_intercept - span / 4) { in wb_position_ratio()
1212 pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty), in wb_position_ratio()
1213 (x_intercept - wb_setpoint) | 1); in wb_position_ratio()
1223 if (dtc->wb_dirty < x_intercept) { in wb_position_ratio()
1224 if (dtc->wb_dirty > x_intercept / 8) in wb_position_ratio()
1226 dtc->wb_dirty); in wb_position_ratio()
1231 dtc->pos_ratio = pos_ratio; in wb_position_ratio()
1239 unsigned long avg = wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1240 unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth()
1246-1247 * write_bandwidth = (bw * elapsed + write_bandwidth * (period - elapsed)) / period in wb_update_write_bandwidth()
1253 bw = written - min(written, wb->written_stamp); in wb_update_write_bandwidth()
1260 bw += (u64)wb->write_bandwidth * (period - elapsed); in wb_update_write_bandwidth()
1267 avg -= (avg - old) >> 3; in wb_update_write_bandwidth()
1270 avg += (old - avg) >> 3; in wb_update_write_bandwidth()
1276 long delta = avg - wb->avg_write_bandwidth; in wb_update_write_bandwidth() local
1277 WARN_ON_ONCE(atomic_long_add_return(delta, in wb_update_write_bandwidth()
1278 &wb->bdi->tot_write_bandwidth) <= 0); in wb_update_write_bandwidth()
1280 wb->write_bandwidth = bw; in wb_update_write_bandwidth()
1281 WRITE_ONCE(wb->avg_write_bandwidth, avg); in wb_update_write_bandwidth()
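Lines 1246-1281 smooth the raw write rate twice: first the period-weighted blend with the previous estimate (the formula at lines 1246-1247), then a second filter that nudges the long-term average 1/8 of the way toward the previous estimate when the new sample confirms the direction (lines 1267 and 1270), filtering out spikes. A sketch; HZ = 100 is assumed, and a plain divide stands in for the kernel's power-of-two period and shift:

#define HZ	100		/* assumed for the demo */
#define PERIOD	(3 * HZ)	/* the kernel rounds this up to a power of two */

void update_write_bandwidth(unsigned long *write_bandwidth,	/* pages/s */
			    unsigned long *avg_write_bandwidth,
			    unsigned long written_delta,	/* pages */
			    unsigned long elapsed)		/* jiffies */
{
	unsigned long long bw = (unsigned long long)written_delta * HZ;
	unsigned long old = *write_bandwidth;
	unsigned long avg = *avg_write_bandwidth;

	if (elapsed > PERIOD) {
		bw /= elapsed;		/* stale period: trust the raw rate */
		avg = (unsigned long)bw;
	} else {
		/* the lines 1246-1247 formula */
		bw += (unsigned long long)old * (PERIOD - elapsed);
		bw /= PERIOD;
		/* second stage: move avg toward old, 1/8 of the gap */
		if (avg > old && old >= (unsigned long)bw)
			avg -= (avg - old) >> 3;
		if (avg < old && old <= (unsigned long)bw)
			avg += (old - avg) >> 3;
	}
	*write_bandwidth = (unsigned long)bw;
	*avg_write_bandwidth = avg;
}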
1287 unsigned long thresh = dtc->thresh; in update_dirty_limit()
1288 unsigned long limit = dom->dirty_limit; in update_dirty_limit()
1301 * dom->dirty_limit which is guaranteed to lie above the dirty pages. in update_dirty_limit()
1303 thresh = max(thresh, dtc->dirty); in update_dirty_limit()
1305 limit -= (limit - thresh) >> 5; in update_dirty_limit()
1310 dom->dirty_limit = limit; in update_dirty_limit()
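update_dirty_limit() (lines 1287-1310) makes dom->dirty_limit track thresh asymmetrically: it jumps up immediately, decays down by 1/32 of the gap per update (line 1305), and never drops below the current dirty count (line 1303). Sketched as a pure function; the immediate follow-up branch is outside the matched lines but taken from the kernel source:

unsigned long track_dirty_limit(unsigned long limit,	/* dom->dirty_limit */
				unsigned long thresh, unsigned long dirty)
{
	if (limit < thresh)
		return thresh;			/* follow up in one step */

	/* follow down slowly, staying above the dirty pages */
	thresh = thresh > dirty ? thresh : dirty;
	if (limit > thresh)
		limit -= (limit - thresh) >> 5;
	return limit;
}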
1321 if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) in domain_update_dirty_limit()
1324 spin_lock(&dom->lock); in domain_update_dirty_limit()
1325 if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) { in domain_update_dirty_limit()
1327 dom->dirty_limit_tstamp = now; in domain_update_dirty_limit()
1329 spin_unlock(&dom->lock); in domain_update_dirty_limit()
1333 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1342 struct bdi_writeback *wb = dtc->wb; in wb_update_dirty_ratelimit()
1343 unsigned long dirty = dtc->dirty; in wb_update_dirty_ratelimit()
1344 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); in wb_update_dirty_ratelimit()
1345 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); in wb_update_dirty_ratelimit()
1347 unsigned long write_bw = wb->avg_write_bandwidth; in wb_update_dirty_ratelimit()
1348 unsigned long dirty_ratelimit = wb->dirty_ratelimit; in wb_update_dirty_ratelimit()
1353 unsigned long x; in wb_update_dirty_ratelimit() local
1358 * when dirty pages are truncated by userspace or re-dirtied by FS. in wb_update_dirty_ratelimit()
1360 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; in wb_update_dirty_ratelimit()
1366 dtc->pos_ratio >> RATELIMIT_CALC_SHIFT; in wb_update_dirty_ratelimit()
1410 * wb->dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1418-1419 * task_ratelimit - dirty_ratelimit = (pos_ratio - 1) * dirty_ratelimit in wb_update_dirty_ratelimit()
1428 * - dirty_ratelimit > balanced_dirty_ratelimit in wb_update_dirty_ratelimit()
1429 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) in wb_update_dirty_ratelimit()
1435 * |task_ratelimit - dirty_ratelimit| is used to limit the step size in wb_update_dirty_ratelimit()
1450 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_update_dirty_ratelimit()
1451 dirty = dtc->wb_dirty; in wb_update_dirty_ratelimit()
1452 setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2; in wb_update_dirty_ratelimit()
1456 x = min3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1458 if (dirty_ratelimit < x) in wb_update_dirty_ratelimit()
1459 step = x - dirty_ratelimit; in wb_update_dirty_ratelimit()
1461 x = max3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1463 if (dirty_ratelimit > x) in wb_update_dirty_ratelimit()
1464 step = dirty_ratelimit - x; in wb_update_dirty_ratelimit()
1481 dirty_ratelimit -= step; in wb_update_dirty_ratelimit()
1483 WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL)); in wb_update_dirty_ratelimit()
1484 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
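Lines 1450-1484 only move dirty_ratelimit when it disagrees with both the balanced rate and the task rate on the same side (the min3/max3 at lines 1456 and 1461), which filters out the constant jitter of the balanced estimate. A simplified sketch; the kernel additionally damps the step before applying it, which is elided here:

static unsigned long min3_ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

static unsigned long max3_ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

unsigned long adjust_ratelimit(unsigned long dirty_ratelimit,
			       unsigned long prev_balanced,	/* wb->balanced_dirty_ratelimit */
			       unsigned long balanced,
			       unsigned long task_ratelimit,
			       int below_setpoint)
{
	unsigned long step = 0, x;

	if (below_setpoint) {
		x = min3_ul(prev_balanced, balanced, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3_ul(prev_balanced, balanced, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	if (dirty_ratelimit < balanced)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;
	return dirty_ratelimit > 1 ? dirty_ratelimit : 1;	/* never 0 */
}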
1493 struct bdi_writeback *wb = gdtc->wb; in __wb_update_bandwidth()
1499 spin_lock(&wb->list_lock); in __wb_update_bandwidth()
1507 elapsed = max(now - wb->bw_time_stamp, 1UL); in __wb_update_bandwidth()
1508 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); in __wb_update_bandwidth()
1509 written = percpu_counter_read(&wb->stat[WB_WRITTEN]); in __wb_update_bandwidth()
1526 wb->dirtied_stamp = dirtied; in __wb_update_bandwidth()
1527 wb->written_stamp = written; in __wb_update_bandwidth()
1528 WRITE_ONCE(wb->bw_time_stamp, now); in __wb_update_bandwidth()
1529 spin_unlock(&wb->list_lock); in __wb_update_bandwidth()
1545 unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp); in wb_bandwidth_estimate_start()
1548 !atomic_read(&wb->writeback_inodes)) { in wb_bandwidth_estimate_start()
1549 spin_lock(&wb->list_lock); in wb_bandwidth_estimate_start()
1550 wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED); in wb_bandwidth_estimate_start()
1551 wb->written_stamp = wb_stat(wb, WB_WRITTEN); in wb_bandwidth_estimate_start()
1552 WRITE_ONCE(wb->bw_time_stamp, now); in wb_bandwidth_estimate_start()
1553 spin_unlock(&wb->list_lock); in wb_bandwidth_estimate_start()
1562 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
1569 return 1UL << (ilog2(thresh - dirty) >> 1); in dirty_poll_interval()
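dirty_poll_interval() (line 1569) is the "near-sqrt" scaling mentioned at line 1562: 2^(ilog2(gap)/2) is roughly sqrt(thresh - dirty), so tasks recheck their dirty budget more often as they close in on the threshold. A userspace sketch with ilog2 open-coded:

static unsigned int ilog2_ul(unsigned long v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

/* number of pages a task may dirty before polling the counters again */
unsigned long dirty_poll_interval(unsigned long dirty, unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2_ul(thresh - dirty) >> 1);
	return 1;
}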
1577 unsigned long bw = READ_ONCE(wb->avg_write_bandwidth); in wb_max_pause()
1599 long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth)); in wb_min_pause()
1600 long lo = ilog2(READ_ONCE(wb->dirty_ratelimit)); in wb_min_pause()
1605 /* target for 10ms pause on 1-dd case */ in wb_min_pause()
1615 t += (hi - lo) * (10 * HZ) / 1024; in wb_min_pause()
1640 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}. in wb_min_pause()
1670 struct bdi_writeback *wb = dtc->wb; in wb_dirty_limits()
1676 * - in JBOD setup, wb_thresh can fluctuate a lot in wb_dirty_limits()
1677 * - in a system with HDD and USB key, the USB key may somehow in wb_dirty_limits()
1686 dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh); in wb_dirty_limits()
1687 dtc->wb_bg_thresh = dtc->thresh ? in wb_dirty_limits()
1688 div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0; in wb_dirty_limits()
1693 * the threshold is low. in wb_dirty_limits()
1696 * reported dirty, even though there are thresh-m pages in wb_dirty_limits()
1700 if (dtc->wb_thresh < 2 * wb_stat_error()) { in wb_dirty_limits()
1702 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); in wb_dirty_limits()
1705 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); in wb_dirty_limits()
1715 dirty = dtc->wb_dirty; in domain_poll_intv()
1716 thresh = dtc->wb_thresh; in domain_poll_intv()
1718 dirty = dtc->dirty; in domain_poll_intv()
1719 thresh = dtc->thresh; in domain_poll_intv()
1726 * Throttle it only when the background writeback cannot catch-up. This avoids
1732 * for strictlimit-ing.
1741 dirty = dtc->wb_dirty; in domain_dirty_freerun()
1742 thresh = dtc->wb_thresh; in domain_dirty_freerun()
1743 bg_thresh = dtc->wb_bg_thresh; in domain_dirty_freerun()
1745 dirty = dtc->dirty; in domain_dirty_freerun()
1746 thresh = dtc->thresh; in domain_dirty_freerun()
1747 bg_thresh = dtc->bg_thresh; in domain_dirty_freerun()
1749 dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh); in domain_dirty_freerun()
1763 dtc->freerun = false; in wb_dirty_freerun()
1771 * LOCAL_THROTTLE tasks must not be throttled when below the per-wb in wb_dirty_freerun()
1774 if (!(current->flags & PF_LOCAL_THROTTLE)) in wb_dirty_freerun()
1777 dtc->freerun = dtc->wb_dirty < in wb_dirty_freerun()
1778 dirty_freerun_ceiling(dtc->wb_thresh, dtc->wb_bg_thresh); in wb_dirty_freerun()
1784 dtc->dirty_exceeded = (dtc->wb_dirty > dtc->wb_thresh) && in wb_dirty_exceeded()
1785 ((dtc->dirty > dtc->thresh) || strictlimit); in wb_dirty_exceeded()
1796 if (dtc->freerun) in balance_wb_limits()
1827 struct backing_dev_info *bdi = wb->bdi; in balance_dirty_pages()
1828 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; in balance_dirty_pages()
1847 * In laptop mode, we wait until hitting the higher threshold in balance_dirty_pages()
1849 * the way down to the lower threshold. So slow writers cause in balance_dirty_pages()
1855 if (!laptop_mode && nr_dirty > gdtc->bg_thresh && in balance_dirty_pages()
1863 if (gdtc->freerun && (!mdtc || mdtc->freerun)) { in balance_dirty_pages()
1871 current->dirty_paused_when = now; in balance_dirty_pages()
1872 current->nr_dirtied = 0; in balance_dirty_pages()
1875 current->nr_dirtied_pause = min(intv, m_intv); in balance_dirty_pages()
1890 if (gdtc->freerun) in balance_dirty_pages()
1902 if (mdtc->freerun) in balance_dirty_pages()
1904 if (mdtc->pos_ratio < gdtc->pos_ratio) in balance_dirty_pages()
1908 wb->dirty_exceeded = gdtc->dirty_exceeded || in balance_dirty_pages()
1909 (mdtc && mdtc->dirty_exceeded); in balance_dirty_pages()
1910 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + in balance_dirty_pages()
1915 dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit); in balance_dirty_pages()
1916 task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >> in balance_dirty_pages()
1918 max_pause = wb_max_pause(wb, sdtc->wb_dirty); in balance_dirty_pages()
1930 if (current->dirty_paused_when) in balance_dirty_pages()
1931 pause -= now - current->dirty_paused_when; in balance_dirty_pages()
1934 * for up to 800ms from time to time on 1-HDD; so does xfs, in balance_dirty_pages()
1948 if (pause < -HZ) { in balance_dirty_pages()
1949 current->dirty_paused_when = now; in balance_dirty_pages()
1950 current->nr_dirtied = 0; in balance_dirty_pages()
1952 current->dirty_paused_when += period; in balance_dirty_pages()
1953 current->nr_dirtied = 0; in balance_dirty_pages()
1954 } else if (current->nr_dirtied_pause <= pages_dirtied) in balance_dirty_pages()
1955 current->nr_dirtied_pause += pages_dirtied; in balance_dirty_pages()
1960 now += min(pause - max_pause, max_pause); in balance_dirty_pages()
1974 ret = -EAGAIN; in balance_dirty_pages()
1978 bdi->last_bdp_sleep = jiffies; in balance_dirty_pages()
1981 current->dirty_paused_when = now + pause; in balance_dirty_pages()
1982 current->nr_dirtied = 0; in balance_dirty_pages()
1983 current->nr_dirtied_pause = nr_dirtied_pause; in balance_dirty_pages()
1997 * In theory 1 page is enough to keep the consumer-producer in balance_dirty_pages()
2002 if (sdtc->wb_dirty <= wb_stat_error()) in balance_dirty_pages()
2016 * dirty tsk->nr_dirtied_pause pages;
2020 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
2030 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
2040 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
2049 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited_flags()
2056 if (!(bdi->capabilities & BDI_CAP_WRITEBACK)) in balance_dirty_pages_ratelimited_flags()
2062 wb = &bdi->wb; in balance_dirty_pages_ratelimited_flags()
2064 ratelimit = current->nr_dirtied_pause; in balance_dirty_pages_ratelimited_flags()
2065 if (wb->dirty_exceeded) in balance_dirty_pages_ratelimited_flags()
2066 ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); in balance_dirty_pages_ratelimited_flags()
2073 * time, hence all honoured too large initial task->nr_dirtied_pause. in balance_dirty_pages_ratelimited_flags()
2076 if (unlikely(current->nr_dirtied >= ratelimit)) in balance_dirty_pages_ratelimited_flags()
2084 * short-lived tasks (eg. gcc invocations in a kernel build) escaping in balance_dirty_pages_ratelimited_flags()
2085 * the dirty throttling and livelock other long-run dirtiers. in balance_dirty_pages_ratelimited_flags()
2088 if (*p > 0 && current->nr_dirtied < ratelimit) { in balance_dirty_pages_ratelimited_flags()
2090 nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); in balance_dirty_pages_ratelimited_flags()
2091 *p -= nr_pages_dirtied; in balance_dirty_pages_ratelimited_flags()
2092 current->nr_dirtied += nr_pages_dirtied; in balance_dirty_pages_ratelimited_flags()
2096 if (unlikely(current->nr_dirtied >= ratelimit)) in balance_dirty_pages_ratelimited_flags()
2097 ret = balance_dirty_pages(wb, current->nr_dirtied, flags); in balance_dirty_pages_ratelimited_flags()
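Lines 2064-2097 show the fast path: each task may dirty up to nr_dirtied_pause pages between calls into balance_dirty_pages(), tightened to 32KB worth of pages once the wb has exceeded its limits (line 2066), while a per-CPU counter (the *p at lines 2088-2092, bdp_ratelimits in the kernel) charges pages dirtied by short-lived tasks to whoever runs next so they cannot escape throttling. A simplified, single-threaded sketch of that decision:

/* returns nonzero when the caller should enter balance_dirty_pages() */
int should_balance(unsigned long *nr_dirtied, unsigned long nr_dirtied_pause,
		   unsigned long *leaked, int dirty_exceeded)
{
	unsigned long ratelimit = nr_dirtied_pause;

	if (dirty_exceeded && ratelimit > 8)
		ratelimit = 8;	/* 32 >> (PAGE_SHIFT - 10): 32KB on 4K pages */

	/* charge pages leaked by exited tasks to the current task */
	if (*leaked > 0 && *nr_dirtied < ratelimit) {
		unsigned long take = ratelimit - *nr_dirtied;

		if (take > *leaked)
			take = *leaked;
		*leaked -= take;
		*nr_dirtied += take;
	}
	return *nr_dirtied >= ratelimit;
}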
2105 * balance_dirty_pages_ratelimited - balance dirty memory state.
2128 struct bdi_writeback *wb = dtc->wb; in wb_bg_dirty_limits()
2130 dtc->wb_bg_thresh = __wb_calc_thresh(dtc, dtc->bg_thresh); in wb_bg_dirty_limits()
2131 if (dtc->wb_bg_thresh < 2 * wb_stat_error()) in wb_bg_dirty_limits()
2132 dtc->wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_bg_dirty_limits()
2134 dtc->wb_dirty = wb_stat(wb, WB_RECLAIMABLE); in wb_bg_dirty_limits()
2141 if (dtc->dirty > dtc->bg_thresh) in domain_over_bg_thresh()
2145 if (dtc->wb_dirty > dtc->wb_bg_thresh) in domain_over_bg_thresh()
2152 * wb_over_bg_thresh - does @wb need to be written back?
2188 * and a different non-zero value will wakeup the writeback threads. in dirty_writeback_centisecs_handler()
2212 * then push it back - the user is still using the disk.
2216 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); in laptop_io_completion()
2231 timer_delete(&bdi->laptop_mode_wb_timer); in laptop_sync_completion()
2237 * If ratelimit_pages is too high then we can get into dirty-data overload
2252 dom->dirty_limit = dirty_thresh; in writeback_set_ratelimit()
2348 * is now applied to total non-HIGHPAGE memory, and as such we can't
2351 * non-HIGHMEM memory.
2370 * tag_pages_for_writeback - tag pages to be written by writeback
2386 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
2415 if (unlikely(folio->mapping != mapping)) in folio_prepare_writeback()
2425 if (wbc->sync_mode == WB_SYNC_NONE) in folio_prepare_writeback()
2439 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in wbc_to_tag()
2446 if (wbc->range_cyclic) in wbc_end()
2447 return -1; in wbc_end()
2448 return wbc->range_end >> PAGE_SHIFT; in wbc_end()
2457 folio = folio_batch_next(&wbc->fbatch); in writeback_get_folio()
2459 folio_batch_release(&wbc->fbatch); in writeback_get_folio()
2461 filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc), in writeback_get_folio()
2462 wbc_to_tag(wbc), &wbc->fbatch); in writeback_get_folio()
2463 folio = folio_batch_next(&wbc->fbatch); in writeback_get_folio()
2474 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); in writeback_get_folio()
2479 * writeback_iter - iterate folio of a mapping for writeback
2483 * @error: in-out pointer for writeback errors (see below)
2486 * @wbc on @mapping and should be called in a while loop in the ->writepages
2493 * If there was an error in the per-folio writeback inside the writeback_iter()
2508 folio_batch_init(&wbc->fbatch); in writeback_iter()
2509 wbc->saved_err = *error = 0; in writeback_iter()
2515 * For non-cyclic writeback we always start at the beginning of in writeback_iter()
2518 if (wbc->range_cyclic) in writeback_iter()
2519 wbc->index = mapping->writeback_index; in writeback_iter()
2521 wbc->index = wbc->range_start >> PAGE_SHIFT; in writeback_iter()
2528 * For data-integrity writeback we have to be careful so that we in writeback_iter()
2534 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in writeback_iter()
2535 tag_pages_for_writeback(mapping, wbc->index, in writeback_iter()
2538 wbc->nr_to_write -= folio_nr_pages(folio); in writeback_iter()
2545 * we run past wbc->nr_to_write or encounter errors. in writeback_iter()
2546 * We stash away the first error we encounter in wbc->saved_err in writeback_iter()
2551 * wbc->nr_to_write or encounter the first error. in writeback_iter()
2553 if (wbc->sync_mode == WB_SYNC_ALL) { in writeback_iter()
2554 if (*error && !wbc->saved_err) in writeback_iter()
2555 wbc->saved_err = *error; in writeback_iter()
2557 if (*error || wbc->nr_to_write <= 0) in writeback_iter()
2569 * writeback access order inversion - we should only ever lock in writeback_iter()
2570 * multiple folios in ascending folio->index order, and looping in writeback_iter()
2574 if (wbc->range_cyclic) in writeback_iter()
2575 mapping->writeback_index = 0; in writeback_iter()
2581 *error = wbc->saved_err; in writeback_iter()
2586 if (wbc->range_cyclic) in writeback_iter()
2587 mapping->writeback_index = folio_next_index(folio); in writeback_iter()
2588 folio_batch_release(&wbc->fbatch); in writeback_iter()
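writeback_iter() (lines 2479-2588) is designed to be the whole loop of a ->writepages implementation: it hands back locked folios one at a time and takes care of tagging, cyclic ranges, nr_to_write accounting, and error propagation. A sketch of the expected call shape, modeled on the usage pattern its kerneldoc describes; example_writeout_folio is hypothetical and is expected to unlock the folio, like a classic ->writepage:

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = example_writeout_folio(mapping, folio);

	/* writeback_iter() leaves the first data-integrity error in *error */
	return error;
}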
2598 if (wbc->nr_to_write <= 0) in do_writepages()
2600 wb = inode_to_wb_wbc(mapping->host, wbc); in do_writepages()
2603 if (mapping->a_ops->writepages) in do_writepages()
2604 ret = mapping->a_ops->writepages(mapping, wbc); in do_writepages()
2608 if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL) in do_writepages()
2625 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + in do_writepages()
2650 struct inode *inode = mapping->host; in folio_account_dirtied()
2667 current->nr_dirtied += nr; in folio_account_dirtied()
2682 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); in folio_account_cleaned()
2683 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); in folio_account_cleaned()
2684 wb_stat_mod(wb, WB_RECLAIMABLE, -nr); in folio_account_cleaned()
2714 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_mark_dirty()
2715 if (folio->mapping) { /* Race with truncate? */ in __folio_mark_dirty()
2718 __xa_set_mark(&mapping->i_pages, folio->index, in __folio_mark_dirty()
2721 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_mark_dirty()
2725 * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2736 * that case, but not all the buffers. This is a "bottom-up" dirtying,
2737 * whereas block_dirty_folio() is a "top-down" dirtying.
2750 if (mapping->host) { in filemap_dirty_folio()
2752 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in filemap_dirty_folio()
2759 * folio_redirty_for_writepage - Decline to write a dirty folio.
2773 struct address_space *mapping = folio->mapping; in folio_redirty_for_writepage()
2777 wbc->pages_skipped += nr; in folio_redirty_for_writepage()
2780 struct inode *inode = mapping->host; in folio_redirty_for_writepage()
2785 current->nr_dirtied -= nr; in folio_redirty_for_writepage()
2786 node_stat_mod_folio(folio, NR_DIRTIED, -nr); in folio_redirty_for_writepage()
2787 wb_stat_mod(wb, WB_DIRTIED, -nr); in folio_redirty_for_writepage()
2795 * folio_mark_dirty - Mark a folio as being modified.
2825 return mapping->a_ops->dirty_folio(mapping, folio); in folio_mark_dirty()
2834 * folio->mapping->host, and if the folio is unlocked. This is because another
2837 * Usually, the folio _is_ locked, or the caller is a user-space process which
2871 struct inode *inode = mapping->host; in __folio_cancel_dirty()
2893 * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
2894 * The ->writepage implementation will run either folio_start_writeback()
2909 struct inode *inode = mapping->host; in folio_clear_dirty_for_io()
2918 * (b) we tell the low-level filesystem to in folio_clear_dirty_for_io()
2929 * has no effect on the actual dirty bit - since in folio_clear_dirty_for_io()
2951 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); in folio_clear_dirty_for_io()
2952 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); in folio_clear_dirty_for_io()
2953 wb_stat_mod(wb, WB_RECLAIMABLE, -nr); in folio_clear_dirty_for_io()
2965 atomic_inc(&wb->writeback_inodes); in wb_inode_writeback_start()
2971 atomic_dec(&wb->writeback_inodes); in wb_inode_writeback_end()
2979 spin_lock_irqsave(&wb->work_lock, flags); in wb_inode_writeback_end()
2980 if (test_bit(WB_registered, &wb->state)) in wb_inode_writeback_end()
2981 queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL); in wb_inode_writeback_end()
2982 spin_unlock_irqrestore(&wb->work_lock, flags); in wb_inode_writeback_end()
2992 struct inode *inode = mapping->host; in __folio_end_writeback()
2996 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_end_writeback()
2998 __xa_clear_mark(&mapping->i_pages, folio->index, in __folio_end_writeback()
3002 wb_stat_mod(wb, WB_WRITEBACK, -nr); in __folio_end_writeback()
3006 if (mapping->host) in __folio_end_writeback()
3007 sb_clear_inode_writeback(mapping->host); in __folio_end_writeback()
3010 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_end_writeback()
3015 lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr); in __folio_end_writeback()
3016 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); in __folio_end_writeback()
3032 XA_STATE(xas, &mapping->i_pages, folio->index); in __folio_start_writeback()
3033 struct inode *inode = mapping->host; in __folio_start_writeback()
3054 if (mapping->host) in __folio_start_writeback()
3055 sb_mark_inode_writeback(mapping->host); in __folio_start_writeback()
3080 * folio_wait_writeback - Wait for a folio to finish writeback.
3101 * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3111 * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
3118 return -EINTR; in folio_wait_writeback_killable()
3126 * folio_wait_stable() - wait for writeback to finish, if necessary.