Lines Matching +full:out +full:- +full:of +full:- +full:window

1 // SPDX-License-Identifier: GPL-2.0-only
25 * The window size (vmpressure_win) is the number of scanned pages before
26 * we try to analyze scanned/reclaimed ratio. So the window is used as a
27 * rate-limit tunable for the "low" level notification, and also for
28 * averaging the ratio for medium/critical levels. Using small window
29 * sizes can cause a lot of false positives, but too big a window size will
32 * As the vmscan reclaimer logic works with chunks which are multiple of
33 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
35 * TODO: Make the window size depend on machine size, as we do for vmstat
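
For context, a minimal sketch of the tunable this comment block describes; the definition below is what current mm/vmpressure.c is believed to use (SWAP_CLUSTER_MAX is 32, so the window is 512 pages, i.e. 2MB with 4KB pages):

    /* One pressure evaluation per 512 scanned pages, not per reclaim pass. */
    static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
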
51 * critical pressure as the number of pages will be less than "window size".
58 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
64 * critical level when scanning depth is ~10% of the lru size (vmscan
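
The priority threshold this comment describes is believed to be defined as follows; ilog2(10) == 3, and since vmscan scans roughly lru_size >> prio pages per pass, prio 3 corresponds to a scanning depth of lru_size / 8, i.e. ~12.5% of the LRU:

    static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
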
132 goto out;
134 * We calculate the ratio (in percents) of how many pages were
135 * scanned vs. reclaimed in a given time frame (window). Note that
136 * time is in VM reclaimer's "ticks", i.e. number of pages
140 pressure = scale - (reclaimed * scale / scanned);
143 out:
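
The arithmetic around these lines can be reproduced outside the kernel; the sketch below is a userspace re-implementation for experimentation (the 60/95 medium/critical thresholds are assumed from the rest of this file):

    #include <stdio.h>

    /* Same ratio as the lines above: the percentage of the window that
     * could not be reclaimed. */
    static unsigned long vmpressure_ratio(unsigned long scanned,
                                          unsigned long reclaimed)
    {
            unsigned long scale = scanned + reclaimed;
            unsigned long pressure = 0;

            if (reclaimed >= scanned)       /* e.g. slab-heavy reclaim */
                    goto out;
            pressure = scale - (reclaimed * scale / scanned);
            pressure = pressure * 100 / scale;
    out:
            return pressure;
    }

    int main(void)
    {
            /* 512 pages scanned, 64 reclaimed: scale = 576,
             * pressure = (576 - 72) * 100 / 576 = 87 -> "medium"
             * (60 <= 87 < 95). */
            printf("%lu\n", vmpressure_ratio(512, 64));
            return 0;
    }
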
164 mutex_lock(&vmpr->events_lock);
165 list_for_each_entry(ev, &vmpr->events, node) {
166 if (ancestor && ev->mode == VMPRESSURE_LOCAL)
168 if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
170 if (level < ev->level)
172 eventfd_signal(ev->efd);
175 mutex_unlock(&vmpr->events_lock);
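
Read together, the three checks above filter listeners before the eventfd is signalled; a rough reconstruction of the loop body (the continue statements fall outside the matched lines):

    list_for_each_entry(ev, &vmpr->events, node) {
            if (ancestor && ev->mode == VMPRESSURE_LOCAL)
                    continue;      /* "local" listeners ignore ancestor reclaim */
            if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
                    continue;      /* default mode: a descendant already took it */
            if (level < ev->level)
                    continue;      /* below this listener's threshold */
            eventfd_signal(ev->efd);
            ret = true;            /* reported back so ancestors can skip */
    }
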
189 spin_lock(&vmpr->sr_lock);
196 * vmpr->reclaimed is in sync.
198 scanned = vmpr->tree_scanned;
200 spin_unlock(&vmpr->sr_lock);
204 reclaimed = vmpr->tree_reclaimed;
205 vmpr->tree_scanned = 0;
206 vmpr->tree_reclaimed = 0;
207 spin_unlock(&vmpr->sr_lock);
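
After the counters are snapshotted and cleared under sr_lock above, the work function is believed to compute the level once and then walk the notification up the memcg hierarchy, roughly:

    level = vmpressure_calc_level(scanned, reclaimed);
    do {
            if (vmpressure_event(vmpr, level, ancestor, signalled))
                    signalled = true;
            ancestor = true;
    } while ((vmpr = vmpressure_parent(vmpr)));
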
219 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
223 * @scanned: number of pages scanned
224 * @reclaimed: number of pages reclaimed
232 * notified of the entire subtree's reclaim efficiency.
235 * only in-kernel users are notified.
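
For reference, the signature being documented here is believed to be:

    void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
                    unsigned long scanned, unsigned long reclaimed);
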
248 * The in-kernel users only care about the reclaim efficiency
250 * isn't and won't be any in-kernel user in a legacy cgroup.
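
The check this comment motivates is, in current sources, believed to be roughly the following (non-tree accounting only feeds in-kernel users, which do not exist on the legacy hierarchy):

    if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !tree)
            return;
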
260 * pressure; if we notify userland about that kind of pressure,
262 * freeing of memory by userland (since userland is more likely to
263 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
265 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
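
The allocation-context filter this comment justifies is believed to look like this, so that only reclaim userland could plausibly help with is accounted:

    if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
            return;
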
283 spin_lock(&vmpr->sr_lock);
284 scanned = vmpr->tree_scanned += scanned;
285 vmpr->tree_reclaimed += reclaimed;
286 spin_unlock(&vmpr->sr_lock);
290 schedule_work(&vmpr->work);
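
Between the unlock and the schedule_work() above, the elided lines are believed to defer the work until a full window of pages has been scanned:

    if (scanned < vmpressure_win)
            return;
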
294 /* For now, no users for root-level efficiency */
298 spin_lock(&vmpr->sr_lock);
299 scanned = vmpr->scanned += scanned;
300 reclaimed = vmpr->reclaimed += reclaimed;
302 spin_unlock(&vmpr->sr_lock);
305 vmpr->scanned = vmpr->reclaimed = 0;
306 spin_unlock(&vmpr->sr_lock);
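
After this unlock, the non-tree branch is believed to end by computing the level for the in-kernel users and, above the "low" level, throttling the memcg's socket allocations for about a second:

    level = vmpressure_calc_level(scanned, reclaimed);
    if (level > VMPRESSURE_LOW)
            WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
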
325 * vmpressure_prio() - Account memory pressure through reclaimer priority level
346 * information before shrinker dives into long shrinking of long
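
A minimal sketch of what vmpressure_prio() does with the reclaim priority, assuming the usual structure of this function: priorities at or below vmpressure_level_critical_prio are translated into a fully unreclaimed window, which vmpressure_calc_level() maps to the critical level.

    void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
    {
            if (prio > vmpressure_level_critical_prio)
                    return;
            /* scanned == window, reclaimed == 0  ->  100% pressure. */
            vmpressure(gfp, memcg, true, vmpressure_win, 0);
    }
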
357 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
364 * @eventfd. The @args parameter is a comma-delimited string that denotes a
365 * pressure level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
366 * or "critical") and an optional mode (one of vmpressure_str_modes, i.e.
371 * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
387 return -ENOMEM;
393 goto out;
401 goto out;
407 ret = -ENOMEM;
408 goto out;
411 ev->efd = eventfd;
412 ev->level = level;
413 ev->mode = mode;
415 mutex_lock(&vmpr->events_lock);
416 list_add(&ev->node, &vmpr->events);
417 mutex_unlock(&vmpr->events_lock);
419 out:
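
vmpressure_register_event() is reached through the cgroup v1 memory controller's event_control interface. A hedged userspace sketch of binding an eventfd to it follows; the cgroup path is only an example and assumes the v1 "memory" hierarchy is mounted at the usual location:

    #include <sys/eventfd.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int efd = eventfd(0, 0);
            int lfd = open("/sys/fs/cgroup/memory/mygrp/memory.pressure_level",
                           O_RDONLY);
            int cfd = open("/sys/fs/cgroup/memory/mygrp/cgroup.event_control",
                           O_WRONLY);
            char buf[64];
            uint64_t count;
            int n;

            if (efd < 0 || lfd < 0 || cfd < 0) {
                    perror("setup");
                    return 1;
            }

            /* "<event_fd> <fd of memory.pressure_level> <level>[,mode]" */
            n = snprintf(buf, sizeof(buf), "%d %d low", efd, lfd);
            if (write(cfd, buf, n) < 0) {
                    perror("register");
                    return 1;
            }

            /* Blocks until vmpressure_event() signals the eventfd. */
            if (read(efd, &count, sizeof(count)) == sizeof(count))
                    printf("low memory pressure events: %llu\n",
                           (unsigned long long)count);
            return 0;
    }
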
425 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
441 mutex_lock(&vmpr->events_lock);
442 list_for_each_entry(ev, &vmpr->events, node) {
443 if (ev->efd != eventfd)
445 list_del(&ev->node);
449 mutex_unlock(&vmpr->events_lock);
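
The loop body elided above is believed to simply skip non-matching entries and then unlink, free, and stop at the first match:

    list_for_each_entry(ev, &vmpr->events, node) {
            if (ev->efd != eventfd)
                    continue;
            list_del(&ev->node);
            kfree(ev);
            break;
    }
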
453 * vmpressure_init() - Initialize vmpressure control structure
461 spin_lock_init(&vmpr->sr_lock);
462 mutex_init(&vmpr->events_lock);
463 INIT_LIST_HEAD(&vmpr->events);
464 INIT_WORK(&vmpr->work, vmpressure_work_fn);
468 * vmpressure_cleanup() - shuts down vmpressure control structure
480 flush_work(&vmpr->work);