Lines Matching +full:current +full:- +full:limit

1 // SPDX-License-Identifier: GPL-2.0-only
6 * after a certain limit is reached.
8 * Since it is trivial to hit the task limit without hitting any kmemcg limits
15 * number of processes currently in the cgroup is given by pids.current.
17 * possible to have pids.current > pids.max. However, it is not possible to
18 * violate a cgroup policy through fork(). fork() will return -EAGAIN if forking
21 * To set a cgroup to have no limit, set pids.max to "max". This is the default
23 * stringent limit in the hierarchy is followed).
25 * pids.current tracks all child cgroup hierarchies, so parent/pids.current is
26 * a superset of parent/child/pids.current.
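Not part of the matched file: a minimal userspace sketch of the pids.max / pids.current interface described in the header comment above. It assumes cgroup v2 is mounted at /sys/fs/cgroup and that "mygrp" is a cgroup the caller has already created; both paths are illustrative, not taken from the source.

    /* Hypothetical example, not from pids.c. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, val, strlen(val)) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd;

            /* "max" means no limit; a number sets the hierarchical cap. */
            if (write_str("/sys/fs/cgroup/mygrp/pids.max", "64"))
                    perror("pids.max");

            /* pids.current also counts tasks in descendant cgroups. */
            fd = open("/sys/fs/cgroup/mygrp/pids.current", O_RDONLY);
            if (fd >= 0) {
                    n = read(fd, buf, sizeof(buf) - 1);
                    if (n > 0) {
                            buf[n] = '\0';
                            printf("pids.current: %s", buf);
                    }
                    close(fd);
            }
            return 0;
    }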
42 /* Fork failed in subtree because this pids_cgroup limit was hit. */
44 /* Fork failed in this pids_cgroup because ancestor limit was hit. */
53 * Use 64-bit types so that we can safely represent "max" as
57 atomic64_t limit; member
75 return css_pids(pids->css.parent); in parent_pids()
85 return ERR_PTR(-ENOMEM); in pids_css_alloc()
87 atomic64_set(&pids->limit, PIDS_MAX); in pids_css_alloc()
88 return &pids->css; in pids_css_alloc()
102 if (nr_pids > READ_ONCE(p->watermark)) in pids_update_watermark()
103 WRITE_ONCE(p->watermark, nr_pids); in pids_update_watermark()
107 * pids_cancel - uncharge the local pid count
120 WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter)); in pids_cancel()
124 * pids_uncharge - hierarchically uncharge the pid count
137 * pids_charge - hierarchically charge the pid count
141 * This function does *not* follow the pid limit set. It cannot fail and the new
142 * pid count may exceed the limit. This is only used for reverting failed
143 * attaches, where there is no other way out than violating the limit.
150 int64_t new = atomic64_add_return(num, &p->counter); in pids_charge()
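The pids_charge() comment above describes an unconditional, hierarchical charge. As a rough illustration only (not the kernel implementation), the same idea as a self-contained C fragment, with struct pcg standing in for struct pids_cgroup:

    /* Illustrative sketch, not kernel code. */
    #include <stdatomic.h>
    #include <stdint.h>

    struct pcg {
            _Atomic int64_t counter;
            struct pcg *parent;
    };

    static void charge_hierarchy(struct pcg *pcg, int num)
    {
            struct pcg *p;

            /* Walk up to the root; no limit is consulted, so the
             * counter at any level may end up above its limit. */
            for (p = pcg; p; p = p->parent)
                    atomic_fetch_add(&p->counter, num);
    }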
157 * pids_try_charge - hierarchically try to charge the pid count
162 * This function follows the set limit. It will fail if the charge would cause
163 * the new value to exceed the hierarchical limit. Returns 0 if the charge
164 * succeeded, otherwise -EAGAIN.
171 int64_t new = atomic64_add_return(num, &p->counter); in pids_try_charge()
172 int64_t limit = atomic64_read(&p->limit); in pids_try_charge() local
176 * p->limit is %PIDS_MAX then we know that this test will never in pids_try_charge()
179 if (new > limit) { in pids_try_charge()
184 * Not technically accurate if we go over limit somewhere up in pids_try_charge()
197 return -EAGAIN; in pids_try_charge()
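For the limit-following variant documented above, a companion sketch (again illustrative, not the kernel code): charge each ancestor, and if any level would exceed its limit, roll back the levels already charged, including the one that failed, and return -EAGAIN. The struct extends the one from the previous fragment with a limit field; a caller would use e.g. INT64_MAX as the "unlimited" value in place of the kernel's PIDS_MAX.

    /* Illustrative sketch, not kernel code. */
    #include <errno.h>
    #include <stdatomic.h>
    #include <stdint.h>

    struct pcg {
            _Atomic int64_t counter;
            _Atomic int64_t limit;
            struct pcg *parent;
    };

    static void uncharge_upto(struct pcg *from, struct pcg *stop, int num)
    {
            struct pcg *p;

            for (p = from; p != stop; p = p->parent)
                    atomic_fetch_sub(&p->counter, num);
    }

    static int try_charge_hierarchy(struct pcg *pcg, int num)
    {
            struct pcg *p;

            for (p = pcg; p; p = p->parent) {
                    /* fetch_add returns the old value; add num for the new one. */
                    int64_t new = atomic_fetch_add(&p->counter, num) + num;

                    if (new > atomic_load(&p->limit)) {
                            /* Undo this level and every level below it. */
                            uncharge_upto(pcg, p->parent, num);
                            return -EAGAIN;
                    }
            }
            return 0;
    }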
248 /* Only log the first time limit is hit. */ in pids_event()
249 if (atomic64_inc_return(&p->events_local[PIDCG_FORKFAIL]) == 1) { in pids_event()
251 pr_cont_cgroup_path(p->css.cgroup); in pids_event()
256 cgroup_file_notify(&p->events_local_file); in pids_event()
260 atomic64_inc(&pids_over_limit->events_local[PIDCG_MAX]); in pids_event()
261 cgroup_file_notify(&pids_over_limit->events_local_file); in pids_event()
264 atomic64_inc(&p->events[PIDCG_MAX]); in pids_event()
265 cgroup_file_notify(&p->events_file); in pids_event()
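The cgroup_file_notify() calls above make pids.events (and pids.events.local on kernels that provide it) monitorable from userspace. A hypothetical watcher, following the usual kernfs read-then-poll-for-POLLPRI pattern; the cgroup path is illustrative:

    /* Hypothetical example, not from pids.c. */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[256];
            struct pollfd pfd;
            ssize_t n;

            pfd.fd = open("/sys/fs/cgroup/mygrp/pids.events", O_RDONLY);
            if (pfd.fd < 0)
                    return 1;
            pfd.events = POLLPRI;

            for (;;) {
                    /* Read (or re-read) the current "max <count>" value. */
                    lseek(pfd.fd, 0, SEEK_SET);
                    n = read(pfd.fd, buf, sizeof(buf) - 1);
                    if (n > 0) {
                            buf[n] = '\0';
                            fputs(buf, stdout);
                    }
                    /* Block until the kernel signals a change. */
                    if (poll(&pfd, 1, -1) < 0)
                            break;
            }
            close(pfd.fd);
            return 0;
    }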
278 pids = css_pids(cset->subsys[pids_cgrp_id]); in pids_can_fork()
290 pids = css_pids(cset->subsys[pids_cgrp_id]); in pids_cancel_fork()
306 int64_t limit; in pids_max_write() local
311 limit = PIDS_MAX; in pids_max_write()
315 err = kstrtoll(buf, 0, &limit); in pids_max_write()
319 if (limit < 0 || limit >= PIDS_MAX) in pids_max_write()
320 return -EINVAL; in pids_max_write()
324 * Limit updates don't need to be mutex'd, since it isn't in pids_max_write()
325 * critical that any racing fork()s follow the new limit. in pids_max_write()
327 atomic64_set(&pids->limit, limit); in pids_max_write()
335 int64_t limit = atomic64_read(&pids->limit); in pids_max_show() local
337 if (limit >= PIDS_MAX) in pids_max_show()
340 seq_printf(sf, "%lld\n", limit); in pids_max_show()
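The pids_max_write()/pids_max_show() fragments above define the accepted syntax for pids.max: the literal "max" for no limit, otherwise a number in [0, PIDS_MAX). A hedged userspace-side sketch of that parse, with EXAMPLE_PIDS_MAX as an illustrative stand-in for the kernel's internal PIDS_MAX constant:

    /* Illustrative sketch, not kernel code. */
    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define EXAMPLE_PIDS_MAX INT64_MAX   /* stand-in for the kernel's PIDS_MAX */

    static int parse_pids_max(const char *buf, int64_t *out)
    {
            char *end;
            long long v;

            if (!strcmp(buf, "max")) {
                    *out = EXAMPLE_PIDS_MAX;
                    return 0;
            }

            errno = 0;
            v = strtoll(buf, &end, 0);
            if (errno || end == buf || *end != '\0')
                    return -EINVAL;
            if (v < 0 || v >= EXAMPLE_PIDS_MAX)
                    return -EINVAL;

            *out = v;
            return 0;
    }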
350 return atomic64_read(&pids->counter); in pids_current_read()
358 return READ_ONCE(pids->watermark); in pids_peak_read()
372 events = local ? pids->events_local : pids->events; in __pids_events_show()
398 .name = "current",
430 .name = "current",