/linux/tools/testing/selftests/bpf/prog_tests/

  cgroup_hierarchical_stats.c
     54  } cgroups[] = {   variable
     64  #define N_CGROUPS ARRAY_SIZE(cgroups)
    133  fd = create_and_get_cgroup(cgroups[i].path);   in setup_cgroups()
    137  cgroups[i].fd = fd;   in setup_cgroups()
    138  cgroups[i].id = get_cgroup_id(cgroups[i].path);   in setup_cgroups()
    147  close(cgroups[i].fd);   in cleanup_cgroups()
    175  if (join_parent_cgroup(cgroups[i].path))   in attach_processes()
    220  attach_counters[i] = get_attach_counter(cgroups[i].id,   in check_attach_counters()
    221                                          cgroups[i].name);   in check_attach_counters()
    288  err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);   in setup_progs()
    [all …]
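The selftest above resolves each cgroup path to a numeric id through its get_cgroup_id() helper before wiring up the BPF cgroup iterators. For readers wondering where that 64-bit id comes from, here is a small, self-contained C sketch of one way to obtain it from user space with name_to_handle_at(); the example path and the assumption that cgroupfs stores the id directly in the exported file handle are illustrative, not taken from the selftest itself:

    /* Sketch: print the 64-bit cgroup id of a cgroup directory (assumed v2 path). */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
    	/* struct file_handle ends in a flexible array; reserve room for the id */
    	struct {
    		struct file_handle fh;
    		uint64_t cgid;
    	} handle = { .fh.handle_bytes = sizeof(uint64_t) };
    	int mount_id;

    	if (argc < 2) {
    		fprintf(stderr, "usage: %s /sys/fs/cgroup/<path>\n", argv[0]);
    		return 1;
    	}
    	if (name_to_handle_at(AT_FDCWD, argv[1], &handle.fh, &mount_id, 0)) {
    		perror("name_to_handle_at");
    		return 1;
    	}
    	printf("%s: cgroup id %llu\n", argv[1], (unsigned long long)handle.cgid);
    	return 0;
    }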

/linux/tools/cgroup/

  memcg_shrinker.py
     11  cgroups = {}
     17  cgroups[ino] = path
     20  return cgroups
     44  cgroups = scan_cgroups("/sys/fs/cgroup/")
     58  cg = cgroups[ino]
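memcg_shrinker.py above builds a dictionary mapping each cgroup directory's inode number to its path by scanning /sys/fs/cgroup/. The same idea as a rough C sketch: walk the mounted hierarchy with nftw() and print an inode/path pair for every directory. The mount point is an assumption about the local setup:

    /* Sketch: list inode/path pairs for every cgroup under /sys/fs/cgroup. */
    #define _XOPEN_SOURCE 500
    #include <ftw.h>
    #include <stdio.h>
    #include <sys/stat.h>

    static int visit(const char *path, const struct stat *sb,
    		 int typeflag, struct FTW *ftwbuf)
    {
    	/* Every directory on cgroupfs is a cgroup; print its inode and path. */
    	if (typeflag == FTW_D)
    		printf("%llu\t%s\n", (unsigned long long)sb->st_ino, path);
    	return 0;  /* keep walking */
    }

    int main(void)
    {
    	/* Assumed cgroup mount point; adjust if the hierarchy lives elsewhere. */
    	return nftw("/sys/fs/cgroup", visit, 16, FTW_PHYS) ? 1 : 0;
    }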

/linux/Documentation/admin-guide/cgroup-v1/

  cgroups.rst
     21  1.1 What are cgroups ?
     22  1.2 Why are cgroups needed ?
     23  1.3 How are cgroups implemented ?
     26  1.6 How do I use cgroups ?
     41  1.1 What are cgroups ?
     54  facilities provided by cgroups to treat groups of tasks in
     60  A *hierarchy* is a set of cgroups arranged in a tree, such that
     61  every task in the system is in exactly one of the cgroups in the
     67  cgroups. Each hierarchy is a partition of all tasks in the system.
     69  User-level code may create and destroy cgroups by name in an
    [all …]

  net_cls.rst
      9  different priorities to packets from different cgroups.
     13  Creating a net_cls cgroups instance creates a net_cls.classid file.

  devices.rst
     43  Any task can move itself between cgroups. This clearly won't
     60  device cgroups maintain hierarchy by making sure a cgroup never has more
    121  not be possible once the device cgroups has children.
    126  device cgroups is implemented internally using a behavior (ALLOW, DENY) and a

  index.rst
     10  cgroups
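The v1 excerpts above describe the basic model: a hierarchy is a mounted tree of cgroups, a new cgroup is created by making a directory, and a net_cls cgroup exposes a net_cls.classid file holding a 0xAAAABBBB tc class handle. A minimal C sketch of that flow follows; the /sys/fs/cgroup/net_cls mount point and the "demo" group name are assumptions about the local setup, not something taken from the documents above:

    /* Sketch: create a v1 net_cls cgroup, set its classid, and join it. */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static int echo_to(const char *path, const char *val)
    {
    	int fd = open(path, O_WRONLY);
    	int ret;

    	if (fd < 0)
    		return -1;
    	ret = dprintf(fd, "%s", val) < 0 ? -1 : 0;
    	close(fd);
    	return ret;
    }

    int main(void)
    {
    	char pid[16];

    	/* Creating the directory creates the cgroup and its control files. */
    	if (mkdir("/sys/fs/cgroup/net_cls/demo", 0755) && errno != EEXIST)
    		return 1;

    	/* net_cls.classid holds 0xAAAABBBB, i.e. tc handle AAAA:BBBB. */
    	if (echo_to("/sys/fs/cgroup/net_cls/demo/net_cls.classid", "0x100001"))
    		return 1;

    	/* Move the calling process into the new cgroup. */
    	snprintf(pid, sizeof(pid), "%d", getpid());
    	return echo_to("/sys/fs/cgroup/net_cls/demo/cgroup.procs", pid);
    }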

/linux/tools/perf/util/

  cgroup.c
    563  down_write(&env->cgroups.lock);   in cgroup__findnew()
    564  cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);   in cgroup__findnew()
    565  up_write(&env->cgroups.lock);   in cgroup__findnew()
    578  down_read(&env->cgroups.lock);   in cgroup__find()
    579  cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);   in cgroup__find()
    580  up_read(&env->cgroups.lock);   in cgroup__find()
    589  down_write(&env->cgroups.lock);   in perf_env__purge_cgroups()
    590  while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {   in perf_env__purge_cgroups()
    591  node = rb_first(&env->cgroups.tree);   in perf_env__purge_cgroups()
    594  rb_erase(node, &env->cgroups.tree);   in perf_env__purge_cgroups()
    [all …]

  bpf-trace-summary.c
     24  static struct rb_root cgroups = RB_ROOT;   variable
     55  read_all_cgroups(&cgroups);   in trace_prepare_bpf_summary()
    346  struct cgroup *cgrp = __cgroup__find(&cgroups, data->key);   in print_cgroup_stat()
    455  if (!RB_EMPTY_ROOT(&cgroups)) {   in trace_cleanup_bpf_summary()
    458  rbtree_postorder_for_each_entry_safe(cgrp, tmp, &cgroups, node)   in trace_cleanup_bpf_summary()
    461  cgroups = RB_ROOT;   in trace_cleanup_bpf_summary()

  bpf_lock_contention.c
    387  read_all_cgroups(&con->cgroups);   in lock_contention_prepare()
    625  struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);   in lock_contention_get_name()
    840  while (!RB_EMPTY_ROOT(&con->cgroups)) {   in lock_contention_finish()
    841  struct rb_node *node = rb_first(&con->cgroups);   in lock_contention_finish()
    844  rb_erase(node, &con->cgroups);   in lock_contention_finish()

  cgroup.h
     31  int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups, bool open_cgroup);

  lock-contention.h
    151  struct rb_root cgroups;   member

/linux/tools/testing/selftests/bpf/progs/

  percpu_alloc_cgrp_local_storage.c
     30  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,   in BPF_PROG()
     56  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);   in BPF_PROG()
     89  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);   in BPF_PROG()

  rcu_read_lock.c
     34  struct css_set *cgroups;   in get_cgroup_id() local
     42  cgroups = task->cgroups;   in get_cgroup_id()
     43  if (!cgroups)   in get_cgroup_id()
     45  cgroup_id = cgroups->dfl_cgrp->kn->id;   in get_cgroup_id()

  cgrp_ls_recursion.c
     59  __on_update(task->cgroups->dfl_cgrp);   in BPF_PROG()
     92  __on_enter(regs, id, task->cgroups->dfl_cgrp);   in BPF_PROG()

  cgrp_ls_tp_btf.c
     86  __on_enter(regs, id, task->cgroups->dfl_cgrp);   in BPF_PROG()
    124  __on_exit(regs, id, task->cgroups->dfl_cgrp);   in BPF_PROG()
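These selftest programs all reach the task's default cgroup through task->cgroups->dfl_cgrp, and rcu_read_lock.c wraps the dereference in bpf_rcu_read_lock()/bpf_rcu_read_unlock(). A condensed sketch of that pattern is below; it assumes a bpftool-generated vmlinux.h, the attach point (a sleepable fentry on the x86-64 getpgid syscall) and the target_pid filter are illustrative, and whether the verifier accepts the pointer walk depends on the kernel version tagging task->cgroups as RCU-protected:

    // SPDX-License-Identifier: GPL-2.0
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    /* kfuncs providing an explicit RCU read-side section (recent kernels) */
    void bpf_rcu_read_lock(void) __ksym;
    void bpf_rcu_read_unlock(void) __ksym;

    int target_pid;
    __u64 cgroup_id;

    /* Sleepable fentry on the getpgid syscall; symbol name is x86-64 specific. */
    SEC("fentry.s/__x64_sys_getpgid")
    int BPF_PROG(get_dfl_cgroup_id)
    {
    	struct task_struct *task = bpf_get_current_task_btf();
    	struct css_set *cgroups;

    	if (task->pid != target_pid)
    		return 0;

    	bpf_rcu_read_lock();          /* task->cgroups is RCU-protected */
    	cgroups = task->cgroups;
    	if (cgroups)
    		cgroup_id = cgroups->dfl_cgrp->kn->id;
    	bpf_rcu_read_unlock();
    	return 0;
    }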

/linux/Documentation/admin-guide/

  cgroup-v2.rst
    110  multiple individual control groups, the plural form "cgroups" is used.
    127  cgroups form a tree structure and every process in the system belongs
    137  processes which belong to the cgroups consisting the inclusive
    213  propagation into leaf cgroups. This allows protecting entire
    268  A given cgroup may have multiple child cgroups forming a tree
    334  different cgroups and are not subject to the no internal process
    335  constraint - threaded controllers can be enabled on non-leaf cgroups
    341  can't have populated child cgroups which aren't threaded. Because the
    343  serve both as a threaded domain and a parent to domain cgroups.
    407  between threads in a non-leaf cgroup and its child cgroups. Each
    [all …]
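The cgroup-v2.rst excerpts describe the unified hierarchy: child cgroups are directories, controllers are distributed downward through cgroup.subtree_control, and a process is moved by writing its PID to cgroup.procs. A minimal sketch of that flow, assuming the v2 hierarchy is mounted at /sys/fs/cgroup and using an illustrative child named "demo":

    /* Sketch: enable the memory controller for children, create a child
     * cgroup, set a limit, and move the calling process into it.
     */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static int echo_to(const char *path, const char *val)
    {
    	int fd = open(path, O_WRONLY);
    	int ret;

    	if (fd < 0)
    		return -1;
    	ret = dprintf(fd, "%s", val) < 0 ? -1 : 0;
    	close(fd);
    	return ret;
    }

    int main(void)
    {
    	char pid[16];

    	/* Let children of the root distribute the memory controller. */
    	if (echo_to("/sys/fs/cgroup/cgroup.subtree_control", "+memory"))
    		return 1;

    	/* mkdir creates the child cgroup and its interface files. */
    	if (mkdir("/sys/fs/cgroup/demo", 0755) && errno != EEXIST)
    		return 1;

    	/* Set a memory limit, then move this process into the new cgroup. */
    	if (echo_to("/sys/fs/cgroup/demo/memory.max", "256M"))
    		return 1;
    	snprintf(pid, sizeof(pid), "%d", getpid());
    	return echo_to("/sys/fs/cgroup/demo/cgroup.procs", pid);
    }

An empty cgroup can later be removed with rmdir(); writing to cgroup.subtree_control generally requires privilege and may already have been done by the service manager.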

/linux/block/

  Kconfig.iosched
     38  (cgroups-v1) or io (cgroups-v2) controller.

/linux/tools/perf/Documentation/

  perf-bench.txt
    128  --cgroups=::
    129  Names of cgroups for sender and receiver, separated by a comma.
    131  Note that perf doesn't create nor delete the cgroups, so users should
    132  make sure that the cgroups exist and are accessible before use.
    154  (executing 1000000 pipe operations between cgroups)

/linux/Documentation/gpu/

  drm-compute.rst
     38  controlling resources. The standard kernel way of doing so is cgroups.
     40  This creates a third option, using cgroups to prevent eviction. Both GPU and
     43  into cgroups, that will allow jobs to run next to each other without

/linux/Documentation/bpf/

  map_cgrp_storage.rst
      9  storage for cgroups. It is only available with ``CONFIG_CGROUPS``.
     56  ptr = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp, 0,

  map_cgroup_storage.rst
     10  attach to cgroups; the programs are made available by the same Kconfig. The
     16  cgroups on their own.
    132  that uses the map. A program may be attached to multiple cgroups or have
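map_cgrp_storage.rst documents BPF_MAP_TYPE_CGRP_STORAGE, and its line 56 shows the bpf_cgrp_storage_get() call on task->cgroups->dfl_cgrp. A small BPF-side sketch in that spirit follows; it assumes a bpftool-generated vmlinux.h, and the map name, value type, and tracepoint attach point are illustrative:

    // SPDX-License-Identifier: GPL-2.0
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    /* One counter per cgroup, created on demand. */
    struct {
    	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
    	__uint(map_flags, BPF_F_NO_PREALLOC);
    	__type(key, int);
    	__type(value, long);
    } cgrp_storage SEC(".maps");

    SEC("tp_btf/sys_enter")
    int BPF_PROG(count_sys_enter, struct pt_regs *regs, long id)
    {
    	struct task_struct *task = bpf_get_current_task_btf();
    	long *counter;

    	/* Count syscalls per cgroup of the current task. */
    	counter = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp, 0,
    				       BPF_LOCAL_STORAGE_GET_F_CREATE);
    	if (counter)
    		__sync_fetch_and_add(counter, 1);
    	return 0;
    }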

/linux/include/linux/

  psi.h
     63  rcu_assign_pointer(p->cgroups, to);   in cgroup_move_task()

/linux/tools/perf/util/bpf_skel/

  off_cpu.bpf.c
    155  return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);   in get_cgroup_id()
    166  cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);   in get_cgroup_id()
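off_cpu.bpf.c derives a cgroup id with BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id), i.e. the kernfs node id of the task's cgroup on the default hierarchy. A stripped-down sketch of that read is below; it assumes a bpftool-generated vmlinux.h, and the sched_wakeup attach point is chosen purely for illustration:

    // SPDX-License-Identifier: GPL-2.0
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    /* Cgroup id of a task on the default (v2) hierarchy: the kernfs node id. */
    static __u64 get_cgroup_id(struct task_struct *t)
    {
    	return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
    }

    /* sched_wakeup carries the woken task as its only argument. */
    SEC("tp_btf/sched_wakeup")
    int BPF_PROG(on_wakeup, struct task_struct *p)
    {
    	bpf_printk("cgroup id of woken task: %llu", get_cgroup_id(p));
    	return 0;
    }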

/linux/tools/perf/tests/shell/

  record_bpf_filter.sh
    154  -a --all-cgroups --synth=cgroup -o "${perfdata}" true 2> /dev/null

/linux/Documentation/accounting/

  cgroupstats.rst
     11  and attributes specific to cgroups. It should be very easy to