| /linux/kernel/cgroup/ |
| freezer.c |
    15   static bool cgroup_update_frozen_flag(struct cgroup *cgrp, bool frozen)
    20       if (test_bit(CGRP_FROZEN, &cgrp->flags) == frozen)
    24       set_bit(CGRP_FROZEN, &cgrp->flags);
    26       clear_bit(CGRP_FROZEN, &cgrp->flags);
    28       cgroup_file_notify(&cgrp->events_file);
    29       TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen);
    36   static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen)
    46       while ((cgrp = cgroup_parent(cgrp))) {
    48       cgrp->freezer.nr_frozen_descendants += desc;
    49       if (!test_bit(CGRP_FREEZE, &cgrp->flags) ||
    [all …]
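The excerpt above shows the usual way per-cgroup state is propagated upward: walk the ancestors with cgroup_parent() and adjust a counter or flag on each. A minimal sketch of that walk, assuming cgroup_mutex is held by the caller as in the real code; walk_frozen_ancestors() and its simplified body are illustrative, not the kernel's actual function:

```c
#include <linux/cgroup.h>

/*
 * Sketch only: bump/decrement the frozen-descendant count on every ancestor
 * of @cgrp. The real cgroup_propagate_frozen() additionally re-evaluates
 * CGRP_FROZEN on each ancestor and notifies userspace when it changes.
 */
static void walk_frozen_ancestors(struct cgroup *cgrp, bool frozen)
{
	int desc = frozen ? 1 : -1;

	while ((cgrp = cgroup_parent(cgrp))) {
		cgrp->freezer.nr_frozen_descendants += desc;

		/* Ancestors that were never asked to freeze need no flag update. */
		if (!test_bit(CGRP_FREEZE, &cgrp->flags))
			continue;

		/* ... update CGRP_FROZEN and notify, as in the lines above ... */
	}
}
```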
|
| cgroup.c |
    197      .cgrp.self.rstat_cpu = &root_rstat_cpu,
    198      .cgrp.rstat_base_cpu = &root_rstat_base_cpu,
    279  static int cgroup_apply_control(struct cgroup *cgrp);
    280  static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
    283  static int cgroup_destroy_locked(struct cgroup *cgrp);
    284  static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
    289      struct cgroup *cgrp, struct cftype cfts[],
    358  bool cgroup_on_dfl(const struct cgroup *cgrp)
    360      return cgrp->root == &cgrp_dfl_root;
    394  static bool cgroup_has_tasks(struct cgroup *cgrp)
    [all …]
|
| rstat.c |
    15   static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
    35       struct cgroup *cgrp, int cpu)   (in cgroup_rstat_base_cpu())
    37       return per_cpu_ptr(cgrp->rstat_base_cpu, cpu);
    342  __weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
    362      struct cgroup *cgrp = css->cgroup;   (in __css_rstat_lock())
    369      trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
    372      trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
    379      struct cgroup *cgrp = css->cgroup;   (in __css_rstat_unlock())
    383      trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
    435      struct cgroup *cgrp = css->cgroup;   (in css_rstat_init())
    Symbol matches (cgrp as argument or local):
    339  bpf_rstat_flush(struct cgroup *cgrp, struct cgroup *parent, int cpu)
    359  __css_rstat_lock():  struct cgroup *cgrp = css->cgroup;
    376  __css_rstat_unlock():  struct cgroup *cgrp = css->cgroup;
    432  css_rstat_init():  struct cgroup *cgrp = css->cgroup;
    497  css_rstat_exit():  struct cgroup *cgrp = css->cgroup;
    560  cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
    600  cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
    609  cgroup_base_stat_cputime_account_end(struct cgroup *cgrp, struct cgroup_rstat_base_cpu *rstatbc, unsigned long flags)
    618  __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
    628  __cgroup_account_cputime_field(struct cgroup *cgrp, enum cpu_usage_stat index, u64 delta_exec)
    712  cgroup_base_stat_cputime_show():  struct cgroup *cgrp = seq_css(seq)->cgroup;
    [all …]
| cgroup-v1.c |
    206  void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
    210      mutex_lock(&cgrp->pidlist_mutex);
    211      list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
    213      mutex_unlock(&cgrp->pidlist_mutex);
    216      BUG_ON(!list_empty(&cgrp->pidlists));
    287  static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
    294      lockdep_assert_held(&cgrp->pidlist_mutex);
    296      list_for_each_entry(l, &cgrp->pidlists, links)
    308  static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
    313      lockdep_assert_held(&cgrp->pidlist_mutex);
    [all …]
|
| cgroup-internal.h |
    27   #define TRACE_CGROUP_PATH(type, cgrp, ...) \
    33       cgroup_path(cgrp, trace_cgroup_path, \
    35       trace_cgroup_##type(cgrp, trace_cgroup_path, \
    98   struct cgroup *cgrp;   (struct member)
    187  static inline bool cgroup_is_dead(const struct cgroup *cgrp)
    189      return !(cgrp->self.flags & CSS_ONLINE);
    192  static inline bool notify_on_release(const struct cgroup *cgrp)
    194      return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
    225  bool cgroup_on_dfl(const struct cgroup *cgrp);
    232  int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
    [all …]
|
| /linux/tools/testing/selftests/bpf/progs/ |
| cgroup_read_xattr.c |
    29       struct cgroup *cgrp;
    31       cgrp = bpf_cgroup_from_id(cgrp_id);
    32       if (!cgrp)
    35       read_xattr(cgrp);
    36       bpf_cgroup_release(cgrp);
    45       struct cgroup *cgrp;
    47       cgrp = bpf_cgroup_from_id(cgrp_id);
    48       if (!cgrp)
    51       read_xattr(cgrp);
    52       bpf_cgroup_release(cgrp);
    Symbol matches (locals in further BPF_PROG()s):
    62, 81, 100   struct cgroup *cgrp;
    121, 143      struct cgroup *cgrp, *ancestor;
    [all …]
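The pattern this test repeats is: resolve a cgroup from a 64-bit id, NULL-check the result, use it, release the reference. A minimal sketch of that pattern; the tp_btf attach point, the bpf_printk() standing in for the test's read_xattr() helper, and the cgrp_id variable (filled in by userspace) are assumptions for illustration:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

const volatile u64 cgrp_id;	/* set by userspace before load */

struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

SEC("tp_btf/sys_enter")
int BPF_PROG(use_cgroup_by_id, struct pt_regs *regs, long id)
{
	struct cgroup *cgrp;

	cgrp = bpf_cgroup_from_id(cgrp_id);	/* acquires a reference or returns NULL */
	if (!cgrp)
		return 0;

	bpf_printk("cgroup %llu has %d live descendants",
		   cgrp->kn->id, cgrp->nr_descendants);

	bpf_cgroup_release(cgrp);		/* always pair with the acquire */
	return 0;
}
```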
| cgrp_kfunc_failure.c |
    20   static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
    24       status = cgrps_kfunc_map_insert(cgrp);
    28       return cgrps_kfunc_map_value_lookup(cgrp);
    33   int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
    38       v = insert_lookup_cgrp(cgrp);
    43       acquired = bpf_cgroup_acquire(v->cgrp);
    52   int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *path)
    56       acquired = bpf_cgroup_acquire(cgrp);
    68   int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
    82   int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
    [all …]
|
| cgrp_kfunc_success.c |
    17    * TP_PROTO(struct cgroup *cgrp, const char *path),
    18    * TP_ARGS(cgrp, path)
    33   int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
    40       acquired = bpf_cgroup_acquire(cgrp);
    50   int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
    57       status = cgrps_kfunc_map_insert(cgrp);
    65   int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
    74       status = cgrps_kfunc_map_insert(cgrp);
    80       v = cgrps_kfunc_map_value_lookup(cgrp);
    86       kptr = v->cgrp;
    Symbol matches (cgrp as argument):
    108  BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
    139  BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path)
    184  BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path)
    [all …]
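test_cgrp_acquire_release_argument above exercises the core kfunc contract: bpf_cgroup_acquire() on a trusted argument may return NULL, and every successful acquire must be paired with a bpf_cgroup_release(). A minimal sketch of that contract on the cgroup_mkdir tracepoint (the same TP_PROTO shown in the excerpt's comment); the program name is illustrative:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(acquire_release, struct cgroup *cgrp, const char *path)
{
	struct cgroup *acquired;

	acquired = bpf_cgroup_acquire(cgrp);	/* may return NULL */
	if (!acquired)
		return 0;

	/* ... the cgroup can now be used beyond the tracepoint argument's lifetime ... */

	bpf_cgroup_release(acquired);		/* every acquire needs a release */
	return 0;
}
```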
| cgrp_ls_recursion.c |
    28   void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
    30   static void __on_update(struct cgroup *cgrp)
    34       ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
    38       ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
    47       struct cgroup *cgrp;
    50       cgrp = bpf_task_get_cgroup1(task, target_hid);
    51       if (!cgrp)
    54       __on_update(cgrp);
    55       bpf_cgroup_release(cgrp);
    63   static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
    [all …]
|
| cgrp_ls_tp_btf.c |
    34   void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
    36   static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
    42       ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
    48       err = bpf_cgrp_storage_delete(&map_a, cgrp);
    53       ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, 0);
    58       ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
    70       struct cgroup *cgrp;
    77       cgrp = bpf_task_get_cgroup1(task, target_hid);
    78       if (!cgrp)
    81       __on_enter(regs, id, cgrp);
    [all …]
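This test drives BPF_MAP_TYPE_CGRP_STORAGE: get-or-create a per-cgroup slot, read it back, delete it. A minimal sketch of the same helpers; attaching to cgroup_mkdir (so the cgroup argument is already trusted) and the plain long counter value are illustrative choices, not what the selftest itself does:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} map_a SEC(".maps");

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(track_mkdir, struct cgroup *cgrp, const char *path)
{
	long *ptr;

	/* Create the per-cgroup storage on first use and bump a counter. */
	ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (ptr)
		__sync_fetch_and_add(ptr, 1);

	/* Storage can also be looked up without creating it ... */
	ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, 0);

	/* ... and dropped explicitly; otherwise it dies with the cgroup. */
	bpf_cgrp_storage_delete(&map_a, cgrp);
	return 0;
}
```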
|
| iters_css_task.c |
    28       struct cgroup *cgrp;
    33       cgrp = bpf_cgroup_from_id(cg_id);
    35       if (!cgrp)
    38       css = &cgrp->self;
    45       bpf_cgroup_release(cgrp);
    50   static inline u64 cgroup_id(struct cgroup *cgrp)
    52       return cgrp->kn->id;
    59       struct cgroup *cgrp = ctx->cgroup;   (in cgroup_id_printer())
    64       if (cgrp == NULL) {
    73       BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
    [all …]
|
| iters_task_failure.c |
    34       struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
    37       if (!cgrp)
    39       root_css = &cgrp->self;
    44       bpf_cgroup_release(cgrp);
    69       struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
    72       if (!cgrp)
    74       root_css = &cgrp->self;
    83       bpf_cgroup_release(cgrp);
    92       struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
    96       if (cgrp …
    [all …]
| test_task_under_cgroup.c |
    23       struct cgroup *cgrp = NULL;
    36       cgrp = bpf_cgroup_from_id(cgid);
    37       if (!cgrp)
    40       if (bpf_task_under_cgroup(acquired, cgrp))
    44       if (cgrp)
    45           bpf_cgroup_release(cgrp);
    54       struct cgroup *cgrp = NULL;
    66       cgrp = bpf_cgroup_from_id(1);
    67       if (!cgrp)
    69       if (!bpf_task_under_cgroup(task, cgrp))
    [all …]
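The check being tested is bpf_task_under_cgroup(): does a task sit inside (or below) a given cgroup? A minimal sketch that counts forks happening inside one cgroup; the task_newtask tracepoint, the cg_id variable, and the hits counter are assumptions for illustration:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

const volatile u64 cg_id;	/* target cgroup id, set by userspace */
u64 hits;			/* read back by userspace */

struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(count_forks_in_cgroup, struct task_struct *task, u64 clone_flags)
{
	struct cgroup *cgrp;

	cgrp = bpf_cgroup_from_id(cg_id);
	if (!cgrp)
		return 0;

	/* Counts only tasks created somewhere under the target cgroup. */
	if (bpf_task_under_cgroup(task, cgrp))
		__sync_fetch_and_add(&hits, 1);

	bpf_cgroup_release(cgrp);
	return 0;
}
```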
| cgrp_kfunc_common.h |
    13   struct cgroup __kptr *cgrp;   (struct member)
    25   struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
    30   static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
    35       status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
    42   static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)
    49       status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
    53       local.cgrp = NULL;
    64       acquired = bpf_cgroup_acquire(cgrp);
    70       old = bpf_kptr_xchg(&v->cgrp, acquired);
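cgrp_kfunc_common.h shows how a referenced cgroup pointer is stashed in a map through a __kptr field and bpf_kptr_xchg(). A condensed sketch of that flow; the stash map name, the tp_btf/cgroup_mkdir caller, and using cgrp->kn->id directly as the key (the header itself goes through bpf_probe_read_kernel() of cgrp->self.id) are illustrative:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

/* A referenced cgroup pointer can be parked in a map value as a __kptr. */
struct map_value {
	struct cgroup __kptr *cgrp;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, u64);
	__type(value, struct map_value);
} stash SEC(".maps");

static int stash_cgroup(struct cgroup *cgrp)
{
	struct map_value local = {}, *v;
	struct cgroup *acquired, *old;
	u64 id = cgrp->kn->id;

	if (bpf_map_update_elem(&stash, &id, &local, BPF_NOEXIST))
		return -1;

	v = bpf_map_lookup_elem(&stash, &id);
	if (!v)
		return -1;

	acquired = bpf_cgroup_acquire(cgrp);
	if (!acquired)
		return -1;

	/* Swap the reference into the map; any previous occupant must be released. */
	old = bpf_kptr_xchg(&v->cgrp, acquired);
	if (old)
		bpf_cgroup_release(old);
	return 0;
}

SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(stash_on_mkdir, struct cgroup *cgrp, const char *path)
{
	stash_cgroup(cgrp);
	return 0;
}
```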
|
| test_cgroup1_hierarchy.c |
    14   struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
    15   void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
    19       struct cgroup *cgrp, *ancestor;   (in bpf_link_create_verify())
    32       cgrp = bpf_task_get_cgroup1(task, target_hid);
    33       if (!cgrp)
    37       if (cgrp->kn->id == target_ancestor_cgid)
    40       ancestor = bpf_cgroup_ancestor(cgrp, target_ancestor_level);
    49       bpf_cgroup_release(cgrp);
|
| cgroup_hierarchical_stats.c |
    44   static uint64_t cgroup_id(struct cgroup *cgrp)
    46       return cgrp->kn->id;
    84   int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
    88       __u64 cg_id = cgroup_id(cgrp);
    134  int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp)
    138      __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0;
    145      css_rstat_flush(&cgrp->self);
    Symbol matches (cgrp as argument): cgroup_id() 43, flusher 83, dumper 133
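The flusher/dumper pair above implements hierarchical aggregation on top of rstat: per-cgroup deltas are folded into the parent each time the kernel flushes that cgroup. A minimal sketch of the flusher side, assuming attachment to the __weak bpf_rstat_flush() stub visible in the rstat.c entry earlier and a hash map keyed by cgroup id; the attach_counter layout is simplified relative to the selftest:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Per-cgroup counter: "state" holds deltas not yet pushed to the parent. */
struct attach_counter {
	__u64 state;
	__u64 total;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u64);
	__type(value, struct attach_counter);
} counters SEC(".maps");

SEC("fentry/bpf_rstat_flush")
int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu)
{
	__u64 cg_id = cgrp->kn->id;
	struct attach_counter *c, *pc;

	c = bpf_map_lookup_elem(&counters, &cg_id);
	if (!c || !c->state)
		return 0;

	/* Push the pending delta up one level; the parent's own flush will
	 * continue the propagation toward the root. */
	if (parent) {
		__u64 parent_id = parent->kn->id;

		pc = bpf_map_lookup_elem(&counters, &parent_id);
		if (pc)
			pc->state += c->state;
	}

	c->total += c->state;
	c->state = 0;
	return 0;
}
```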
|
| /linux/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
| cgrp.c |
    32   nvkm_cgrp_ectx_put(struct nvkm_cgrp *cgrp, struct nvkm_ectx **pectx)
    40       CGRP_TRACE(cgrp, "dtor ectx %d[%s]", engn->id, engn->engine->subdev.name);
    51   nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_ectx **pectx,
    63       ectx = nvkm_list_find(ectx, &cgrp->ectxs, head, ectx->engn == engn);
    71       CGRP_TRACE(cgrp, "ctor ectx %d[%s]", engn->id, engn->engine->subdev.name);
    78       list_add_tail(&ectx->head, &cgrp->ectxs);
    87       nvkm_cgrp_ectx_put(cgrp, pectx);
    93   nvkm_cgrp_vctx_put(struct nvkm_cgrp *cgrp, struct nvkm_vctx **pvctx)
    101      CGRP_TRACE(cgrp, "dtor vctx %d[%s]", engn->id, engn->engine->subdev.name);
    105      nvkm_cgrp_ectx_put(cgrp, &vctx->ectx);
    [all …]
|
| runl.c |
    36       struct nvkm_cgrp *cgrp = NULL;   (in nvkm_engn_cgrp_get())
    48       cgrp = chan->cgrp;
    50       cgrp = nvkm_runl_cgrp_get_cgid(engn->runl, id, pirqflags);
    53       WARN_ON(!cgrp);
    54       return cgrp;
    61       struct nvkm_cgrp *cgrp, *gtmp;   (in nvkm_runl_rc())
    75       nvkm_runl_foreach_cgrp_safe(cgrp, gtmp, runl) {
    76       state = atomic_cmpxchg(&cgrp->rc, NVKM_CGRP_RC_PENDING, NVKM_CGRP_RC_RUNNING);
    79       nvkm_cgrp_foreach_chan_safe(chan, ctmp, cgrp) {
    102      cgrp = nvkm_engn_cgrp_get(engn, &flags);
    [all …]
|
| gk110.c |
    38       struct nvkm_cgrp *cgrp = chan->cgrp;   (in gk110_chan_preempt())
    40       if (cgrp->hw) {
    41       cgrp->func->preempt(cgrp);
    61   gk110_cgrp_preempt(struct nvkm_cgrp *cgrp)
    63       nvkm_wr32(cgrp->runl->fifo->engine.subdev.device, 0x002634, 0x01000000 | cgrp->id);
    72   gk110_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)
    74       nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |
    75       (3 << 14) | 0x00002000 | cgrp->id);
    123  .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
|
| ucgrp.c |
    33   struct nvkm_cgrp *cgrp;   (struct member)
    40       struct nvkm_cgrp *cgrp = nvkm_ucgrp(oclass->parent)->cgrp;   (in nvkm_ucgrp_chan_new())
    42       return nvkm_uchan_new(cgrp->runl->fifo, cgrp, oclass, argv, argc, pobject);
    48       struct nvkm_cgrp *cgrp = nvkm_ucgrp(object)->cgrp;   (in nvkm_ucgrp_sclass())
    49       struct nvkm_fifo *fifo = cgrp->runl->fifo;
    70       nvkm_cgrp_unref(&ucgrp->cgrp);
    115      ret = nvkm_cgrp_new(runl, args->v0.name, vmm, true, &ucgrp->cgrp);
    120      args->v0.cgid = ucgrp->cgrp->id;
|
| /linux/tools/sched_ext/ |
| scx_flatcg.bpf.c |
    163  static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp)
    167      cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
    169      scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id);
    175  static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level)
    179      cgrp = bpf_cgroup_ancestor(cgrp, level);
    180      if (!cgrp) {
    185      cgc = find_cgrp_ctx(cgrp);
    188      bpf_cgroup_release(cgrp);
    192  static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
    207      bpf_for(level, 0, cgrp->level + 1) {
    [all …]
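scx_flatcg keeps its per-cgroup context in cgroup local storage and walks ancestors by level to recompute hierarchical weights. A minimal sketch of the ancestor lookup shown at lines 175-188; the fcg_cgrp_ctx contents are reduced to one field, and the map layout is an assumption modeled on the excerpt. The helper is a fragment meant to be called from the scheduler's struct_ops callbacks:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

struct fcg_cgrp_ctx {
	u64 hweight;	/* simplified: the real struct carries more state */
};

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct fcg_cgrp_ctx);
} cgrp_ctx SEC(".maps");

static struct fcg_cgrp_ctx *find_ancestor_ctx(struct cgroup *cgrp, int level)
{
	struct fcg_cgrp_ctx *cgc;

	cgrp = bpf_cgroup_ancestor(cgrp, level);	/* acquires a reference */
	if (!cgrp)
		return NULL;

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);

	/* As in the excerpt, the reference is dropped once the lookup is done. */
	bpf_cgroup_release(cgrp);
	return cgc;
}
```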
|
| /linux/tools/perf/util/ |
| cgroup.c |
    67   int read_cgroup_id(struct cgroup *cgrp)
    75       scnprintf(path, PATH_MAX, "%s/%s", mnt, cgrp->name);
    77       cgrp->id = __read_cgroup_id(path);
    109      if (!counter->cgrp)
    111      if (!strcmp(counter->cgrp->name, str))
    112      return cgroup__get(counter->cgrp);
    157      struct cgroup *cgrp = evlist__findnew_cgroup(evlist, str);   (in add_cgroup())
    160      if (!cgrp)
    173      cgroup__put(cgrp);
    176      counter->cgrp = cgrp;
    [all …]
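Here struct cgroup is perf's userspace wrapper, and read_cgroup_id() fills cgrp->id from a cgroupfs path. A sketch of how such an id can be obtained with name_to_handle_at(), mirroring what perf's __read_cgroup_id() does; the function name and the example path below are illustrative:

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

/* The cgroup v2 id is the kernfs inode id, exposed via the file handle. */
static uint64_t read_cgroup_id_from_path(const char *path)
{
	struct {
		struct file_handle fh;
		uint64_t cgroup_id;
	} handle;
	int mount_id;

	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
		return 0;	/* 0: could not resolve an id */

	return handle.cgroup_id;
}

int main(void)
{
	/* Example path; perf builds it from the cgroup2 mount point + cgrp->name. */
	printf("%llu\n",
	       (unsigned long long)read_cgroup_id_from_path("/sys/fs/cgroup"));
	return 0;
}
```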
|
| bpf_counter_cgroup.c |
    100      struct cgroup *cgrp, *leader_cgrp;   (in bperf_load_program())
    144      cgrp = NULL;
    148      if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
    149      leader_cgrp = evsel->cgrp;
    150      evsel->cgrp = NULL;
    165      evsel->cgrp = leader_cgrp;
    168      if (evsel->cgrp == cgrp)
    171      cgrp = evsel->cgrp;
    173      if (read_cgroup_id(cgrp) < 0) {
    174      pr_debug("Failed to get cgroup id for %s\n", cgrp->name);
    [all …]
|
| /linux/include/linux/ |
| cgroup.h |
    334  static inline u64 cgroup_id(const struct cgroup *cgrp)
    336      return cgrp->kn->id;
    375  static inline void cgroup_get(struct cgroup *cgrp)
    377      css_get(&cgrp->self);
    380  static inline bool cgroup_tryget(struct cgroup *cgrp)
    382      return css_tryget(&cgrp->self);
    385  static inline void cgroup_put(struct cgroup *cgrp)
    387      css_put(&cgrp->self);
    519  static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
    521      struct cgroup_subsys_state *parent_css = cgrp->self.parent;
    [all …]
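These inline helpers wrap the cgroup's self css refcount: cgroup_get()/cgroup_tryget() pin a cgroup, cgroup_put() drops it, and cgroup_id()/cgroup_parent() are simple accessors. A minimal kernel-side sketch; use_cgroup_id() is a made-up consumer for illustration:

```c
#include <linux/cgroup.h>
#include <linux/printk.h>

/*
 * Sketch only: a caller that found @cgrp (for example under RCU) and wants to
 * keep using it pins it with cgroup_tryget(), which can fail if the cgroup is
 * already being destroyed, and drops the reference with cgroup_put().
 */
static void use_cgroup_id(struct cgroup *cgrp)
{
	if (!cgroup_tryget(cgrp))
		return;

	pr_info("cgroup id %llu\n", cgroup_id(cgrp));

	cgroup_put(cgrp);
}
```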
|
| /linux/tools/testing/selftests/bpf/ |
| test_sockmap.c |
    1468  static int __test_exec(int cgrp, int test, struct sockmap_options *opt)
    1492      err = run_options(opt, cgrp, test);
    1501  static void test_exec(int cgrp, struct sockmap_options *opt)
    1508      err = __test_exec(cgrp, SENDMSG, opt);
    1513      err = __test_exec(cgrp, SENDPAGE, opt);
    1519  static void test_send_one(struct sockmap_options *opt, int cgrp)
    1524      test_exec(cgrp, opt);
    1529      test_exec(cgrp, opt);
    1534      test_exec(cgrp, opt);
    1538  static void test_send_many(struct sockmap_options *opt, int cgrp)
    Symbol matches (cgrp as argument):
    1551  test_send_large(struct sockmap_options *opt, int cgrp)
    1559  test_send(struct sockmap_options *opt, int cgrp)
    1567  test_txmsg_pass(int cgrp, struct sockmap_options *opt)
    1574  test_txmsg_redir(int cgrp, struct sockmap_options *opt)
    1580  test_txmsg_redir_wait_sndmem(int cgrp, struct sockmap_options *opt)
    1592  test_txmsg_drop(int cgrp, struct sockmap_options *opt)
    1598  test_txmsg_ingress_redir(int cgrp, struct sockmap_options *opt)
    1605  test_txmsg_skb(int cgrp, struct sockmap_options *opt)
    1663  test_txmsg_cork_hangs(int cgrp, struct sockmap_options *opt)
    1684  test_txmsg_pull(int cgrp, struct sockmap_options *opt)
    1719  test_txmsg_pop(int cgrp, struct sockmap_options *opt)
    1763  test_txmsg_push(int cgrp, struct sockmap_options *opt)
    1800  test_txmsg_push_pop(int cgrp, struct sockmap_options *opt)
    1847  test_txmsg_apply(int cgrp, struct sockmap_options *opt)
    1892  test_txmsg_cork(int cgrp, struct sockmap_options *opt)
    1907  test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
    1919  test_txmsg_ingress_parser2(int cgrp, struct sockmap_options *opt)
    [all …]
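In this test `cgrp` is not a kernel object at all but an int: a cgroup v2 directory fd threaded through every test case so the relevant BPF program can be attached per cgroup. A hedged userspace sketch of obtaining and using such an fd; the helper name, the path handling, and the BPF_CGROUP_SOCK_OPS attach type are assumptions for illustration rather than this test's exact flow:

```c
#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>

/*
 * Open a cgroup v2 directory and attach an already-loaded sock_ops program
 * to it; the program then sees socket events for every task in that cgroup.
 * Returns the cgroup fd (to be detached from and closed by the caller), or -1.
 */
static int attach_sockops_to_cgroup(int prog_fd, const char *cg_path)
{
	int cg_fd = open(cg_path, O_DIRECTORY | O_RDONLY);

	if (cg_fd < 0)
		return -1;

	if (bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0)) {
		close(cg_fd);
		return -1;
	}
	return cg_fd;
}
```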