/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces. Include this file directly
 * only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME 64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
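
/*
 * Illustrative sketch (not part of the interface): SUBSYS() is an
 * x-macro fed by cgroup_subsys.h, so with e.g. only the cpu and memory
 * controllers configured the enum above expands roughly to:
 *
 *	enum cgroup_subsys_id {
 *		cpu_cgrp_id,
 *		memory_cgrp_id,
 *		CGROUP_SUBSYS_COUNT,
 *	};
 */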

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF = (1 << 0), /* no reference counting for this css */
	CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE = (1 << 3), /* css is visible to userland */
	CSS_DYING = (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup. For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,

	/* Control group has to be frozen. */
	CGRP_FREEZE,

	/* Cgroup is frozen. */
	CGRP_FROZEN,
};

/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries. If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE = (1 << 3),

	/*
	 * Reduce latencies on dynamic cgroup modifications such as task
	 * migrations and controller on/offs by disabling percpu operation on
	 * cgroup_threadgroup_rwsem. This makes hot path operations such as
	 * forks and exits into the slow path and more expensive.
	 *
	 * Alleviate the contention between fork, exec, exit operations and
	 * writing to cgroup.procs by taking a per threadgroup rwsem instead of
	 * the global cgroup_threadgroup_rwsem. Fork and other operations
	 * from threads in different thread groups no longer contend with
	 * writing to cgroup.procs.
	 *
	 * The static usage pattern of creating a cgroup, enabling controllers,
	 * and then seeding it with CLONE_INTO_CGROUP doesn't require write
	 * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
	 * favordynmods.
	 */
	CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),

	/*
	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),

	/*
	 * Enable legacy local memory.events.
	 */
	CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),

	/*
	 * Enable recursive subtree protection
	 */
	CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),

	/*
	 * Enable hugetlb accounting for the memory controller.
	 */
	CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),

	/*
	 * Enable legacy local pids.events.
	 */
	CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
	CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
	__CFTYPE_ADDED = (1 << 18),
};

enum cgroup_attach_lock_mode {
	/* Default */
	CGRP_ATTACH_LOCK_GLOBAL,

	/* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
	CGRP_ATTACH_LOCK_NONE,

	/* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
	CGRP_ATTACH_LOCK_PER_THREADGROUP,
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications. This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
	unsigned long notified_at;
	struct timer_list notify_timer;
};
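
/*
 * Hedged usage sketch (hypothetical "foo" controller, names assumed): a
 * controller embeds a cgroup_file next to its css (placed first so the
 * offset is relative to the css), points cftype->file_offset at it and
 * later kicks readers via cgroup_file_notify():
 *
 *	struct foo_css {
 *		struct cgroup_subsys_state css;
 *		struct cgroup_file events_file;
 *	};
 *
 *	// in the cftype describing "foo.events":
 *	//	.file_offset = offsetof(struct foo_css, events_file),
 *
 *	// after updating the state backing "foo.events":
 *	cgroup_file_notify(&foo_css_of(css)->events_file);
 */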

/*
 * Per-subsystem/per-cgroup state maintained by the system. This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/*
	 * Depending on the context, this field is initialized
	 * via css_rstat_init() at different places:
	 *
	 * when css is associated with cgroup::self
	 *   when css->cgroup is the root cgroup
	 *     performed in cgroup_init()
	 *   when css->cgroup is not the root cgroup
	 *     performed in cgroup_create()
	 * when css is associated with a subsystem
	 *   when css->cgroup is the root cgroup
	 *     performed in cgroup_init_subsys() in the non-early path
	 *   when css->cgroup is not the root cgroup
	 *     performed in css_create()
	 */
	struct css_rstat_cpu __percpu *rstat_cpu;

	/*
	 * siblings list anchored at the parent's ->children
	 *
	 * linkage is protected by cgroup_mutex or RCU
	 */
	struct list_head sibling;
	struct list_head children;

	/*
	 * PI: Subsys-unique ID. 0 is unused and root is always 1. The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses. All ->children lists are
	 * guaranteed to be in ascending ->serial_nr order, which is used
	 * to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children. Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct work_struct destroy_work;
	struct rcu_work destroy_rwork;

	/*
	 * PI: the parent css. Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;

	/*
	 * Keep track of total numbers of visible descendant CSSes.
	 * The total number of dying CSSes is tracked in
	 * css->cgroup->nr_dying_subsys[ssid].
	 * Protected by cgroup_mutex.
	 */
	int nr_descendants;

	/*
	 * A singly-linked list of css structures to be rstat flushed.
	 * This is a scratch field to be used exclusively by
	 * css_rstat_flush().
	 *
	 * Protected by rstat_base_lock when css is cgroup::self.
	 * Protected by css->ss->rstat_ss_lock otherwise.
	 */
	struct cgroup_subsys_state *rstat_flush_next;
};
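
/*
 * Hedged refcounting sketch: ->refcnt is only manipulated through the
 * css_get()/css_tryget_online()/css_put() helpers from cgroup.h, e.g.
 * when stashing a css pointer beyond an RCU read section (assumed
 * pattern, error handling trimmed):
 *
 *	rcu_read_lock();
 *	css = task_css(task, cpu_cgrp_id);
 *	if (!css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	// ... use css ...
 *
 *	if (css)
 *		css_put(css);
 */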

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem. This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self. If threaded,
	 * to the matching cset of the nearest domain ancestor. The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;

	/*
	 * Lists running through all tasks using this css_set.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in. Protected by
	 * css_set_lock, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 */
	struct list_head tasks;
	struct list_head mg_tasks;
	struct list_head dying_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;

	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with. The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;

	/*
	 * List running through all css_sets in the same hash
	 * slot. Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set. Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination. Protected by cgroup_mutex.
	 */
	struct list_head mg_src_preload_node;
	struct list_head mg_dst_preload_node;
	struct list_head mg_node;

	/*
	 * If this cset is acting as the source of migration the following
	 * fields are set. mg_src_cgrp and mg_dst_cgrp are
	 * respectively the source and destination cgroups of the on-going
	 * migration. mg_dst_cset is the destination cset the target tasks
	 * on this cset should be migrated to. Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};

struct cgroup_base_stat {
	struct task_cputime cputime;

#ifdef CONFIG_SCHED_CORE
	u64 forceidle_sum;
#endif
	u64 ntime;
};

/*
 * rstat - cgroup scalable recursive statistics. Accounting is done
 * per-cpu in css_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the css_rstat_cpu and its ancestors are
 * linked into the updated tree. On the following read, propagation only
 * considers and consumes the updated tree. This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently. The combination can
 * become very expensive. By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next.
 */
struct css_rstat_cpu {
	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next. updated_children is terminated by its container css.
	 */
	struct cgroup_subsys_state *updated_children;
	struct cgroup_subsys_state *updated_next; /* NULL if not on the list */

	struct llist_node lnode; /* lockless list for update */
	struct cgroup_subsys_state *owner; /* back pointer */
};
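
/*
 * Hedged usage sketch: a controller typically only bumps its own per-cpu
 * counters and marks the css as updated on the hot path; the upward
 * propagation happens when somebody reads the stats (controller-side
 * names are assumed):
 *
 *	// hot path
 *	this_cpu_inc(foo_css->pcpu_stat->nr_widgets);
 *	css_rstat_updated(&foo_css->css, smp_processor_id());
 *
 *	// read path, e.g. a .seq_show handler
 *	css_rstat_flush(&foo_css->css);	// walks only the updated tree
 *	seq_printf(sf, "widgets %llu\n", foo_css->stat.nr_widgets);
 *
 * During the flush, the subsystem's ->css_rstat_flush() callback is
 * invoked per cpu to fold the per-cpu deltas into the global counters.
 */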

/*
 * This struct hosts the fields which track basic resource statistics on
 * top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_base_cpu {
	/*
	 * ->bsync protects ->bstat. These are the only fields which get
	 * updated in the hot path.
	 */
	struct u64_stats_sync bsync;
	struct cgroup_base_stat bstat;

	/*
	 * Snapshots at the last reading. These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct cgroup_base_stat last_bstat;

	/*
	 * This field is used to record the cumulative per-cpu time of
	 * the cgroup and its descendants. Currently it can be read via
	 * eBPF/drgn etc, and we are still trying to determine how to
	 * expose it in the cgroupfs interface.
	 */
	struct cgroup_base_stat subtree_bstat;

	/*
	 * Snapshots at the last reading. These are used to calculate the
	 * deltas to propagate to the per-cpu subtree_bstat.
	 */
	struct cgroup_base_stat last_subtree_bstat;
};
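
/*
 * Hedged sketch of the bsync/bstat pairing: writers wrap the hot-path
 * bstat update in the u64_stats seqcount, readers retry until they see a
 * consistent snapshot (the delta and snapshot variables are assumed):
 *
 *	// writer, per-cpu hot path
 *	flags = u64_stats_update_begin_irqsave(&rstatbc->bsync);
 *	rstatbc->bstat.cputime.sum_exec_runtime += delta_exec;
 *	u64_stats_update_end_irqrestore(&rstatbc->bsync, flags);
 *
 *	// reader, flush path
 *	do {
 *		seq = u64_stats_fetch_begin(&rstatbc->bsync);
 *		snapshot = rstatbc->bstat;
 *	} while (u64_stats_fetch_retry(&rstatbc->bsync, seq));
 */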

struct cgroup_freezer_state {
	/* Should the cgroup and its descendants be frozen? */
	bool freeze;

	/* Should the cgroup actually be frozen? */
	bool e_freeze;

	/* Fields below are protected by css_set_lock */

	/* Number of frozen descendant cgroups */
	int nr_frozen_descendants;

	/*
	 * Number of tasks which are counted as frozen:
	 * frozen, SIGSTOPped, and PTRACEd.
	 */
	int nr_frozen_tasks;

	/* Freeze time data consistency protection */
	seqcount_t freeze_seq;

	/*
	 * Most recent time the cgroup was requested to freeze.
	 * Accesses guarded by freeze_seq counter. Writes serialized
	 * by css_set_lock.
	 */
	u64 freeze_start_nsec;

	/*
	 * Total duration the cgroup has spent freezing.
	 * Accesses guarded by freeze_seq counter. Writes serialized
	 * by css_set_lock.
	 */
	u64 frozen_nsec;
};

struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags; /* "unsigned long" so bitops work */

	/*
	 * The depth this cgroup is at. The root is at depth zero and each
	 * step down the hierarchy increments the level. This along with
	 * ancestors[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy (see the
	 * descendancy sketch after this structure).
	 */
	int level;

	/* Maximum allowed descendant tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descendant cgroups.
	 * Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is a maximum allowed number of descendant cgroups.
	 *
	 * nr_descendants and nr_dying_descendants are protected
	 * by cgroup_mutex and css_set_lock. It's fine to read them holding
	 * any of cgroup_mutex and css_set_lock; for writing both locks
	 * should be held.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets. The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type. Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children; /* # of live threaded child cgroups */

	/* sequence number for cgroup.kill, serialized by css_set_lock. */
	unsigned int kill_seq;

	struct kernfs_node *kn; /* cgroup kernfs entry */
	struct cgroup_file procs_file; /* handle for "cgroup.procs" */
	struct cgroup_file events_file; /* handle for "cgroup.events" */

	/* handles for "{cpu,memory,io,irq}.pressure" */
	struct cgroup_file psi_files[NR_PSI_RESOURCES];

	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
	 * one which may have more subsystems enabled. Controller knobs
	 * are made available iff the controller is enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	/*
	 * Keep track of total number of dying CSSes at and below this cgroup.
	 * Protected by cgroup_mutex.
	 */
	int nr_dying_subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup. Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsys disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled. The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self. If threaded, it points to the nearest
	 * domain ancestor. Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;
	struct cgroup *old_dom_cgrp; /* used while enabling threaded */

	/*
	 * Depending on the context, this field is initialized via
	 * css_rstat_init() at different places:
	 *
	 * when cgroup is the root cgroup
	 *   performed in cgroup_setup_root()
	 * otherwise
	 *   performed in cgroup_create()
	 */
	struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;

	/*
	 * Add padding to keep the read mostly rstat per-cpu pointer on a
	 * different cacheline than the following *bstat fields which can have
	 * frequent updates.
	 */
	CACHELINE_PADDING(_pad_);

	/* cgroup basic resource statistics */
	struct cgroup_base_stat last_bstat;
	struct cgroup_base_stat bstat;
	struct prev_cputime prev_cputime; /* for printing out cputime */

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to track pressure stalls */
	struct psi_group *psi;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* Used to store internal freezer state */
	struct cgroup_freezer_state freezer;

#ifdef CONFIG_BPF_SYSCALL
	struct bpf_local_storage __rcu *bpf_cgrp_storage;
#endif

	/* All ancestors including self */
	struct cgroup *ancestors[];
};
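
/*
 * Hedged sketch of how ->level and ->ancestors[] above give an O(1)
 * descendancy test; this mirrors cgroup_is_descendant() in cgroup.h:
 *
 *	bool is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
 *	{
 *		if (cgrp->root != ancestor->root ||
 *		    cgrp->level < ancestor->level)
 *			return false;
 *		return cgrp->ancestors[ancestor->level] == ancestor;
 *	}
 */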

/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy. This is
 * internal to cgroup core. Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* A list running through the active hierarchies */
	struct list_head root_list;
	struct rcu_head rcu; /* Must be near the top */

	/*
	 * The root cgroup. The containing cgroup_root will be destroyed on its
	 * release. cgrp->ancestors[0] overflows into the following field,
	 * so cgrp_ancestor_storage must immediately follow.
	 */
	struct cgroup cgrp;

	/* must follow cgrp for cgrp->ancestors[0], see above */
	struct cgroup *cgrp_ancestor_storage;

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * Name of the subsystem is prepended in cgroup_file_name().
	 * A zero-length string indicates the end of the cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field. cgroup will record the handle of
	 * the created file into it. The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping. Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss; /* NULL for cgroup core files */
	struct list_head node; /* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read().
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64().
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64().
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len. Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

	__poll_t (*poll)(struct kernfs_open_file *of,
			 struct poll_table_struct *pt);

	struct lock_class_key lockdep_key;
};
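
/*
 * Hedged example of a cftype array (hypothetical "foo" controller): the
 * array is terminated by an entry with a zero-length name, simple knobs
 * use read_u64()/write_u64() and free-form files use seq_show():
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "limit",
 *			.read_u64 = foo_limit_read,
 *			.write_u64 = foo_limit_write,
 *		},
 *		{
 *			.name = "stat",
 *			.seq_show = foo_stat_show,
 *			.flags = CFTYPE_NOT_ON_ROOT,
 *		},
 *		{ }	// terminator
 *	};
 */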

/*
 * Control Group subsystem type.
 * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	void (*css_killed)(struct cgroup_subsys_state *css);
	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);
	int (*css_local_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	int (*can_fork)(struct task_struct *task,
			struct css_set *cset);
	void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*release)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;

	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint. This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy. In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and a threaded
	 * controller should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded. implicit && !threaded is not supported.
	 */
	bool threaded:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes. Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered. The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes; /* for the default hierarchy */
	struct cftype *legacy_cftypes; /* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems. When such a subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available. Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled. The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;

	spinlock_t rstat_ss_lock;
	struct llist_head __percpu *lhead; /* lockless update list head */
};
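
/*
 * Hedged sketch of a controller definition (hypothetical "foo" subsystem;
 * a real one must also be listed in cgroup_subsys.h so that the matching
 * foo_cgrp_id / foo_cgrp_subsys symbols exist):
 *
 *	struct cgroup_subsys foo_cgrp_subsys = {
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.css_online	= foo_css_online,
 *		.can_attach	= foo_can_attach,
 *		.attach		= foo_attach,
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_files,
 *		.threaded	= true,
 *	};
 */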

extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
extern bool cgroup_enable_per_threadgroup_rwsem;

struct cgroup_of_peak {
	unsigned long value;
	struct list_head list;
};

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a global percpu_rw_semaphore and a per-threadgroup rw_semaphore when
 * favordynmods is on. See the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
	if (cgroup_enable_per_threadgroup_rwsem)
		down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	if (cgroup_enable_per_threadgroup_rwsem)
		up_read(&tsk->signal->cgroup_threadgroup_rwsem);
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
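
/*
 * Hedged usage sketch: the two helpers above bracket thread-group
 * changing operations such as fork so they stay out of the way of an
 * ongoing cgroup migration (simplified from the fork path):
 *
 *	cgroup_threadgroup_change_begin(current);
 *	// ... link the new task into its css_set ...
 *	cgroup_threadgroup_change_end(current);
 */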

#else /* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif /* CONFIG_CGROUPS */

#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly
 * set attributes on each sock which can then be tested by the network
 * layer. On the default hierarchy, each sock is associated with the
 * cgroup it was created in and the networking layer can match the
 * cgroup directly.
 */
struct sock_cgroup_data {
	struct cgroup *cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
	u32 classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
	u16 prioidx; /* v1 */
#endif
};

static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_PRIO
	return READ_ONCE(skcd->prioidx);
#else
	return 1;
#endif
}

#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
	return READ_ONCE(skcd->classid);
}
#endif

static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
#ifdef CONFIG_CGROUP_NET_PRIO
	WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}

#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	WRITE_ONCE(skcd->classid, classid);
}
#endif

#else /* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif /* CONFIG_SOCK_CGROUP_DATA */

#endif /* _LINUX_CGROUP_DEFS_H */