/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces. Include this file
 * directly only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME		64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
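
/*
 * For illustration: if <linux/cgroup_subsys.h> contained only the two
 * entries SUBSYS(cpu) and SUBSYS(memory), the x-macro above would
 * expand to the following (a sketch of the mechanism; the real list
 * depends on the kernel configuration):
 *
 *	enum cgroup_subsys_id {
 *		cpu_cgrp_id,
 *		memory_cgrp_id,
 *		CGROUP_SUBSYS_COUNT,
 *	};
 */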

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup. For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,

	/* Control group has to be frozen. */
	CGRP_FREEZE,

	/* Cgroup is frozen. */
	CGRP_FROZEN,
};

/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries. If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/*
	 * Reduce latencies on dynamic cgroup modifications such as task
	 * migrations and controller on/offs by disabling percpu operation on
	 * cgroup_threadgroup_rwsem. This makes hot path operations such as
	 * forks and exits into the slow path and more expensive.
	 *
	 * The static usage pattern of creating a cgroup, enabling controllers,
	 * and then seeding it with CLONE_INTO_CGROUP doesn't require write
	 * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
	 * favordynmod.
	 */
	CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),

	/*
	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),

	/*
	 * Enable legacy local memory.events.
	 */
	CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),

	/*
	 * Enable recursive subtree protection.
	 */
	CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),

	/*
	 * Enable hugetlb accounting for the memory controller.
	 */
	CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),

	/*
	 * Enable legacy local pids.events.
	 */
	CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0), /* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1), /* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2), /* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
	CFTYPE_DEBUG		= (1 << 5), /* create when cgroup_debug */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16), /* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17), /* not on default hierarchy */
	__CFTYPE_ADDED		= (1 << 18),
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications. This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
	unsigned long notified_at;
	struct timer_list notify_timer;
};
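
/*
 * Illustrative sketch for a hypothetical controller "foo" (the type and
 * handlers are assumptions, not real kernel symbols), using the
 * cgroup_file_notify() helper declared in linux/cgroup.h: a controller
 * embeds a cgroup_file in its per-cgroup state (with the css as the
 * first member), points cftype->file_offset at it, and can later kick
 * poll/inotify waiters on the file:
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state css;
 *		struct cgroup_file events_file;
 *	};
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "events",
 *			.file_offset = offsetof(struct foo_cgroup, events_file),
 *			.seq_show = foo_events_show,
 *		},
 *		{ }	// zero length name terminates the array
 *	};
 *
 *	// after updating state:
 *	cgroup_file_notify(&foo->events_file);
 */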

/*
 * Per-subsystem/per-cgroup state maintained by the system. This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/*
	 * Depending on the context, this field is initialized
	 * via css_rstat_init() at different places:
	 *
	 * when css is associated with cgroup::self
	 *   when css->cgroup is the root cgroup
	 *     performed in cgroup_init()
	 *   when css->cgroup is not the root cgroup
	 *     performed in cgroup_create()
	 * when css is associated with a subsystem
	 *   when css->cgroup is the root cgroup
	 *     performed in cgroup_init_subsys() in the non-early path
	 *   when css->cgroup is not the root cgroup
	 *     performed in css_create()
	 */
	struct css_rstat_cpu __percpu *rstat_cpu;

	/*
	 * siblings list anchored at the parent's ->children
	 *
	 * linkage is protected by cgroup_mutex or RCU
	 */
	struct list_head sibling;
	struct list_head children;

	/*
	 * PI: Subsys-unique ID. 0 is unused and root is always 1. The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses. It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr and
	 * used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children. Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct work_struct destroy_work;
	struct rcu_work destroy_rwork;

	/*
	 * PI: the parent css. Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;

	/*
	 * Keep track of total numbers of visible descendant CSSes.
	 * The total number of dying CSSes is tracked in
	 * css->cgroup->nr_dying_subsys[ssid].
	 * Protected by cgroup_mutex.
	 */
	int nr_descendants;

	/*
	 * A singly-linked list of css structures to be rstat flushed.
	 * This is a scratch field to be used exclusively by
	 * css_rstat_flush().
	 *
	 * Protected by rstat_base_lock when css is cgroup::self.
	 * Protected by css->ss->rstat_ss_lock otherwise.
	 */
	struct cgroup_subsys_state *rstat_flush_next;
};
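
/*
 * Controllers typically embed this struct at the start of their own
 * per-cgroup state and convert back with container_of(). A minimal
 * sketch, with "foo_cgroup" as a hypothetical controller type:
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state css;	// first member by convention
 *		u64 usage;
 *	};
 *
 *	static inline struct foo_cgroup *css_foo(struct cgroup_subsys_state *css)
 *	{
 *		return css ? container_of(css, struct foo_cgroup, css) : NULL;
 *	}
 *
 * Lifetime is managed through ->refcnt: take a reference with css_get()
 * or css_tryget_online() before using a css beyond the current RCU
 * section and drop it with css_put().
 */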

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem. This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self. If threaded,
	 * to the matching cset of the nearest domain ancestor. The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;

	/*
	 * Lists running through all tasks using this cgroup group.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in. Protected by
	 * css_set_lock, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 */
	struct list_head tasks;
	struct list_head mg_tasks;
	struct list_head dying_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;

	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with. The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot. Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set. Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination. Protected by cgroup_mutex.
	 */
	struct list_head mg_src_preload_node;
	struct list_head mg_dst_preload_node;
	struct list_head mg_node;

	/*
	 * If this cset is acting as the source of migration the following
	 * two fields are set. mg_src_cgrp and mg_dst_cgrp are
	 * respectively the source and destination cgroups of the on-going
	 * migration. mg_dst_cset is the destination cset the target tasks
	 * on this cset should be migrated to. Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
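
/*
 * A task reaches its csses through its css_set. A minimal sketch using
 * the task_css_set() accessor from linux/cgroup.h (the pr_info() of the
 * id field is purely illustrative):
 *
 *	struct css_set *cset;
 *
 *	rcu_read_lock();
 *	cset = task_css_set(current);
 *	pr_info("cpu css id=%d\n", cset->subsys[cpu_cgrp_id]->id);
 *	rcu_read_unlock();
 *
 * Note that on the default hierarchy ->subsys[ssid] may point to the
 * css of an ancestor cgroup, as described above.
 */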

struct cgroup_base_stat {
	struct task_cputime cputime;

#ifdef CONFIG_SCHED_CORE
	u64 forceidle_sum;
#endif
	u64 ntime;
};

/*
 * rstat - cgroup scalable recursive statistics. Accounting is done
 * per-cpu in css_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the css_rstat_cpu and its ancestors are
 * linked into the updated tree. On the following read, propagation only
 * considers and consumes the updated tree. This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently. The combination can
 * become very expensive. By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next.
 */
struct css_rstat_cpu {
	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next. updated_children is terminated by its container css.
	 */
	struct cgroup_subsys_state *updated_children;
	struct cgroup_subsys_state *updated_next;	/* NULL if not on the list */

	struct llist_node lnode;			/* lockless list for update */
	struct cgroup_subsys_state *owner;		/* back pointer */
};

/*
 * This struct hosts the fields which track basic resource statistics on
 * top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_base_cpu {
	/*
	 * ->bsync protects ->bstat. These are the only fields which get
	 * updated in the hot path.
	 */
	struct u64_stats_sync bsync;
	struct cgroup_base_stat bstat;

	/*
	 * Snapshots at the last reading. These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct cgroup_base_stat last_bstat;

	/*
	 * This field is used to record the cumulative per-cpu time of
	 * the cgroup and its descendants. Currently it can be read via
	 * eBPF/drgn etc, and we are still trying to determine how to
	 * expose it in the cgroupfs interface.
	 */
	struct cgroup_base_stat subtree_bstat;

	/*
	 * Snapshots at the last reading. These are used to calculate the
	 * deltas to propagate to the per-cpu subtree_bstat.
	 */
	struct cgroup_base_stat last_subtree_bstat;
};
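
/*
 * A hedged sketch of how a controller participates in rstat. The
 * css_rstat_updated() helper follows the css_rstat_* naming used above;
 * "foo" and its per-cpu counter type are hypothetical:
 *
 *	// hot path: account an event and mark the css updated on this cpu
 *	this_cpu_add(foo->events_pcpu->nr_events, 1);
 *	css_rstat_updated(&foo->css, smp_processor_id());
 *
 *	// read path: only the updated tree is walked; the subsystem's
 *	// ->css_rstat_flush() callback is invoked per updated cpu
 *	static void foo_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 *	{
 *		struct foo_pcpu *pcpu = per_cpu_ptr(css_foo(css)->events_pcpu, cpu);
 *
 *		// fold the per-cpu delta into the controller's totals ...
 *	}
 */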

struct cgroup_freezer_state {
	/* Should the cgroup and its descendants be frozen? */
	bool freeze;

	/* Should the cgroup actually be frozen? */
	bool e_freeze;

	/* Fields below are protected by css_set_lock */

	/* Number of frozen descendant cgroups */
	int nr_frozen_descendants;

	/*
	 * Number of tasks, which are counted as frozen:
	 * frozen, SIGSTOPped, and PTRACEd.
	 */
	int nr_frozen_tasks;
};

struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * The depth this cgroup is at. The root is at depth zero and each
	 * step down the hierarchy increments the level. This along with
	 * ancestors[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy.
	 */
	int level;

	/* Maximum allowed descendant tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descendant cgroups.
	 * Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is a maximum allowed number of descendant cgroups.
	 *
	 * nr_descendants and nr_dying_descendants are protected
	 * by cgroup_mutex and css_set_lock. It's fine to read them holding
	 * any of cgroup_mutex and css_set_lock; for writing both locks
	 * should be held.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets. The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type. Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */

	/* sequence number for cgroup.kill, serialized by css_set_lock */
	unsigned int kill_seq;

	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */

	/* handles for "{cpu,memory,io,irq}.pressure" */
	struct cgroup_file psi_files[NR_PSI_RESOURCES];

	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
	 * one which may have more subsystems enabled. Controller knobs
	 * are made available iff it's enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	/*
	 * Keep track of total number of dying CSSes at and below this cgroup.
	 * Protected by cgroup_mutex.
	 */
	int nr_dying_subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup. Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsys disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled. The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self. If threaded, it points to the nearest
	 * domain ancestor. Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;
	struct cgroup *old_dom_cgrp;	/* used while enabling threaded */

	/*
	 * Depending on the context, this field is initialized via
	 * css_rstat_init() at different places:
	 *
	 * when cgroup is the root cgroup
	 *   performed in cgroup_setup_root()
	 * otherwise
	 *   performed in cgroup_create()
	 */
	struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;

	/*
	 * Add padding to keep the read mostly rstat per-cpu pointer on a
	 * different cacheline than the following *bstat fields which can have
	 * frequent updates.
	 */
	CACHELINE_PADDING(_pad_);

	/* cgroup basic resource statistics */
	struct cgroup_base_stat last_bstat;
	struct cgroup_base_stat bstat;
	struct prev_cputime prev_cputime;	/* for printing out cputime */

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to track pressure stalls */
	struct psi_group *psi;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* Used to store internal freezer state */
	struct cgroup_freezer_state freezer;

#ifdef CONFIG_BPF_SYSCALL
	struct bpf_local_storage __rcu *bpf_cgrp_storage;
#endif

	/* All ancestors including self */
	struct cgroup *ancestors[];
};
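
/*
 * ->level and ->ancestors[] together give an O(1) ancestry test, which
 * is how the cgroup_is_descendant() helper in linux/cgroup.h works:
 *
 *	// is @cgrp inside the subtree rooted at @ancestor?
 *	if (cgrp->level >= ancestor->level &&
 *	    cgrp->ancestors[ancestor->level] == ancestor)
 *		...
 */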

/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy. This is
 * internal to cgroup core. Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* A list running through the active hierarchies */
	struct list_head root_list;
	struct rcu_head rcu;		/* Must be near the top */

	/*
	 * The root cgroup. The containing cgroup_root will be destroyed on its
	 * release. cgrp->ancestors[0] will be used overflowing into the
	 * following field. cgrp_ancestor_storage must immediately follow.
	 */
	struct cgroup cgrp;

	/* must follow cgrp for cgrp->ancestors[0], see above */
	struct cgroup *cgrp_ancestor_storage;

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * Name of the subsystem is prepended in cgroup_file_name().
	 * Zero length string indicates end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field. cgroup will record the handle of
	 * the created file into it. The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping. Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read().
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64().
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64().
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len. Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

	__poll_t (*poll)(struct kernfs_open_file *of,
			 struct poll_table_struct *pt);

	struct lock_class_key lockdep_key;
};
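
/*
 * A minimal sketch of a cftype array for a hypothetical controller
 * "foo" (the foo_weight_read/foo_weight_write handlers are assumptions,
 * not real kernel functions). The zero length name terminates the
 * array, as noted above:
 *
 *	static struct cftype foo_cftypes[] = {
 *		{
 *			.name = "weight",
 *			.flags = CFTYPE_NOT_ON_ROOT,
 *			.read_u64 = foo_weight_read,
 *			.write_u64 = foo_weight_write,
 *		},
 *		{ }	// terminator
 *	};
 */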

/*
 * Control Group subsystem type.
 * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details.
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	void (*css_killed)(struct cgroup_subsys_state *css);
	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);
	int (*css_local_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	void (*post_attach)(void);
	int (*can_fork)(struct task_struct *task,
			struct css_set *cset);
	void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*release)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;

	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint. This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy. In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and threaded
	 * controllers should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded. implicit && !threaded is not supported.
	 */
	bool threaded:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes. Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered. The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems. When such a subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available. Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled. The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;

	spinlock_t rstat_ss_lock;
	struct llist_head __percpu *lhead;	/* lockless update list head */
};
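
/*
 * Each controller provides one statically defined instance of this
 * struct, named <name>_cgrp_subsys and enumerated via cgroup_subsys.h.
 * A hedged sketch for a hypothetical controller "foo" (all callback
 * implementations assumed, not shown):
 *
 *	struct cgroup_subsys foo_cgrp_subsys = {
 *		.css_alloc	= foo_css_alloc,
 *		.css_online	= foo_css_online,
 *		.css_offline	= foo_css_offline,
 *		.css_free	= foo_css_free,
 *		.can_attach	= foo_can_attach,
 *		.attach		= foo_attach,
 *		.dfl_cftypes	= foo_cftypes,
 *		.legacy_cftypes	= foo_cftypes,
 *	};
 */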

extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

struct cgroup_of_peak {
	unsigned long value;
	struct list_head list;
};

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}

#else	/* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */

#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly
 * set attributes on each sock which can then be tested by the network
 * layer. On the default hierarchy, each sock is associated with the
 * cgroup it was created in and the networking layer can match the
 * cgroup directly.
 */
struct sock_cgroup_data {
	struct cgroup	*cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
	u32		classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
	u16		prioidx; /* v1 */
#endif
};

static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_PRIO
	return READ_ONCE(skcd->prioidx);
#else
	return 1;
#endif
}

#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
	return READ_ONCE(skcd->classid);
}
#endif

static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
#ifdef CONFIG_CGROUP_NET_PRIO
	WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}

#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	WRITE_ONCE(skcd->classid, classid);
}
#endif

#else	/* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#endif	/* _LINUX_CGROUP_DEFS_H */