/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "cgroup-internal.h"

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/hashtable.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <linux/sched/cputime.h>
#include <linux/psi.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)
/* let's not notify more than 100 times per second */
#define CGROUP_FILE_NOTIFY_MIN_INTV	DIV_ROUND_UP(HZ, 100)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);

#ifdef CONFIG_PROVE_RCU
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif

DEFINE_SPINLOCK(trace_cgroup_path_lock);
char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
bool cgroup_debug __read_mostly;

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			   !lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");
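/*
 * Illustrative sketch, not in the original file: per the locking rules
 * above, a hypothetical read-side walker may rely on either RCU or
 * cgroup_mutex and can assert as much:
 *
 *	rcu_read_lock();
 *	cgroup_assert_mutex_or_rcu_locked();
 *	... follow cgroup/css pointers ...
 *	rcu_read_unlock();
 *
 * Writers must hold cgroup_mutex, plus css_set_lock when touching the
 * css_set lists it protects.
 */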
/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x)							\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);	\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
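/*
 * Illustrative note, not in the original file: cgroup_subsys.h is an
 * x-macro list of SUBSYS(name) entries, so each inclusion above expands
 * under a different SUBSYS() definition.  For a kernel with the cpuset
 * controller built in, for example, the first array roughly expands to:
 *
 *	struct cgroup_subsys *cgroup_subsys[] = {
 *		[cpuset_cgrp_id] = &cpuset_cgrp_subsys,
 *		...
 *	};
 */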
static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_visible;

/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;

/* some controllers are implicitly enabled on the default hierarchy */
static u16 cgrp_dfl_implicit_ss_mask;

/* some controllers can be threaded on the default hierarchy */
static u16 cgrp_dfl_threaded_ss_mask;

/* The list of hierarchy roots */
LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmasks identify subsystems with specific features to avoid
 * having to do iterative checks repeatedly.
 */
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
static u16 have_free_callback __read_mostly;
static u16 have_canfork_callback __read_mostly;

/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
	.count		= REFCOUNT_INIT(2),
	.user_ns	= &init_user_ns,
	.ns.ops		= &cgroupns_operations,
	.ns.inum	= PROC_CGROUP_INIT_INO,
	.root_cset	= &init_css_set,
};

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_base_files[];

static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_advance(struct css_task_iter *it);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
					      struct cgroup_subsys *ss);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names which
 * is fine for individual subsystems but unsuitable for cgroup core.  This
 * is a slower static_key_enabled() based test indexed by @ssid.
 */
bool cgroup_ssid_enabled(int ssid)
{
	if (CGROUP_SUBSYS_COUNT == 0)
		return false;

	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}
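/*
 * Illustrative sketch, not in the original file: a hypothetical debug
 * loop over all subsystem IDs using the helper above:
 *
 *	int ssid;
 *
 *	for (ssid = 0; ssid < CGROUP_SUBSYS_COUNT; ssid++)
 *		if (cgroup_ssid_enabled(ssid))
 *			pr_debug("%s is enabled\n", cgroup_subsys_name[ssid]);
 */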
/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * The set of behaviors which change on the default hierarchy are still
 * being determined and the mount option is prefixed with __DEVEL__.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates a kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}
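/*
 * Explanatory note, not in the original file: cgroup_idr_alloc() above
 * pairs idr_preload() with masking out __GFP_DIRECT_RECLAIM so that any
 * memory the IDR needs is allocated up front; the idr_alloc() call made
 * under the BH-disabled spinlock then never sleeps.
 */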
static bool cgroup_has_tasks(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets;
}

bool cgroup_is_threaded(struct cgroup *cgrp)
{
	return cgrp->dom_cgrp != cgrp;
}

/* can @cgrp host both domain and threaded children? */
static bool cgroup_is_mixable(struct cgroup *cgrp)
{
	/*
	 * Root isn't under domain level resource control exempting it from
	 * the no-internal-process constraint, so it can serve as a thread
	 * root and a parent of resource domains at the same time.
	 */
	return !cgroup_parent(cgrp);
}

/* can @cgrp become a thread root? should always be true for a thread root */
static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
{
	/* mixables don't care */
	if (cgroup_is_mixable(cgrp))
		return true;

	/* domain roots can't be nested under threaded */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* can only have either domain or threaded children */
	if (cgrp->nr_populated_domain_children)
		return false;

	/* and no domain controllers can be enabled */
	if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
		return false;

	return true;
}

/* is @cgrp root of a threaded subtree? */
bool cgroup_is_thread_root(struct cgroup *cgrp)
{
	/* thread root should be a domain */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* a domain w/ threaded children is a thread root */
	if (cgrp->nr_threaded_children)
		return true;

	/*
	 * A domain which has tasks and explicit threaded controllers
	 * enabled is a thread root.
	 */
	if (cgroup_has_tasks(cgrp) &&
	    (cgrp->subtree_control & cgrp_dfl_threaded_ss_mask))
		return true;

	return false;
}

/* a domain which isn't connected to the root without breakage can't be used */
static bool cgroup_is_valid_domain(struct cgroup *cgrp)
{
	/* the cgroup itself can be a thread root */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* but the ancestors can't be unless mixable */
	while ((cgrp = cgroup_parent(cgrp))) {
		if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp))
			return false;
		if (cgroup_is_threaded(cgrp))
			return false;
	}

	return true;
}
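/*
 * Illustrative example, not in the original file, of how the predicates
 * above classify a small v2 hierarchy:
 *
 *	root                  - mixable (no parent), may parent both kinds
 *	`- A                  - domain cgroup; thread root once B exists
 *	   `- B (threaded)    - cgroup_is_threaded() == true, dom_cgrp == A
 *	      `- C (threaded) - the whole subtree under A shares A as its
 *	                        resource domain
 */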
/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	u16 root_ss_mask = cgrp->root->subsys_mask;

	if (parent) {
		u16 ss_mask = parent->subtree_control;

		/* threaded cgroups can only have threaded controllers */
		if (cgroup_is_threaded(cgrp))
			ss_mask &= cgrp_dfl_threaded_ss_mask;
		return ss_mask;
	}

	if (cgroup_on_dfl(cgrp))
		root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
				  cgrp_dfl_implicit_ss_mask);
	return root_ss_mask;
}

/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);

	if (parent) {
		u16 ss_mask = parent->subtree_ss_mask;

		/* threaded cgroups can only have threaded controllers */
		if (cgroup_is_threaded(cgrp))
			ss_mask &= cgrp_dfl_threaded_ss_mask;
		return ss_mask;
	}

	return cgrp->root->subsys_mask;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get @cgrp's css associated with @ss.  If the css doesn't exist
 * or is offline, %NULL is returned.
 */
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
						     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = cgroup_css(cgrp, ss);
	if (!css || !css_tryget_online(css))
		css = NULL;
	rcu_read_unlock();

	return css;
}

/**
 * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
							struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Test ss_mask.
	 */
	while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
		cgrp = cgroup_parent(cgrp);
		if (!cgrp)
			return NULL;
	}

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 *
 * The returned css is not guaranteed to be online, and therefore it is the
 * caller's responsibility to try-get a reference for it.
 */
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
					 struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	do {
		css = cgroup_css(cgrp, ss);

		if (css)
			return css;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	return init_css_set.subsys[ss->id];
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}

static void cgroup_get_live(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}
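/*
 * Illustrative sketch, not in the original file: a hypothetical caller
 * resolving the effective css of a subsystem for a cgroup and dropping
 * the reference when done, per the contract documented above:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, ss);
 *	... use the pinned css ...
 *	css_put(css);
 */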
struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is an open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					    \
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	    \
		if (!((css) = cgroup_e_css_by_mask(cgrp,		    \
						   cgroup_subsys[(ssid)]))) \
			;						    \
		else

/**
 * do_each_subsys_mask - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_mask: the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * @ss_mask is set.
 */
#define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
	unsigned long __ss_mask = (ss_mask);				\
	if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */	\
		(ssid) = 0;						\
		break;							\
	}								\
	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
		(ss) = cgroup_subsys[ssid];				\
		{

#define while_each_subsys_mask()					\
		}							\
	}								\
} while (false)
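/*
 * Illustrative usage, not in the original file: the paired macros above
 * behave like a for-each loop over the subsystems whose bits are set in
 * the mask, e.g. (hypothetical caller and mask):
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	do_each_subsys_mask(ss, ssid, enabled_mask) {
 *		pr_debug("considering %s\n", ss->name);
 *	} while_each_subsys_mask();
 */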
/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* walk live descendants in preorder */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)		\
	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL))	\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted.  It contains a pointer to the root state
 * for each subsystem.  Also used to anchor the list of css_sets.  Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= REFCOUNT_INIT(1),
	.dom_cset		= &init_css_set,
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
	.threaded_csets		= LIST_HEAD_INIT(init_css_set.threaded_csets),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),

	/*
	 * The following field is re-initialized when this cset gets linked
	 * in cgroup_init().  However, let's initialize the field
	 * statically too so that the default cgroup can be accessed safely
	 * early during boot.
	 */
	.dfl_cgrp		= &cgrp_dfl_root.cgrp,
};

static int css_set_count	= 1;	/* 1 for init_css_set */

static bool css_set_threaded(struct css_set *cset)
{
	return cset->dom_cset != cset;
}

/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 *
 * css_set_populated() should be the same as !!cset->nr_tasks at steady
 * state.  However, css_set_populated() can be called while a task is being
 * added to or removed from the linked list before the nr_tasks is
 * properly updated.  Hence, we can't just look at ->nr_tasks here.
 */
static bool css_set_populated(struct css_set *cset)
{
	lockdep_assert_held(&css_set_lock);

	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last.  Update @cgrp->nr_populated_* accordingly.  The
 * count is propagated towards root so that a given cgroup's
 * nr_populated_children is zero iff none of its descendants contain any
 * tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if both
 * @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and
 * 1 otherwise.  When the sum changes from or to zero, userland is notified
 * that the content of the interface file has changed.  This can be used to
 * detect when @cgrp and its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	struct cgroup *child = NULL;
	int adj = populated ? 1 : -1;

	lockdep_assert_held(&css_set_lock);

	do {
		bool was_populated = cgroup_is_populated(cgrp);

		if (!child) {
			cgrp->nr_populated_csets += adj;
		} else {
			if (cgroup_is_threaded(child))
				cgrp->nr_populated_threaded_children += adj;
			else
				cgrp->nr_populated_domain_children += adj;
		}

		if (was_populated == cgroup_is_populated(cgrp))
			break;

		cgroup1_check_for_release(cgrp);
		cgroup_file_notify(&cgrp->events_file);

		child = cgrp;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}
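/*
 * Illustrative example, not in the original file: when a first task enters
 * an empty cgroup root/A/B, cgroup_update_populated(B, true) bumps
 * B->nr_populated_csets and then walks towards the root bumping the
 * appropriate nr_populated_*_children counter of A and root.  The walk
 * stops early at the first ancestor whose overall populated state does
 * not change, since everything above it is already accounted for.
 */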
/**
 * css_set_update_populated - update populated state of a css_set
 * @cset: target css_set
 * @populated: whether @cset is populated or depopulated
 *
 * @cset is either getting the first task or losing the last.  Update the
 * populated counters of all associated cgroups accordingly.
 */
static void css_set_update_populated(struct css_set *cset, bool populated)
{
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
		cgroup_update_populated(link->cgrp, populated);
}

/**
 * css_set_move_task - move a task from one css_set to another
 * @task: task being moved
 * @from_cset: css_set @task currently belongs to (may be NULL)
 * @to_cset: new css_set @task is being moved to (may be NULL)
 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
 *
 * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
 * css_set, @from_cset can be NULL.  If @task is being disassociated
 * instead of moved, @to_cset can be NULL.
 *
 * This function automatically handles populated counter updates and
 * css_task_iter adjustments but the caller is responsible for managing
 * @from_cset and @to_cset's reference counts.
 */
static void css_set_move_task(struct task_struct *task,
			      struct css_set *from_cset, struct css_set *to_cset,
			      bool use_mg_tasks)
{
	lockdep_assert_held(&css_set_lock);

	if (to_cset && !css_set_populated(to_cset))
		css_set_update_populated(to_cset, true);

	if (from_cset) {
		struct css_task_iter *it, *pos;

		WARN_ON_ONCE(list_empty(&task->cg_list));

		/*
		 * @task is leaving, advance task iterators which are
		 * pointing to it so that they can resume at the next
		 * position.  Advancing an iterator might remove it from
		 * the list, use safe walk.  See css_task_iter_advance*()
		 * for details.
		 */
		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
					 iters_node)
			if (it->task_pos == &task->cg_list)
				css_task_iter_advance(it);

		list_del_init(&task->cg_list);
		if (!css_set_populated(from_cset))
			css_set_update_populated(from_cset, false);
	} else {
		WARN_ON_ONCE(!list_empty(&task->cg_list));
	}

	if (to_cset) {
		/*
		 * We are synchronized through cgroup_threadgroup_rwsem
		 * against PF_EXITING setting such that we can't race
		 * against cgroup_exit() changing the css_set to
		 * init_css_set and dropping the old one.
		 */
		WARN_ON_ONCE(task->flags & PF_EXITING);

		cgroup_move_task(task, to_cset);
		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
							     &to_cset->tasks);
	}
}

/*
 * hash table for css_sets.  This improves the performance of finding an
 * existing css_set.  This hash doesn't (currently) take into account
 * cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}
void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_lock);

	if (!refcount_dec_and_test(&cset->refcount))
		return;

	WARN_ON_ONCE(!list_empty(&cset->threaded_csets));

	/* This css_set is dead.  Unlink it and release cgroup and css refs */
	for_each_subsys(ss, ssid) {
		list_del(&cset->e_cset_node[ssid]);
		css_put(cset->subsys[ssid]);
	}
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		if (cgroup_parent(link->cgrp))
			cgroup_put(link->cgrp);
		kfree(link);
	}

	if (css_set_threaded(cset)) {
		list_del(&cset->threaded_csets_node);
		put_css_set_locked(cset->dom_cset);
	}

	kfree_rcu(cset, rcu_head);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct cgroup *new_dfl_cgrp;
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/* @cset's domain should match the default cgroup's */
	if (cgroup_on_dfl(new_cgrp))
		new_dfl_cgrp = new_cgrp;
	else
		new_dfl_cgrp = old_cset->dfl_cgrp;

	if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp)
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}
/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					     struct cgroup *cgrp,
					     struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set.  While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css_by_mask(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing css_set matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	/*
	 * Always add links to the tail of the lists so that the lists are
	 * in chronological order.
	 */
	list_move_tail(&link->cset_link, &cgrp->cset_links);
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);

	if (cgroup_parent(cgrp))
		cgroup_get_live(cgrp);
}
/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * First see if we already have a css_set that matches the desired
	 * set.
	 */
	spin_lock_irq(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	spin_unlock_irq(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	refcount_set(&cset->refcount, 1);
	cset->dom_cset = cset;
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->task_iters);
	INIT_LIST_HEAD(&cset->threaded_csets);
	INIT_HLIST_NODE(&cset->hlist);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);

	/*
	 * Copy the set of subsystem state objects generated in
	 * find_existing_css_set().
	 */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	spin_lock_irq(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *css = cset->subsys[ssid];

		list_add_tail(&cset->e_cset_node[ssid],
			      &css->cgroup->e_csets[ssid]);
		css_get(css);
	}

	spin_unlock_irq(&css_set_lock);

	/*
	 * If @cset should be threaded, look up the matching dom_cset and
	 * link them up.  We first fully initialize @cset then look for the
	 * dom_cset.  It's simpler this way and safe as @cset is guaranteed
	 * to stay empty until we return.
	 */
	if (cgroup_is_threaded(cset->dfl_cgrp)) {
		struct css_set *dcset;

		dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
		if (!dcset) {
			put_css_set(cset);
			return NULL;
		}

		spin_lock_irq(&css_set_lock);
		cset->dom_cset = dcset;
		list_add_tail(&cset->threaded_csets_node,
			      &dcset->threaded_csets);
		spin_unlock_irq(&css_set_lock);
	}

	return cset;
}
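/*
 * Illustrative sketch, not in the original file: migration code would use
 * the helper above roughly as follows (hypothetical caller, cgroup_mutex
 * held):
 *
 *	struct css_set *old_cset = task_css_set(task);
 *	struct css_set *new_cset = find_css_set(old_cset, dst_cgrp);
 *
 *	if (!new_cset)
 *		return -ENOMEM;
 *	... attach the task, then put_css_set(new_cset) when the
 *	    reference is no longer needed ...
 */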
struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
}

void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	trace_cgroup_destroy_root(root);

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	spin_lock_irq(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}

	spin_unlock_irq(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/*
 * look up cgroup associated with current task's cgroup namespace on the
 * specified hierarchy
 */
static struct cgroup *
current_cgns_cgroup_from_root(struct cgroup_root *root)
{
	struct cgroup *res = NULL;
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	rcu_read_lock();

	cset = current->nsproxy->cgroup_ns->root_cset;
	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	rcu_read_unlock();

	BUG_ON(!res);
	return res;
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else if (root == &cgrp_dfl_root) {
		res = cset->dfl_cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}
/*
 * Return the cgroup for "task" from the given hierarchy.  Must be
 * called with cgroup_mutex and css_set_lock held.
 */
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero.  Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore the root
 * cgroup always has either child cgroups and/or using tasks.  So we
 * don't need a special hack to ensure that the root cgroup cannot be
 * deleted.
 *
 * P.S. One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static struct kernfs_syscall_ops cgroup_kf_syscall_ops;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	struct cgroup_subsys *ss = cft->ss;

	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
		const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";

		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
			 dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
			 cft->name);
	} else {
		strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	}
	return buf;
}
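/*
 * Illustrative example, not in the original file: for a cftype named
 * "max" belonging to the memory controller, cgroup_file_name() yields
 * "memory.max" on the default hierarchy.  With CFTYPE_NO_PREFIX (or a
 * root mounted with "noprefix") it would be just "max", and CFTYPE_DEBUG
 * files gain a leading prefix, e.g. ".__DEBUG__.memory.max".
 */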
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * S_IRUGO for read, S_IWUSR for write.
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write) {
		if (cft->flags & CFTYPE_WORLD_WRITABLE)
			mode |= S_IWUGO;
		else
			mode |= S_IWUSR;
	}

	return mode;
}

/**
 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
 * @subtree_control: the new subtree_control mask to consider
 * @this_ss_mask: available subsystems
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied while restricted to @this_ss_mask.
 */
static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
{
	u16 cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	cur_ss_mask |= cgrp_dfl_implicit_ss_mask;

	while (true) {
		u16 new_ss_mask = cur_ss_mask;

		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
			new_ss_mask |= ss->depends_on;
		} while_each_subsys_mask();

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		new_ss_mask &= this_ss_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}
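/*
 * Illustrative example, not in the original file: the loop above computes
 * a dependency closure.  With hypothetical controllers where A->depends_on
 * contains B and B->depends_on contains C, a @subtree_control of just A
 * iterates A -> A|B -> A|B|C before the mask stops changing, clipped to
 * @this_ss_mask at each step.
 */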
/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 * @drain_offline: perform offline draining on the cgroup
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
 * cgroup is drained of offlining csses before return.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	if (!cgroup_tryget(cgrp))
		return NULL;
	kernfs_break_active_protection(kn);

	if (drain_offline)
		cgroup_lock_and_drain_offline(cgrp);
	else
		mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}
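/*
 * Illustrative sketch, not in the original file: a write handler for a
 * cgroup interface file would typically pair the two helpers above as:
 *
 *	cgrp = cgroup_kn_lock_live(of->kn, false);
 *	if (!cgrp)
 *		return -ENODEV;
 *	... operate on cgrp under cgroup_mutex ...
 *	cgroup_kn_unlock(of->kn);
 */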
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);

	if (cft->file_offset) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = NULL;
		spin_unlock_irq(&cgroup_file_kn_lock);

		del_timer_sync(&cfile->notify_timer);
	}

	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * css_clear_dir - remove subsys files in a cgroup directory
 * @css: target css
 */
static void css_clear_dir(struct cgroup_subsys_state *css)
{
	struct cgroup *cgrp = css->cgroup;
	struct cftype *cfts;

	if (!(css->flags & CSS_VISIBLE))
		return;

	css->flags &= ~CSS_VISIBLE;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_base_files;
		else
			cfts = cgroup1_base_files;

		cgroup_addrm_files(css, cgrp, cfts, false);
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node)
			cgroup_addrm_files(css, cgrp, cfts, false);
	}
}

/**
 * css_populate_dir - create subsys files in a cgroup directory
 * @css: target css
 *
 * On failure, no file is added.
 */
static int css_populate_dir(struct cgroup_subsys_state *css)
{
	struct cgroup *cgrp = css->cgroup;
	struct cftype *cfts, *failed_cfts;
	int ret;

	if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
		return 0;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_base_files;
		else
			cfts = cgroup1_base_files;

		ret = cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
		if (ret < 0)
			return ret;
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node) {
			ret = cgroup_addrm_files(css, cgrp, cfts, true);
			if (ret < 0) {
				failed_cfts = cfts;
				goto err;
			}
		}
	}

	css->flags |= CSS_VISIBLE;

	return 0;
err:
	list_for_each_entry(cfts, &css->ss->cfts, node) {
		if (cfts == failed_cfts)
			break;
		cgroup_addrm_files(css, cgrp, cfts, false);
	}
	return ret;
}

int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
{
	struct cgroup *dcgrp = &dst_root->cgrp;
	struct cgroup_subsys *ss;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	do_each_subsys_mask(ss, ssid, ss_mask) {
		/*
		 * If @ss has non-root csses attached to it, can't move.
		 * If @ss is an implicit controller, it is exempt from this
		 * rule and can be stolen.
		 */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
		    !ss->implicit_on_dfl)
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	} while_each_subsys_mask();

	do_each_subsys_mask(ss, ssid, ss_mask) {
		struct cgroup_root *src_root = ss->root;
		struct cgroup *scgrp = &src_root->cgrp;
		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
		struct css_set *cset;

		WARN_ON(!css || cgroup_css(dcgrp, ss));

		/* disable from the source */
		src_root->subsys_mask &= ~(1 << ssid);
		WARN_ON(cgroup_apply_control(scgrp));
		cgroup_finalize_control(scgrp, 0);

		/* rebind */
		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
		rcu_assign_pointer(dcgrp->subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = dcgrp;

		spin_lock_irq(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
		spin_unlock_irq(&css_set_lock);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root == &cgrp_dfl_root) {
			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
		} else {
			dcgrp->subtree_control |= 1 << ssid;
			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
		}

		ret = cgroup_apply_control(dcgrp);
		if (ret)
			pr_warn("partial failure to rebind %s controller (err=%d)\n",
				ss->name, ret);

		if (ss->bind)
			ss->bind(css);
	} while_each_subsys_mask();

	kernfs_activate(dcgrp->kn);
	return 0;
}

int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root)
{
	int len = 0;
	char *buf = NULL;
	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
	struct cgroup *ns_cgroup;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
	spin_unlock_irq(&css_set_lock);

	if (len >= PATH_MAX)
		len = -ERANGE;
	else if (len > 0) {
		seq_escape(sf, buf, " \t\n\\");
		len = 0;
	}
	kfree(buf);
	return len;
}

static int parse_cgroup_root_flags(char *data, unsigned int *root_flags)
{
	char *token;

	*root_flags = 0;

	if (!data || *data == '\0')
		return 0;

	while ((token = strsep(&data, ",")) != NULL) {
		if (!strcmp(token, "nsdelegate")) {
			*root_flags |= CGRP_ROOT_NS_DELEGATE;
			continue;
		}

		pr_err("cgroup2: unknown option \"%s\"\n", token);
		return -EINVAL;
	}

	return 0;
}

static void apply_cgroup_root_flags(unsigned int root_flags)
{
	if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
		if (root_flags & CGRP_ROOT_NS_DELEGATE)
			cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
		else
			cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
	}
}
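/*
 * Illustrative usage, not in the original file: the option parsed above
 * corresponds to the cgroup2 mount option of the same name, e.g. from
 * userspace:
 *
 *	# mount -t cgroup2 -o nsdelegate none /sys/fs/cgroup
 *	# mount -o remount,nsdelegate none /sys/fs/cgroup
 *
 * Per apply_cgroup_root_flags() above, only a (re)mount performed from
 * the init cgroup namespace actually toggles the flag.
 */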
static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
		seq_puts(seq, ",nsdelegate");
	return 0;
}

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	unsigned int root_flags;
	int ret;

	ret = parse_cgroup_root_flags(data, &root_flags);
	if (ret)
		return ret;

	apply_cgroup_root_flags(root_flags);
	return 0;
}

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread().  Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(&css_set_lock);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 *
		 * Interrupts were already disabled while acquiring
		 * the css_set_lock, so we do not need to disable them
		 * again when acquiring the sighand->siglock here.
		 */
		spin_lock(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			if (!css_set_populated(cset))
				css_set_update_populated(cset, true);
			list_add_tail(&p->cg_list, &cset->tasks);
			get_css_set(cset);
			cset->nr_tasks++;
		}
		spin_unlock(&p->sighand->siglock);
	} while_each_thread(g, p);
out_unlock:
	spin_unlock_irq(&css_set_lock);
	read_unlock(&tasklist_lock);
}
1868 */ 1869 spin_lock(&p->sighand->siglock); 1870 if (!(p->flags & PF_EXITING)) { 1871 struct css_set *cset = task_css_set(p); 1872 1873 if (!css_set_populated(cset)) 1874 css_set_update_populated(cset, true); 1875 list_add_tail(&p->cg_list, &cset->tasks); 1876 get_css_set(cset); 1877 cset->nr_tasks++; 1878 } 1879 spin_unlock(&p->sighand->siglock); 1880 } while_each_thread(g, p); 1881 out_unlock: 1882 spin_unlock_irq(&css_set_lock); 1883 read_unlock(&tasklist_lock); 1884 } 1885 1886 static void init_cgroup_housekeeping(struct cgroup *cgrp) 1887 { 1888 struct cgroup_subsys *ss; 1889 int ssid; 1890 1891 INIT_LIST_HEAD(&cgrp->self.sibling); 1892 INIT_LIST_HEAD(&cgrp->self.children); 1893 INIT_LIST_HEAD(&cgrp->cset_links); 1894 INIT_LIST_HEAD(&cgrp->pidlists); 1895 mutex_init(&cgrp->pidlist_mutex); 1896 cgrp->self.cgroup = cgrp; 1897 cgrp->self.flags |= CSS_ONLINE; 1898 cgrp->dom_cgrp = cgrp; 1899 cgrp->max_descendants = INT_MAX; 1900 cgrp->max_depth = INT_MAX; 1901 INIT_LIST_HEAD(&cgrp->rstat_css_list); 1902 prev_cputime_init(&cgrp->prev_cputime); 1903 1904 for_each_subsys(ss, ssid) 1905 INIT_LIST_HEAD(&cgrp->e_csets[ssid]); 1906 1907 init_waitqueue_head(&cgrp->offline_waitq); 1908 INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent); 1909 } 1910 1911 void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts) 1912 { 1913 struct cgroup *cgrp = &root->cgrp; 1914 1915 INIT_LIST_HEAD(&root->root_list); 1916 atomic_set(&root->nr_cgrps, 1); 1917 cgrp->root = root; 1918 init_cgroup_housekeeping(cgrp); 1919 idr_init(&root->cgroup_idr); 1920 1921 root->flags = opts->flags; 1922 if (opts->release_agent) 1923 strscpy(root->release_agent_path, opts->release_agent, PATH_MAX); 1924 if (opts->name) 1925 strscpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN); 1926 if (opts->cpuset_clone_children) 1927 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); 1928 } 1929 1930 int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags) 1931 { 1932 LIST_HEAD(tmp_links); 1933 struct cgroup *root_cgrp = &root->cgrp; 1934 struct kernfs_syscall_ops *kf_sops; 1935 struct css_set *cset; 1936 int i, ret; 1937 1938 lockdep_assert_held(&cgroup_mutex); 1939 1940 ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL); 1941 if (ret < 0) 1942 goto out; 1943 root_cgrp->id = ret; 1944 root_cgrp->ancestor_ids[0] = ret; 1945 1946 ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 1947 ref_flags, GFP_KERNEL); 1948 if (ret) 1949 goto out; 1950 1951 /* 1952 * We're accessing css_set_count without locking css_set_lock here, 1953 * but that's OK - it can only be increased by someone holding 1954 * cgroup_lock, and that's us. Later rebinding may disable 1955 * controllers on the default hierarchy and thus create new csets, 1956 * which can't be more than the existing ones. Allocate 2x. 1957 */ 1958 ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links); 1959 if (ret) 1960 goto cancel_ref; 1961 1962 ret = cgroup_init_root_id(root); 1963 if (ret) 1964 goto cancel_ref; 1965 1966 kf_sops = root == &cgrp_dfl_root ? 
1967 &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops; 1968 1969 root->kf_root = kernfs_create_root(kf_sops, 1970 KERNFS_ROOT_CREATE_DEACTIVATED | 1971 KERNFS_ROOT_SUPPORT_EXPORTOP, 1972 root_cgrp); 1973 if (IS_ERR(root->kf_root)) { 1974 ret = PTR_ERR(root->kf_root); 1975 goto exit_root_id; 1976 } 1977 root_cgrp->kn = root->kf_root->kn; 1978 1979 ret = css_populate_dir(&root_cgrp->self); 1980 if (ret) 1981 goto destroy_root; 1982 1983 ret = rebind_subsystems(root, ss_mask); 1984 if (ret) 1985 goto destroy_root; 1986 1987 ret = cgroup_bpf_inherit(root_cgrp); 1988 WARN_ON_ONCE(ret); 1989 1990 trace_cgroup_setup_root(root); 1991 1992 /* 1993 * There must be no failure case after here, since rebinding takes 1994 * care of subsystems' refcounts, which are explicitly dropped in 1995 * the failure exit path. 1996 */ 1997 list_add(&root->root_list, &cgroup_roots); 1998 cgroup_root_count++; 1999 2000 /* 2001 * Link the root cgroup in this hierarchy into all the css_set 2002 * objects. 2003 */ 2004 spin_lock_irq(&css_set_lock); 2005 hash_for_each(css_set_table, i, cset, hlist) { 2006 link_css_set(&tmp_links, cset, root_cgrp); 2007 if (css_set_populated(cset)) 2008 cgroup_update_populated(root_cgrp, true); 2009 } 2010 spin_unlock_irq(&css_set_lock); 2011 2012 BUG_ON(!list_empty(&root_cgrp->self.children)); 2013 BUG_ON(atomic_read(&root->nr_cgrps) != 1); 2014 2015 kernfs_activate(root_cgrp->kn); 2016 ret = 0; 2017 goto out; 2018 2019 destroy_root: 2020 kernfs_destroy_root(root->kf_root); 2021 root->kf_root = NULL; 2022 exit_root_id: 2023 cgroup_exit_root_id(root); 2024 cancel_ref: 2025 percpu_ref_exit(&root_cgrp->self.refcnt); 2026 out: 2027 free_cgrp_cset_links(&tmp_links); 2028 return ret; 2029 } 2030 2031 struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, 2032 struct cgroup_root *root, unsigned long magic, 2033 struct cgroup_namespace *ns) 2034 { 2035 struct dentry *dentry; 2036 bool new_sb; 2037 2038 dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb); 2039 2040 /* 2041 * In non-init cgroup namespace, instead of root cgroup's dentry, 2042 * we return the dentry corresponding to the cgroupns->root_cgrp. 2043 */ 2044 if (!IS_ERR(dentry) && ns != &init_cgroup_ns) { 2045 struct dentry *nsdentry; 2046 struct cgroup *cgrp; 2047 2048 mutex_lock(&cgroup_mutex); 2049 spin_lock_irq(&css_set_lock); 2050 2051 cgrp = cset_cgroup_from_root(ns->root_cset, root); 2052 2053 spin_unlock_irq(&css_set_lock); 2054 mutex_unlock(&cgroup_mutex); 2055 2056 nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb); 2057 dput(dentry); 2058 dentry = nsdentry; 2059 } 2060 2061 if (IS_ERR(dentry) || !new_sb) 2062 cgroup_put(&root->cgrp); 2063 2064 return dentry; 2065 } 2066 2067 static struct dentry *cgroup_mount(struct file_system_type *fs_type, 2068 int flags, const char *unused_dev_name, 2069 void *data) 2070 { 2071 struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; 2072 struct dentry *dentry; 2073 int ret; 2074 2075 get_cgroup_ns(ns); 2076 2077 /* Check if the caller has permission to mount. */ 2078 if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) { 2079 put_cgroup_ns(ns); 2080 return ERR_PTR(-EPERM); 2081 } 2082 2083 /* 2084 * The first time anyone tries to mount a cgroup, enable the list 2085 * linking each css_set to its tasks and fix up all existing tasks. 
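 * (That is, the very first cgroup or cgroup2 mount after boot pays the
 * one-time cost of walking every existing task; later mounts skip it.)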
2086 */ 2087 if (!use_task_css_set_links) 2088 cgroup_enable_task_cg_lists(); 2089 2090 if (fs_type == &cgroup2_fs_type) { 2091 unsigned int root_flags; 2092 2093 ret = parse_cgroup_root_flags(data, &root_flags); 2094 if (ret) { 2095 put_cgroup_ns(ns); 2096 return ERR_PTR(ret); 2097 } 2098 2099 cgrp_dfl_visible = true; 2100 cgroup_get_live(&cgrp_dfl_root.cgrp); 2101 2102 dentry = cgroup_do_mount(&cgroup2_fs_type, flags, &cgrp_dfl_root, 2103 CGROUP2_SUPER_MAGIC, ns); 2104 if (!IS_ERR(dentry)) 2105 apply_cgroup_root_flags(root_flags); 2106 } else { 2107 dentry = cgroup1_mount(&cgroup_fs_type, flags, data, 2108 CGROUP_SUPER_MAGIC, ns); 2109 } 2110 2111 put_cgroup_ns(ns); 2112 return dentry; 2113 } 2114 2115 static void cgroup_kill_sb(struct super_block *sb) 2116 { 2117 struct kernfs_root *kf_root = kernfs_root_from_sb(sb); 2118 struct cgroup_root *root = cgroup_root_from_kf(kf_root); 2119 2120 /* 2121 * If @root doesn't have any mounts or children, start killing it. 2122 * This prevents new mounts by disabling percpu_ref_tryget_live(). 2123 * cgroup_mount() may wait for @root's release. 2124 * 2125 * And don't kill the default root. 2126 */ 2127 if (!list_empty(&root->cgrp.self.children) || 2128 root == &cgrp_dfl_root) 2129 cgroup_put(&root->cgrp); 2130 else 2131 percpu_ref_kill(&root->cgrp.self.refcnt); 2132 2133 kernfs_kill_sb(sb); 2134 } 2135 2136 struct file_system_type cgroup_fs_type = { 2137 .name = "cgroup", 2138 .mount = cgroup_mount, 2139 .kill_sb = cgroup_kill_sb, 2140 .fs_flags = FS_USERNS_MOUNT, 2141 }; 2142 2143 static struct file_system_type cgroup2_fs_type = { 2144 .name = "cgroup2", 2145 .mount = cgroup_mount, 2146 .kill_sb = cgroup_kill_sb, 2147 .fs_flags = FS_USERNS_MOUNT, 2148 }; 2149 2150 int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, 2151 struct cgroup_namespace *ns) 2152 { 2153 struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root); 2154 2155 return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen); 2156 } 2157 2158 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen, 2159 struct cgroup_namespace *ns) 2160 { 2161 int ret; 2162 2163 mutex_lock(&cgroup_mutex); 2164 spin_lock_irq(&css_set_lock); 2165 2166 ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns); 2167 2168 spin_unlock_irq(&css_set_lock); 2169 mutex_unlock(&cgroup_mutex); 2170 2171 return ret; 2172 } 2173 EXPORT_SYMBOL_GPL(cgroup_path_ns); 2174 2175 /** 2176 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy 2177 * @task: target task 2178 * @buf: the buffer to write the path into 2179 * @buflen: the length of the buffer 2180 * 2181 * Determine @task's cgroup on the first (the one with the lowest non-zero 2182 * hierarchy_id) cgroup hierarchy and copy its path into @buf. This 2183 * function grabs cgroup_mutex and shouldn't be used inside locks used by 2184 * cgroup controller callbacks. 2185 * 2186 * Return value is the same as kernfs_path(). 
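 *
 * Illustrative use, a sketch with error handling elided:
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *
 *	if (buf && task_cgroup_path(task, buf, PATH_MAX) >= 0)
 *		pr_debug("cgroup on hierarchy 1: %s\n", buf);
 *	kfree(buf);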
2187 */
2188 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2189 {
2190 struct cgroup_root *root;
2191 struct cgroup *cgrp;
2192 int hierarchy_id = 1;
2193 int ret;
2194
2195 mutex_lock(&cgroup_mutex);
2196 spin_lock_irq(&css_set_lock);
2197
2198 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
2199
2200 if (root) {
2201 cgrp = task_cgroup_from_root(task, root);
2202 ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
2203 } else {
2204 /* if no hierarchy exists, everyone is in "/" */
2205 ret = strlcpy(buf, "/", buflen);
2206 }
2207
2208 spin_unlock_irq(&css_set_lock);
2209 mutex_unlock(&cgroup_mutex);
2210 return ret;
2211 }
2212 EXPORT_SYMBOL_GPL(task_cgroup_path);
2213
2214 /**
2215 * cgroup_migrate_add_task - add a migration target task to a migration context
2216 * @task: target task
2217 * @mgctx: target migration context
2218 *
2219 * Add @task, which is a migration target, to @mgctx->tset. This function
2220 * becomes a noop if @task doesn't need to be migrated. @task's css_set
2221 * should have been added as a migration source and @task->cg_list will be
2222 * moved from the css_set's tasks list to the mg_tasks one.
2223 */
2224 static void cgroup_migrate_add_task(struct task_struct *task,
2225 struct cgroup_mgctx *mgctx)
2226 {
2227 struct css_set *cset;
2228
2229 lockdep_assert_held(&css_set_lock);
2230
2231 /* @task either already exited or can't exit until the end */
2232 if (task->flags & PF_EXITING)
2233 return;
2234
2235 /* leave @task alone if post_fork() hasn't linked it yet */
2236 if (list_empty(&task->cg_list))
2237 return;
2238
2239 cset = task_css_set(task);
2240 if (!cset->mg_src_cgrp)
2241 return;
2242
2243 mgctx->tset.nr_tasks++;
2244
2245 list_move_tail(&task->cg_list, &cset->mg_tasks);
2246 if (list_empty(&cset->mg_node))
2247 list_add_tail(&cset->mg_node,
2248 &mgctx->tset.src_csets);
2249 if (list_empty(&cset->mg_dst_cset->mg_node))
2250 list_add_tail(&cset->mg_dst_cset->mg_node,
2251 &mgctx->tset.dst_csets);
2252 }
2253
2254 /**
2255 * cgroup_taskset_first - reset taskset and return the first task
2256 * @tset: taskset of interest
2257 * @dst_cssp: output variable for the destination css
2258 *
2259 * @tset iteration is initialized and the first task is returned.
2260 */
2261 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2262 struct cgroup_subsys_state **dst_cssp)
2263 {
2264 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2265 tset->cur_task = NULL;
2266
2267 return cgroup_taskset_next(tset, dst_cssp);
2268 }
2269
2270 /**
2271 * cgroup_taskset_next - iterate to the next task in taskset
2272 * @tset: taskset of interest
2273 * @dst_cssp: output variable for the destination css
2274 *
2275 * Return the next task in @tset. Iteration must have been initialized
2276 * with cgroup_taskset_first().
2277 */
2278 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2279 struct cgroup_subsys_state **dst_cssp)
2280 {
2281 struct css_set *cset = tset->cur_cset;
2282 struct task_struct *task = tset->cur_task;
2283
2284 while (&cset->mg_node != tset->csets) {
2285 if (!task)
2286 task = list_first_entry(&cset->mg_tasks,
2287 struct task_struct, cg_list);
2288 else
2289 task = list_next_entry(task, cg_list);
2290
2291 if (&task->cg_list != &cset->mg_tasks) {
2292 tset->cur_cset = cset;
2293 tset->cur_task = task;
2294
2295 /*
2296 * This function may be called both before and
2297 * after cgroup_migrate_execute().
The two cases
2298 * can be distinguished by looking at whether @cset
2299 * has its ->mg_dst_cset set.
2300 */
2301 if (cset->mg_dst_cset)
2302 *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2303 else
2304 *dst_cssp = cset->subsys[tset->ssid];
2305
2306 return task;
2307 }
2308
2309 cset = list_next_entry(cset, mg_node);
2310 task = NULL;
2311 }
2312
2313 return NULL;
2314 }
2315
2316 /**
2317 * cgroup_migrate_execute - migrate a taskset
2318 * @mgctx: migration context
2319 *
2320 * Migrate tasks in @mgctx as set up by the migration preparation functions.
2321 * This function fails iff one of the ->can_attach callbacks fails and
2322 * guarantees that either all or none of the tasks in @mgctx are migrated.
2323 * @mgctx is consumed regardless of success.
2324 */
2325 static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
2326 {
2327 struct cgroup_taskset *tset = &mgctx->tset;
2328 struct cgroup_subsys *ss;
2329 struct task_struct *task, *tmp_task;
2330 struct css_set *cset, *tmp_cset;
2331 int ssid, failed_ssid, ret;
2332
2333 /* check that we can legitimately attach to the cgroup */
2334 if (tset->nr_tasks) {
2335 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2336 if (ss->can_attach) {
2337 tset->ssid = ssid;
2338 ret = ss->can_attach(tset);
2339 if (ret) {
2340 failed_ssid = ssid;
2341 goto out_cancel_attach;
2342 }
2343 }
2344 } while_each_subsys_mask();
2345 }
2346
2347 /*
2348 * Now that we're guaranteed success, proceed to move all tasks to
2349 * the new cgroup. There are no failure cases after here, so this
2350 * is the commit point.
2351 */
2352 spin_lock_irq(&css_set_lock);
2353 list_for_each_entry(cset, &tset->src_csets, mg_node) {
2354 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2355 struct css_set *from_cset = task_css_set(task);
2356 struct css_set *to_cset = cset->mg_dst_cset;
2357
2358 get_css_set(to_cset);
2359 to_cset->nr_tasks++;
2360 css_set_move_task(task, from_cset, to_cset, true);
2361 put_css_set_locked(from_cset);
2362 from_cset->nr_tasks--;
2363 }
2364 }
2365 spin_unlock_irq(&css_set_lock);
2366
2367 /*
2368 * Migration is committed, all target tasks are now on dst_csets.
2369 * Nothing is sensitive to fork() after this point. Notify
2370 * controllers that migration is complete.
2371 */
2372 tset->csets = &tset->dst_csets;
2373
2374 if (tset->nr_tasks) {
2375 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2376 if (ss->attach) {
2377 tset->ssid = ssid;
2378 ss->attach(tset);
2379 }
2380 } while_each_subsys_mask();
2381 }
2382
2383 ret = 0;
2384 goto out_release_tset;
2385
2386 out_cancel_attach:
2387 if (tset->nr_tasks) {
2388 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2389 if (ssid == failed_ssid)
2390 break;
2391 if (ss->cancel_attach) {
2392 tset->ssid = ssid;
2393 ss->cancel_attach(tset);
2394 }
2395 } while_each_subsys_mask();
2396 }
2397 out_release_tset:
2398 spin_lock_irq(&css_set_lock);
2399 list_splice_init(&tset->dst_csets, &tset->src_csets);
2400 list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2401 list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2402 list_del_init(&cset->mg_node);
2403 }
2404 spin_unlock_irq(&css_set_lock);
2405
2406 /*
2407 * Re-initialize the cgroup_taskset structure in case it is reused
2408 * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
2409 * iteration.
2410 */
2411 tset->nr_tasks = 0;
2412 tset->csets = &tset->src_csets;
2413 return ret;
2414 }
2415
2416 /**
2417 * cgroup_migrate_vet_dst - verify whether a cgroup can be a migration destination
2418 * @dst_cgrp: destination cgroup to test
2419 *
2420 * On the default hierarchy, except for the mixable, (possible) thread root
2421 * and threaded cgroups, subtree_control must be zero for migration
2422 * destination cgroups with tasks so that child cgroups don't compete
2423 * against tasks.
2424 */
2425 int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp)
2426 {
2427 /* v1 doesn't have any restriction */
2428 if (!cgroup_on_dfl(dst_cgrp))
2429 return 0;
2430
2431 /* verify @dst_cgrp can host resources */
2432 if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp))
2433 return -EOPNOTSUPP;
2434
2435 /* mixables don't care */
2436 if (cgroup_is_mixable(dst_cgrp))
2437 return 0;
2438
2439 /*
2440 * If @dst_cgrp is already or can become a thread root or is
2441 * threaded, it doesn't matter.
2442 */
2443 if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp))
2444 return 0;
2445
2446 /* apply no-internal-process constraint */
2447 if (dst_cgrp->subtree_control)
2448 return -EBUSY;
2449
2450 return 0;
2451 }
2452
2453 /**
2454 * cgroup_migrate_finish - cleanup after attach
2455 * @mgctx: migration context
2456 *
2457 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
2458 * those functions for details.
2459 */
2460 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
2461 {
2462 LIST_HEAD(preloaded);
2463 struct css_set *cset, *tmp_cset;
2464
2465 lockdep_assert_held(&cgroup_mutex);
2466
2467 spin_lock_irq(&css_set_lock);
2468
2469 list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded);
2470 list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded);
2471
2472 list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) {
2473 cset->mg_src_cgrp = NULL;
2474 cset->mg_dst_cgrp = NULL;
2475 cset->mg_dst_cset = NULL;
2476 list_del_init(&cset->mg_preload_node);
2477 put_css_set_locked(cset);
2478 }
2479
2480 spin_unlock_irq(&css_set_lock);
2481 }
2482
2483 /**
2484 * cgroup_migrate_add_src - add a migration source css_set
2485 * @src_cset: the source css_set to add
2486 * @dst_cgrp: the destination cgroup
2487 * @mgctx: migration context
2488 *
2489 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
2490 * @src_cset and add it to @mgctx->preloaded_src_csets, which should later
2491 * be cleaned up by cgroup_migrate_finish().
2492 *
2493 * This function may be called without holding cgroup_threadgroup_rwsem
2494 * even if the target is a process. Threads may be created and destroyed
2495 * but as long as cgroup_mutex is not dropped, no new css_set can be put
2496 * into play and the preloaded css_sets are guaranteed to cover all
2497 * migrations.
2498 */
2499 void cgroup_migrate_add_src(struct css_set *src_cset,
2500 struct cgroup *dst_cgrp,
2501 struct cgroup_mgctx *mgctx)
2502 {
2503 struct cgroup *src_cgrp;
2504
2505 lockdep_assert_held(&cgroup_mutex);
2506 lockdep_assert_held(&css_set_lock);
2507
2508 /*
2509 * If ->dead, @src_cset is associated with one or more dead cgroups
2510 * and doesn't contain any migratable tasks. Ignore it early so
2511 * that the rest of the migration path doesn't get confused by it.
2512 */
2513 if (src_cset->dead)
2514 return;
2515
2516 src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2517
2518 if (!list_empty(&src_cset->mg_preload_node))
2519 return;
2520
2521 WARN_ON(src_cset->mg_src_cgrp);
2522 WARN_ON(src_cset->mg_dst_cgrp);
2523 WARN_ON(!list_empty(&src_cset->mg_tasks));
2524 WARN_ON(!list_empty(&src_cset->mg_node));
2525
2526 src_cset->mg_src_cgrp = src_cgrp;
2527 src_cset->mg_dst_cgrp = dst_cgrp;
2528 get_css_set(src_cset);
2529 list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets);
2530 }
2531
2532 /**
2533 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2534 * @mgctx: migration context
2535 *
2536 * Tasks are about to be moved and all the source css_sets have been
2537 * preloaded to @mgctx->preloaded_src_csets. This function looks up and
2538 * pins all destination css_sets, links each to its source, and appends them
2539 * to @mgctx->preloaded_dst_csets.
2540 *
2541 * This function must be called after cgroup_migrate_add_src() has been
2542 * called on each migration source css_set. After migration is performed
2543 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2544 * @mgctx.
2545 */
2546 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
2547 {
2548 struct css_set *src_cset, *tmp_cset;
2549
2550 lockdep_assert_held(&cgroup_mutex);
2551
2552 /* look up the dst cset for each src cset and link it to src */
2553 list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
2554 mg_preload_node) {
2555 struct css_set *dst_cset;
2556 struct cgroup_subsys *ss;
2557 int ssid;
2558
2559 dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
2560 if (!dst_cset)
2561 goto err;
2562
2563 WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2564
2565 /*
2566 * If src cset equals dst, it's a noop. Drop the src.
2567 * cgroup_migrate() will skip the cset too. Note that we
2568 * can't handle src == dst as some nodes are used by both.
2569 */
2570 if (src_cset == dst_cset) {
2571 src_cset->mg_src_cgrp = NULL;
2572 src_cset->mg_dst_cgrp = NULL;
2573 list_del_init(&src_cset->mg_preload_node);
2574 put_css_set(src_cset);
2575 put_css_set(dst_cset);
2576 continue;
2577 }
2578
2579 src_cset->mg_dst_cset = dst_cset;
2580
2581 if (list_empty(&dst_cset->mg_preload_node))
2582 list_add_tail(&dst_cset->mg_preload_node,
2583 &mgctx->preloaded_dst_csets);
2584 else
2585 put_css_set(dst_cset);
2586
2587 for_each_subsys(ss, ssid)
2588 if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
2589 mgctx->ss_mask |= 1 << ssid;
2590 }
2591
2592 return 0;
2593 err:
2594 cgroup_migrate_finish(mgctx);
2595 return -ENOMEM;
2596 }
2597
2598 /**
2599 * cgroup_migrate - migrate a process or task to a cgroup
2600 * @leader: the leader of the process or the task to migrate
2601 * @threadgroup: whether @leader points to the whole process or a single task
2602 * @mgctx: migration context
2603 *
2604 * Migrate a process or task denoted by @leader. If migrating a process,
2605 * the caller must be holding cgroup_threadgroup_rwsem. The caller is also
2606 * responsible for invoking cgroup_migrate_add_src() and
2607 * cgroup_migrate_prepare_dst() on the targets before invoking this
2608 * function and following up with cgroup_migrate_finish().
2609 *
2610 * As long as a controller's ->can_attach() doesn't fail, this function is
2611 * guaranteed to succeed.
This means that, excluding ->can_attach()
2612 * failure, when migrating multiple targets, the success or failure can be
2613 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2614 * actually starting the migration.
2615 */
2616 int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2617 struct cgroup_mgctx *mgctx)
2618 {
2619 struct task_struct *task;
2620
2621 /*
2622 * Prevent freeing of tasks while we take a snapshot. Tasks that are
2623 * already PF_EXITING could be freed from underneath us unless we
2624 * take an rcu_read_lock.
2625 */
2626 spin_lock_irq(&css_set_lock);
2627 rcu_read_lock();
2628 task = leader;
2629 do {
2630 cgroup_migrate_add_task(task, mgctx);
2631 if (!threadgroup)
2632 break;
2633 } while_each_thread(leader, task);
2634 rcu_read_unlock();
2635 spin_unlock_irq(&css_set_lock);
2636
2637 return cgroup_migrate_execute(mgctx);
2638 }
2639
2640 /**
2641 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2642 * @dst_cgrp: the cgroup to attach to
2643 * @leader: the task or the leader of the threadgroup to be attached
2644 * @threadgroup: attach the whole threadgroup?
2645 *
2646 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2647 */
2648 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
2649 bool threadgroup)
2650 {
2651 DEFINE_CGROUP_MGCTX(mgctx);
2652 struct task_struct *task;
2653 int ret;
2654
2655 ret = cgroup_migrate_vet_dst(dst_cgrp);
2656 if (ret)
2657 return ret;
2658
2659 /* look up all src csets */
2660 spin_lock_irq(&css_set_lock);
2661 rcu_read_lock();
2662 task = leader;
2663 do {
2664 cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
2665 if (!threadgroup)
2666 break;
2667 } while_each_thread(leader, task);
2668 rcu_read_unlock();
2669 spin_unlock_irq(&css_set_lock);
2670
2671 /* prepare dst csets and commit */
2672 ret = cgroup_migrate_prepare_dst(&mgctx);
2673 if (!ret)
2674 ret = cgroup_migrate(leader, threadgroup, &mgctx);
2675
2676 cgroup_migrate_finish(&mgctx);
2677
2678 if (!ret)
2679 TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup);
2680
2681 return ret;
2682 }
2683
2684 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
2685 __acquires(&cgroup_threadgroup_rwsem)
2686 {
2687 struct task_struct *tsk;
2688 pid_t pid;
2689
2690 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2691 return ERR_PTR(-EINVAL);
2692
2693 percpu_down_write(&cgroup_threadgroup_rwsem);
2694
2695 rcu_read_lock();
2696 if (pid) {
2697 tsk = find_task_by_vpid(pid);
2698 if (!tsk) {
2699 tsk = ERR_PTR(-ESRCH);
2700 goto out_unlock_threadgroup;
2701 }
2702 } else {
2703 tsk = current;
2704 }
2705
2706 if (threadgroup)
2707 tsk = tsk->group_leader;
2708
2709 /*
2710 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2711 * If userland migrates such a kthread to a non-root cgroup, it can
2712 * become trapped in a cpuset, or an RT kthread may be born in a
2713 * cgroup with no rt_runtime allocated. Just say no.
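 * (kthread_bind(), for example, sets PF_NO_SETAFFINITY, which is what
 * makes such kthreads refuse migration here.)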
2714 */ 2715 if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) { 2716 tsk = ERR_PTR(-EINVAL); 2717 goto out_unlock_threadgroup; 2718 } 2719 2720 get_task_struct(tsk); 2721 goto out_unlock_rcu; 2722 2723 out_unlock_threadgroup: 2724 percpu_up_write(&cgroup_threadgroup_rwsem); 2725 out_unlock_rcu: 2726 rcu_read_unlock(); 2727 return tsk; 2728 } 2729 2730 void cgroup_procs_write_finish(struct task_struct *task) 2731 __releases(&cgroup_threadgroup_rwsem) 2732 { 2733 struct cgroup_subsys *ss; 2734 int ssid; 2735 2736 /* release reference from cgroup_procs_write_start() */ 2737 put_task_struct(task); 2738 2739 percpu_up_write(&cgroup_threadgroup_rwsem); 2740 for_each_subsys(ss, ssid) 2741 if (ss->post_attach) 2742 ss->post_attach(); 2743 } 2744 2745 static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask) 2746 { 2747 struct cgroup_subsys *ss; 2748 bool printed = false; 2749 int ssid; 2750 2751 do_each_subsys_mask(ss, ssid, ss_mask) { 2752 if (printed) 2753 seq_putc(seq, ' '); 2754 seq_printf(seq, "%s", ss->name); 2755 printed = true; 2756 } while_each_subsys_mask(); 2757 if (printed) 2758 seq_putc(seq, '\n'); 2759 } 2760 2761 /* show controllers which are enabled from the parent */ 2762 static int cgroup_controllers_show(struct seq_file *seq, void *v) 2763 { 2764 struct cgroup *cgrp = seq_css(seq)->cgroup; 2765 2766 cgroup_print_ss_mask(seq, cgroup_control(cgrp)); 2767 return 0; 2768 } 2769 2770 /* show controllers which are enabled for a given cgroup's children */ 2771 static int cgroup_subtree_control_show(struct seq_file *seq, void *v) 2772 { 2773 struct cgroup *cgrp = seq_css(seq)->cgroup; 2774 2775 cgroup_print_ss_mask(seq, cgrp->subtree_control); 2776 return 0; 2777 } 2778 2779 /** 2780 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy 2781 * @cgrp: root of the subtree to update csses for 2782 * 2783 * @cgrp's control masks have changed and its subtree's css associations 2784 * need to be updated accordingly. This function looks up all css_sets 2785 * which are attached to the subtree, creates the matching updated css_sets 2786 * and migrates the tasks to the new ones. 
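 *
 * For example, enabling a controller in a populated cgroup's
 * cgroup.subtree_control ends up here: every affected task must be
 * attached to a new css_set which also carries the controller's css
 * before the change becomes visible.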
2787 */ 2788 static int cgroup_update_dfl_csses(struct cgroup *cgrp) 2789 { 2790 DEFINE_CGROUP_MGCTX(mgctx); 2791 struct cgroup_subsys_state *d_css; 2792 struct cgroup *dsct; 2793 struct css_set *src_cset; 2794 int ret; 2795 2796 lockdep_assert_held(&cgroup_mutex); 2797 2798 percpu_down_write(&cgroup_threadgroup_rwsem); 2799 2800 /* look up all csses currently attached to @cgrp's subtree */ 2801 spin_lock_irq(&css_set_lock); 2802 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 2803 struct cgrp_cset_link *link; 2804 2805 list_for_each_entry(link, &dsct->cset_links, cset_link) 2806 cgroup_migrate_add_src(link->cset, dsct, &mgctx); 2807 } 2808 spin_unlock_irq(&css_set_lock); 2809 2810 /* NULL dst indicates self on default hierarchy */ 2811 ret = cgroup_migrate_prepare_dst(&mgctx); 2812 if (ret) 2813 goto out_finish; 2814 2815 spin_lock_irq(&css_set_lock); 2816 list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) { 2817 struct task_struct *task, *ntask; 2818 2819 /* all tasks in src_csets need to be migrated */ 2820 list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list) 2821 cgroup_migrate_add_task(task, &mgctx); 2822 } 2823 spin_unlock_irq(&css_set_lock); 2824 2825 ret = cgroup_migrate_execute(&mgctx); 2826 out_finish: 2827 cgroup_migrate_finish(&mgctx); 2828 percpu_up_write(&cgroup_threadgroup_rwsem); 2829 return ret; 2830 } 2831 2832 /** 2833 * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses 2834 * @cgrp: root of the target subtree 2835 * 2836 * Because css offlining is asynchronous, userland may try to re-enable a 2837 * controller while the previous css is still around. This function grabs 2838 * cgroup_mutex and drains the previous css instances of @cgrp's subtree. 2839 */ 2840 void cgroup_lock_and_drain_offline(struct cgroup *cgrp) 2841 __acquires(&cgroup_mutex) 2842 { 2843 struct cgroup *dsct; 2844 struct cgroup_subsys_state *d_css; 2845 struct cgroup_subsys *ss; 2846 int ssid; 2847 2848 restart: 2849 mutex_lock(&cgroup_mutex); 2850 2851 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 2852 for_each_subsys(ss, ssid) { 2853 struct cgroup_subsys_state *css = cgroup_css(dsct, ss); 2854 DEFINE_WAIT(wait); 2855 2856 if (!css || !percpu_ref_is_dying(&css->refcnt)) 2857 continue; 2858 2859 cgroup_get_live(dsct); 2860 prepare_to_wait(&dsct->offline_waitq, &wait, 2861 TASK_UNINTERRUPTIBLE); 2862 2863 mutex_unlock(&cgroup_mutex); 2864 schedule(); 2865 finish_wait(&dsct->offline_waitq, &wait); 2866 2867 cgroup_put(dsct); 2868 goto restart; 2869 } 2870 } 2871 } 2872 2873 /** 2874 * cgroup_save_control - save control masks and dom_cgrp of a subtree 2875 * @cgrp: root of the target subtree 2876 * 2877 * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the 2878 * respective old_ prefixed fields for @cgrp's subtree including @cgrp 2879 * itself. 
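 *
 * The saved state is what cgroup_restore_control() brings back when
 * cgroup_finalize_control() is called with a non-zero @ret.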
2880 */ 2881 static void cgroup_save_control(struct cgroup *cgrp) 2882 { 2883 struct cgroup *dsct; 2884 struct cgroup_subsys_state *d_css; 2885 2886 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 2887 dsct->old_subtree_control = dsct->subtree_control; 2888 dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; 2889 dsct->old_dom_cgrp = dsct->dom_cgrp; 2890 } 2891 } 2892 2893 /** 2894 * cgroup_propagate_control - refresh control masks of a subtree 2895 * @cgrp: root of the target subtree 2896 * 2897 * For @cgrp and its subtree, ensure ->subtree_ss_mask matches 2898 * ->subtree_control and propagate controller availability through the 2899 * subtree so that descendants don't have unavailable controllers enabled. 2900 */ 2901 static void cgroup_propagate_control(struct cgroup *cgrp) 2902 { 2903 struct cgroup *dsct; 2904 struct cgroup_subsys_state *d_css; 2905 2906 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 2907 dsct->subtree_control &= cgroup_control(dsct); 2908 dsct->subtree_ss_mask = 2909 cgroup_calc_subtree_ss_mask(dsct->subtree_control, 2910 cgroup_ss_mask(dsct)); 2911 } 2912 } 2913 2914 /** 2915 * cgroup_restore_control - restore control masks and dom_cgrp of a subtree 2916 * @cgrp: root of the target subtree 2917 * 2918 * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the 2919 * respective old_ prefixed fields for @cgrp's subtree including @cgrp 2920 * itself. 2921 */ 2922 static void cgroup_restore_control(struct cgroup *cgrp) 2923 { 2924 struct cgroup *dsct; 2925 struct cgroup_subsys_state *d_css; 2926 2927 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 2928 dsct->subtree_control = dsct->old_subtree_control; 2929 dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; 2930 dsct->dom_cgrp = dsct->old_dom_cgrp; 2931 } 2932 } 2933 2934 static bool css_visible(struct cgroup_subsys_state *css) 2935 { 2936 struct cgroup_subsys *ss = css->ss; 2937 struct cgroup *cgrp = css->cgroup; 2938 2939 if (cgroup_control(cgrp) & (1 << ss->id)) 2940 return true; 2941 if (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) 2942 return false; 2943 return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl; 2944 } 2945 2946 /** 2947 * cgroup_apply_control_enable - enable or show csses according to control 2948 * @cgrp: root of the target subtree 2949 * 2950 * Walk @cgrp's subtree and create new csses or make the existing ones 2951 * visible. A css is created invisible if it's being implicitly enabled 2952 * through dependency. An invisible css is made visible when the userland 2953 * explicitly enables it. 2954 * 2955 * Returns 0 on success, -errno on failure. On failure, csses which have 2956 * been processed already aren't cleaned up. The caller is responsible for 2957 * cleaning up with cgroup_apply_control_disable(). 
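 *
 * (A css which is only enabled implicitly - e.g. pulled in through another
 * subsystem's ->depends_on mask - is created here without its interface
 * files; the files appear, again through this function, once userland
 * enables the controller explicitly.)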
2958 */
2959 static int cgroup_apply_control_enable(struct cgroup *cgrp)
2960 {
2961 struct cgroup *dsct;
2962 struct cgroup_subsys_state *d_css;
2963 struct cgroup_subsys *ss;
2964 int ssid, ret;
2965
2966 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2967 for_each_subsys(ss, ssid) {
2968 struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
2969
2970 WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
2971
2972 if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
2973 continue;
2974
2975 if (!css) {
2976 css = css_create(dsct, ss);
2977 if (IS_ERR(css))
2978 return PTR_ERR(css);
2979 }
2980
2981 if (css_visible(css)) {
2982 ret = css_populate_dir(css);
2983 if (ret)
2984 return ret;
2985 }
2986 }
2987 }
2988
2989 return 0;
2990 }
2991
2992 /**
2993 * cgroup_apply_control_disable - kill or hide csses according to control
2994 * @cgrp: root of the target subtree
2995 *
2996 * Walk @cgrp's subtree and kill and hide csses so that they match
2997 * cgroup_ss_mask() and css_visible().
2998 *
2999 * A css is hidden when the userland requests it to be disabled while other
3000 * subsystems are still depending on it. The css must not actively control
3001 * resources and must be in the vanilla state if it's made visible again later.
3002 * Controllers which may be depended upon should provide ->css_reset() for
3003 * this purpose.
3004 */
3005 static void cgroup_apply_control_disable(struct cgroup *cgrp)
3006 {
3007 struct cgroup *dsct;
3008 struct cgroup_subsys_state *d_css;
3009 struct cgroup_subsys *ss;
3010 int ssid;
3011
3012 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3013 for_each_subsys(ss, ssid) {
3014 struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3015
3016 WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
3017
3018 if (!css)
3019 continue;
3020
3021 if (css->parent &&
3022 !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
3023 kill_css(css);
3024 } else if (!css_visible(css)) {
3025 css_clear_dir(css);
3026 if (ss->css_reset)
3027 ss->css_reset(css);
3028 }
3029 }
3030 }
3031 }
3032
3033 /**
3034 * cgroup_apply_control - apply control mask updates to the subtree
3035 * @cgrp: root of the target subtree
3036 *
3037 * Subsystems can be enabled and disabled in a subtree using the following
3038 * steps.
3039 *
3040 * 1. Call cgroup_save_control() to stash the current state.
3041 * 2. Update ->subtree_control masks in the subtree as desired.
3042 * 3. Call cgroup_apply_control() to apply the changes.
3043 * 4. Optionally perform other related operations.
3044 * 5. Call cgroup_finalize_control() to finish up.
3045 *
3046 * This function implements step 3 and propagates the mask changes
3047 * throughout @cgrp's subtree, updates csses accordingly and performs
3048 * process migrations.
3049 */
3050 static int cgroup_apply_control(struct cgroup *cgrp)
3051 {
3052 int ret;
3053
3054 cgroup_propagate_control(cgrp);
3055
3056 ret = cgroup_apply_control_enable(cgrp);
3057 if (ret)
3058 return ret;
3059
3060 /*
3061 * At this point, cgroup_e_css_by_mask() results reflect the new csses
3062 * making the following cgroup_update_dfl_csses() properly update
3063 * css associations of all tasks in the subtree.
3064 */
3065 ret = cgroup_update_dfl_csses(cgrp);
3066 if (ret)
3067 return ret;
3068
3069 return 0;
3070 }
3071
3072 /**
3073 * cgroup_finalize_control - finalize control mask update
3074 * @cgrp: root of the target subtree
3075 * @ret: the result of the update
3076 *
3077 * Finalize control mask update. See cgroup_apply_control() for more info.
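 *
 * Together with cgroup_save_control(), this gives control mask updates the
 * usual transactional shape; cgroup_subtree_control_write() below does
 * roughly:
 *
 *	cgroup_save_control(cgrp);
 *	cgrp->subtree_control |= enable;
 *	cgrp->subtree_control &= ~disable;
 *	ret = cgroup_apply_control(cgrp);
 *	cgroup_finalize_control(cgrp, ret);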
3078 */ 3079 static void cgroup_finalize_control(struct cgroup *cgrp, int ret) 3080 { 3081 if (ret) { 3082 cgroup_restore_control(cgrp); 3083 cgroup_propagate_control(cgrp); 3084 } 3085 3086 cgroup_apply_control_disable(cgrp); 3087 } 3088 3089 static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u16 enable) 3090 { 3091 u16 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask; 3092 3093 /* if nothing is getting enabled, nothing to worry about */ 3094 if (!enable) 3095 return 0; 3096 3097 /* can @cgrp host any resources? */ 3098 if (!cgroup_is_valid_domain(cgrp->dom_cgrp)) 3099 return -EOPNOTSUPP; 3100 3101 /* mixables don't care */ 3102 if (cgroup_is_mixable(cgrp)) 3103 return 0; 3104 3105 if (domain_enable) { 3106 /* can't enable domain controllers inside a thread subtree */ 3107 if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp)) 3108 return -EOPNOTSUPP; 3109 } else { 3110 /* 3111 * Threaded controllers can handle internal competitions 3112 * and are always allowed inside a (prospective) thread 3113 * subtree. 3114 */ 3115 if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp)) 3116 return 0; 3117 } 3118 3119 /* 3120 * Controllers can't be enabled for a cgroup with tasks to avoid 3121 * child cgroups competing against tasks. 3122 */ 3123 if (cgroup_has_tasks(cgrp)) 3124 return -EBUSY; 3125 3126 return 0; 3127 } 3128 3129 /* change the enabled child controllers for a cgroup in the default hierarchy */ 3130 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of, 3131 char *buf, size_t nbytes, 3132 loff_t off) 3133 { 3134 u16 enable = 0, disable = 0; 3135 struct cgroup *cgrp, *child; 3136 struct cgroup_subsys *ss; 3137 char *tok; 3138 int ssid, ret; 3139 3140 /* 3141 * Parse input - space separated list of subsystem names prefixed 3142 * with either + or -. 3143 */ 3144 buf = strstrip(buf); 3145 while ((tok = strsep(&buf, " "))) { 3146 if (tok[0] == '\0') 3147 continue; 3148 do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) { 3149 if (!cgroup_ssid_enabled(ssid) || 3150 strcmp(tok + 1, ss->name)) 3151 continue; 3152 3153 if (*tok == '+') { 3154 enable |= 1 << ssid; 3155 disable &= ~(1 << ssid); 3156 } else if (*tok == '-') { 3157 disable |= 1 << ssid; 3158 enable &= ~(1 << ssid); 3159 } else { 3160 return -EINVAL; 3161 } 3162 break; 3163 } while_each_subsys_mask(); 3164 if (ssid == CGROUP_SUBSYS_COUNT) 3165 return -EINVAL; 3166 } 3167 3168 cgrp = cgroup_kn_lock_live(of->kn, true); 3169 if (!cgrp) 3170 return -ENODEV; 3171 3172 for_each_subsys(ss, ssid) { 3173 if (enable & (1 << ssid)) { 3174 if (cgrp->subtree_control & (1 << ssid)) { 3175 enable &= ~(1 << ssid); 3176 continue; 3177 } 3178 3179 if (!(cgroup_control(cgrp) & (1 << ssid))) { 3180 ret = -ENOENT; 3181 goto out_unlock; 3182 } 3183 } else if (disable & (1 << ssid)) { 3184 if (!(cgrp->subtree_control & (1 << ssid))) { 3185 disable &= ~(1 << ssid); 3186 continue; 3187 } 3188 3189 /* a child has it enabled? 
*/
3190 cgroup_for_each_live_child(child, cgrp) {
3191 if (child->subtree_control & (1 << ssid)) {
3192 ret = -EBUSY;
3193 goto out_unlock;
3194 }
3195 }
3196 }
3197 }
3198
3199 if (!enable && !disable) {
3200 ret = 0;
3201 goto out_unlock;
3202 }
3203
3204 ret = cgroup_vet_subtree_control_enable(cgrp, enable);
3205 if (ret)
3206 goto out_unlock;
3207
3208 /* save and update control masks and prepare csses */
3209 cgroup_save_control(cgrp);
3210
3211 cgrp->subtree_control |= enable;
3212 cgrp->subtree_control &= ~disable;
3213
3214 ret = cgroup_apply_control(cgrp);
3215 cgroup_finalize_control(cgrp, ret);
3216 if (ret)
3217 goto out_unlock;
3218
3219 kernfs_activate(cgrp->kn);
3220 out_unlock:
3221 cgroup_kn_unlock(of->kn);
3222 return ret ?: nbytes;
3223 }
3224
3225 /**
3226 * cgroup_enable_threaded - make @cgrp threaded
3227 * @cgrp: the target cgroup
3228 *
3229 * Called when "threaded" is written to the cgroup.type interface file and
3230 * tries to make @cgrp threaded and join the parent's resource domain.
3231 * This function is never called on the root cgroup as cgroup.type doesn't
3232 * exist on it.
3233 */
3234 static int cgroup_enable_threaded(struct cgroup *cgrp)
3235 {
3236 struct cgroup *parent = cgroup_parent(cgrp);
3237 struct cgroup *dom_cgrp = parent->dom_cgrp;
3238 struct cgroup *dsct;
3239 struct cgroup_subsys_state *d_css;
3240 int ret;
3241
3242 lockdep_assert_held(&cgroup_mutex);
3243
3244 /* noop if already threaded */
3245 if (cgroup_is_threaded(cgrp))
3246 return 0;
3247
3248 /*
3249 * If @cgrp is populated or has domain controllers enabled, it
3250 * can't be switched. While the below cgroup_can_be_thread_root()
3251 * test can catch the same conditions, that's only when @parent is
3252 * not mixable, so let's check it explicitly.
3253 */
3254 if (cgroup_is_populated(cgrp) ||
3255 cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
3256 return -EOPNOTSUPP;
3257
3258 /* we're joining the parent's domain, ensure its validity */
3259 if (!cgroup_is_valid_domain(dom_cgrp) ||
3260 !cgroup_can_be_thread_root(dom_cgrp))
3261 return -EOPNOTSUPP;
3262
3263 /*
3264 * The following shouldn't cause actual migrations and should
3265 * always succeed.
3266 */ 3267 cgroup_save_control(cgrp); 3268 3269 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) 3270 if (dsct == cgrp || cgroup_is_threaded(dsct)) 3271 dsct->dom_cgrp = dom_cgrp; 3272 3273 ret = cgroup_apply_control(cgrp); 3274 if (!ret) 3275 parent->nr_threaded_children++; 3276 3277 cgroup_finalize_control(cgrp, ret); 3278 return ret; 3279 } 3280 3281 static int cgroup_type_show(struct seq_file *seq, void *v) 3282 { 3283 struct cgroup *cgrp = seq_css(seq)->cgroup; 3284 3285 if (cgroup_is_threaded(cgrp)) 3286 seq_puts(seq, "threaded\n"); 3287 else if (!cgroup_is_valid_domain(cgrp)) 3288 seq_puts(seq, "domain invalid\n"); 3289 else if (cgroup_is_thread_root(cgrp)) 3290 seq_puts(seq, "domain threaded\n"); 3291 else 3292 seq_puts(seq, "domain\n"); 3293 3294 return 0; 3295 } 3296 3297 static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf, 3298 size_t nbytes, loff_t off) 3299 { 3300 struct cgroup *cgrp; 3301 int ret; 3302 3303 /* only switching to threaded mode is supported */ 3304 if (strcmp(strstrip(buf), "threaded")) 3305 return -EINVAL; 3306 3307 cgrp = cgroup_kn_lock_live(of->kn, false); 3308 if (!cgrp) 3309 return -ENOENT; 3310 3311 /* threaded can only be enabled */ 3312 ret = cgroup_enable_threaded(cgrp); 3313 3314 cgroup_kn_unlock(of->kn); 3315 return ret ?: nbytes; 3316 } 3317 3318 static int cgroup_max_descendants_show(struct seq_file *seq, void *v) 3319 { 3320 struct cgroup *cgrp = seq_css(seq)->cgroup; 3321 int descendants = READ_ONCE(cgrp->max_descendants); 3322 3323 if (descendants == INT_MAX) 3324 seq_puts(seq, "max\n"); 3325 else 3326 seq_printf(seq, "%d\n", descendants); 3327 3328 return 0; 3329 } 3330 3331 static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of, 3332 char *buf, size_t nbytes, loff_t off) 3333 { 3334 struct cgroup *cgrp; 3335 int descendants; 3336 ssize_t ret; 3337 3338 buf = strstrip(buf); 3339 if (!strcmp(buf, "max")) { 3340 descendants = INT_MAX; 3341 } else { 3342 ret = kstrtoint(buf, 0, &descendants); 3343 if (ret) 3344 return ret; 3345 } 3346 3347 if (descendants < 0) 3348 return -ERANGE; 3349 3350 cgrp = cgroup_kn_lock_live(of->kn, false); 3351 if (!cgrp) 3352 return -ENOENT; 3353 3354 cgrp->max_descendants = descendants; 3355 3356 cgroup_kn_unlock(of->kn); 3357 3358 return nbytes; 3359 } 3360 3361 static int cgroup_max_depth_show(struct seq_file *seq, void *v) 3362 { 3363 struct cgroup *cgrp = seq_css(seq)->cgroup; 3364 int depth = READ_ONCE(cgrp->max_depth); 3365 3366 if (depth == INT_MAX) 3367 seq_puts(seq, "max\n"); 3368 else 3369 seq_printf(seq, "%d\n", depth); 3370 3371 return 0; 3372 } 3373 3374 static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of, 3375 char *buf, size_t nbytes, loff_t off) 3376 { 3377 struct cgroup *cgrp; 3378 ssize_t ret; 3379 int depth; 3380 3381 buf = strstrip(buf); 3382 if (!strcmp(buf, "max")) { 3383 depth = INT_MAX; 3384 } else { 3385 ret = kstrtoint(buf, 0, &depth); 3386 if (ret) 3387 return ret; 3388 } 3389 3390 if (depth < 0) 3391 return -ERANGE; 3392 3393 cgrp = cgroup_kn_lock_live(of->kn, false); 3394 if (!cgrp) 3395 return -ENOENT; 3396 3397 cgrp->max_depth = depth; 3398 3399 cgroup_kn_unlock(of->kn); 3400 3401 return nbytes; 3402 } 3403 3404 static int cgroup_events_show(struct seq_file *seq, void *v) 3405 { 3406 seq_printf(seq, "populated %d\n", 3407 cgroup_is_populated(seq_css(seq)->cgroup)); 3408 return 0; 3409 } 3410 3411 static int cgroup_stat_show(struct seq_file *seq, void *v) 3412 { 3413 struct cgroup *cgroup = seq_css(seq)->cgroup; 3414 3415 
seq_printf(seq, "nr_descendants %d\n", 3416 cgroup->nr_descendants); 3417 seq_printf(seq, "nr_dying_descendants %d\n", 3418 cgroup->nr_dying_descendants); 3419 3420 return 0; 3421 } 3422 3423 static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq, 3424 struct cgroup *cgrp, int ssid) 3425 { 3426 struct cgroup_subsys *ss = cgroup_subsys[ssid]; 3427 struct cgroup_subsys_state *css; 3428 int ret; 3429 3430 if (!ss->css_extra_stat_show) 3431 return 0; 3432 3433 css = cgroup_tryget_css(cgrp, ss); 3434 if (!css) 3435 return 0; 3436 3437 ret = ss->css_extra_stat_show(seq, css); 3438 css_put(css); 3439 return ret; 3440 } 3441 3442 static int cpu_stat_show(struct seq_file *seq, void *v) 3443 { 3444 struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup; 3445 int ret = 0; 3446 3447 cgroup_base_stat_cputime_show(seq); 3448 #ifdef CONFIG_CGROUP_SCHED 3449 ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id); 3450 #endif 3451 return ret; 3452 } 3453 3454 #ifdef CONFIG_PSI 3455 static int cgroup_io_pressure_show(struct seq_file *seq, void *v) 3456 { 3457 return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_IO); 3458 } 3459 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v) 3460 { 3461 return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_MEM); 3462 } 3463 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v) 3464 { 3465 return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU); 3466 } 3467 #endif 3468 3469 static int cgroup_file_open(struct kernfs_open_file *of) 3470 { 3471 struct cftype *cft = of->kn->priv; 3472 3473 if (cft->open) 3474 return cft->open(of); 3475 return 0; 3476 } 3477 3478 static void cgroup_file_release(struct kernfs_open_file *of) 3479 { 3480 struct cftype *cft = of->kn->priv; 3481 3482 if (cft->release) 3483 cft->release(of); 3484 } 3485 3486 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, 3487 size_t nbytes, loff_t off) 3488 { 3489 struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; 3490 struct cgroup *cgrp = of->kn->parent->priv; 3491 struct cftype *cft = of->kn->priv; 3492 struct cgroup_subsys_state *css; 3493 int ret; 3494 3495 /* 3496 * If namespaces are delegation boundaries, disallow writes to 3497 * files in an non-init namespace root from inside the namespace 3498 * except for the files explicitly marked delegatable - 3499 * cgroup.procs and cgroup.subtree_control. 3500 */ 3501 if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) && 3502 !(cft->flags & CFTYPE_NS_DELEGATABLE) && 3503 ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp) 3504 return -EPERM; 3505 3506 if (cft->write) 3507 return cft->write(of, buf, nbytes, off); 3508 3509 /* 3510 * kernfs guarantees that a file isn't deleted with operations in 3511 * flight, which means that the matching css is and stays alive and 3512 * doesn't need to be pinned. The RCU locking is not necessary 3513 * either. It's just for the convenience of using cgroup_css(). 
3514 */ 3515 rcu_read_lock(); 3516 css = cgroup_css(cgrp, cft->ss); 3517 rcu_read_unlock(); 3518 3519 if (cft->write_u64) { 3520 unsigned long long v; 3521 ret = kstrtoull(buf, 0, &v); 3522 if (!ret) 3523 ret = cft->write_u64(css, cft, v); 3524 } else if (cft->write_s64) { 3525 long long v; 3526 ret = kstrtoll(buf, 0, &v); 3527 if (!ret) 3528 ret = cft->write_s64(css, cft, v); 3529 } else { 3530 ret = -EINVAL; 3531 } 3532 3533 return ret ?: nbytes; 3534 } 3535 3536 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) 3537 { 3538 return seq_cft(seq)->seq_start(seq, ppos); 3539 } 3540 3541 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) 3542 { 3543 return seq_cft(seq)->seq_next(seq, v, ppos); 3544 } 3545 3546 static void cgroup_seqfile_stop(struct seq_file *seq, void *v) 3547 { 3548 if (seq_cft(seq)->seq_stop) 3549 seq_cft(seq)->seq_stop(seq, v); 3550 } 3551 3552 static int cgroup_seqfile_show(struct seq_file *m, void *arg) 3553 { 3554 struct cftype *cft = seq_cft(m); 3555 struct cgroup_subsys_state *css = seq_css(m); 3556 3557 if (cft->seq_show) 3558 return cft->seq_show(m, arg); 3559 3560 if (cft->read_u64) 3561 seq_printf(m, "%llu\n", cft->read_u64(css, cft)); 3562 else if (cft->read_s64) 3563 seq_printf(m, "%lld\n", cft->read_s64(css, cft)); 3564 else 3565 return -EINVAL; 3566 return 0; 3567 } 3568 3569 static struct kernfs_ops cgroup_kf_single_ops = { 3570 .atomic_write_len = PAGE_SIZE, 3571 .open = cgroup_file_open, 3572 .release = cgroup_file_release, 3573 .write = cgroup_file_write, 3574 .seq_show = cgroup_seqfile_show, 3575 }; 3576 3577 static struct kernfs_ops cgroup_kf_ops = { 3578 .atomic_write_len = PAGE_SIZE, 3579 .open = cgroup_file_open, 3580 .release = cgroup_file_release, 3581 .write = cgroup_file_write, 3582 .seq_start = cgroup_seqfile_start, 3583 .seq_next = cgroup_seqfile_next, 3584 .seq_stop = cgroup_seqfile_stop, 3585 .seq_show = cgroup_seqfile_show, 3586 }; 3587 3588 /* set uid and gid of cgroup dirs and files to that of the creator */ 3589 static int cgroup_kn_set_ugid(struct kernfs_node *kn) 3590 { 3591 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, 3592 .ia_uid = current_fsuid(), 3593 .ia_gid = current_fsgid(), }; 3594 3595 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && 3596 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) 3597 return 0; 3598 3599 return kernfs_setattr(kn, &iattr); 3600 } 3601 3602 static void cgroup_file_notify_timer(struct timer_list *timer) 3603 { 3604 cgroup_file_notify(container_of(timer, struct cgroup_file, 3605 notify_timer)); 3606 } 3607 3608 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, 3609 struct cftype *cft) 3610 { 3611 char name[CGROUP_FILE_NAME_MAX]; 3612 struct kernfs_node *kn; 3613 struct lock_class_key *key = NULL; 3614 int ret; 3615 3616 #ifdef CONFIG_DEBUG_LOCK_ALLOC 3617 key = &cft->lockdep_key; 3618 #endif 3619 kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), 3620 cgroup_file_mode(cft), 3621 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 3622 0, cft->kf_ops, cft, 3623 NULL, key); 3624 if (IS_ERR(kn)) 3625 return PTR_ERR(kn); 3626 3627 ret = cgroup_kn_set_ugid(kn); 3628 if (ret) { 3629 kernfs_remove(kn); 3630 return ret; 3631 } 3632 3633 if (cft->file_offset) { 3634 struct cgroup_file *cfile = (void *)css + cft->file_offset; 3635 3636 timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0); 3637 3638 spin_lock_irq(&cgroup_file_kn_lock); 3639 cfile->kn = kn; 3640 spin_unlock_irq(&cgroup_file_kn_lock); 3641 } 3642 3643 return 0; 3644 } 
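/*
 * Illustrative sketch of how a controller ties a cftype to a struct
 * cgroup_file via ->file_offset so that it can call cgroup_file_notify()
 * on it later (identifiers here are made up for the example):
 *
 *	struct my_css {
 *		struct cgroup_subsys_state	css;
 *		struct cgroup_file		events_file;
 *	};
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name		= "events",
 *			.file_offset	= offsetof(struct my_css, events_file),
 *			.seq_show	= my_events_show,
 *		},
 *		{ }	(terminating entry)
 *	};
 */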
3645 3646 /** 3647 * cgroup_addrm_files - add or remove files to a cgroup directory 3648 * @css: the target css 3649 * @cgrp: the target cgroup (usually css->cgroup) 3650 * @cfts: array of cftypes to be added 3651 * @is_add: whether to add or remove 3652 * 3653 * Depending on @is_add, add or remove files defined by @cfts on @cgrp. 3654 * For removals, this function never fails. 3655 */ 3656 static int cgroup_addrm_files(struct cgroup_subsys_state *css, 3657 struct cgroup *cgrp, struct cftype cfts[], 3658 bool is_add) 3659 { 3660 struct cftype *cft, *cft_end = NULL; 3661 int ret = 0; 3662 3663 lockdep_assert_held(&cgroup_mutex); 3664 3665 restart: 3666 for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) { 3667 /* does cft->flags tell us to skip this file on @cgrp? */ 3668 if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) 3669 continue; 3670 if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) 3671 continue; 3672 if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp)) 3673 continue; 3674 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp)) 3675 continue; 3676 if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug) 3677 continue; 3678 if (is_add) { 3679 ret = cgroup_add_file(css, cgrp, cft); 3680 if (ret) { 3681 pr_warn("%s: failed to add %s, err=%d\n", 3682 __func__, cft->name, ret); 3683 cft_end = cft; 3684 is_add = false; 3685 goto restart; 3686 } 3687 } else { 3688 cgroup_rm_file(cgrp, cft); 3689 } 3690 } 3691 return ret; 3692 } 3693 3694 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) 3695 { 3696 struct cgroup_subsys *ss = cfts[0].ss; 3697 struct cgroup *root = &ss->root->cgrp; 3698 struct cgroup_subsys_state *css; 3699 int ret = 0; 3700 3701 lockdep_assert_held(&cgroup_mutex); 3702 3703 /* add/rm files for all cgroups created before */ 3704 css_for_each_descendant_pre(css, cgroup_css(root, ss)) { 3705 struct cgroup *cgrp = css->cgroup; 3706 3707 if (!(css->flags & CSS_VISIBLE)) 3708 continue; 3709 3710 ret = cgroup_addrm_files(css, cgrp, cfts, is_add); 3711 if (ret) 3712 break; 3713 } 3714 3715 if (is_add && !ret) 3716 kernfs_activate(root->kn); 3717 return ret; 3718 } 3719 3720 static void cgroup_exit_cftypes(struct cftype *cfts) 3721 { 3722 struct cftype *cft; 3723 3724 for (cft = cfts; cft->name[0] != '\0'; cft++) { 3725 /* free copy for custom atomic_write_len, see init_cftypes() */ 3726 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) 3727 kfree(cft->kf_ops); 3728 cft->kf_ops = NULL; 3729 cft->ss = NULL; 3730 3731 /* revert flags set by cgroup core while adding @cfts */ 3732 cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL); 3733 } 3734 } 3735 3736 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 3737 { 3738 struct cftype *cft; 3739 3740 for (cft = cfts; cft->name[0] != '\0'; cft++) { 3741 struct kernfs_ops *kf_ops; 3742 3743 WARN_ON(cft->ss || cft->kf_ops); 3744 3745 if (cft->seq_start) 3746 kf_ops = &cgroup_kf_ops; 3747 else 3748 kf_ops = &cgroup_kf_single_ops; 3749 3750 /* 3751 * Ugh... if @cft wants a custom max_write_len, we need to 3752 * make a copy of kf_ops to set its atomic_write_len. 
3753 */ 3754 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) { 3755 kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL); 3756 if (!kf_ops) { 3757 cgroup_exit_cftypes(cfts); 3758 return -ENOMEM; 3759 } 3760 kf_ops->atomic_write_len = cft->max_write_len; 3761 } 3762 3763 cft->kf_ops = kf_ops; 3764 cft->ss = ss; 3765 } 3766 3767 return 0; 3768 } 3769 3770 static int cgroup_rm_cftypes_locked(struct cftype *cfts) 3771 { 3772 lockdep_assert_held(&cgroup_mutex); 3773 3774 if (!cfts || !cfts[0].ss) 3775 return -ENOENT; 3776 3777 list_del(&cfts->node); 3778 cgroup_apply_cftypes(cfts, false); 3779 cgroup_exit_cftypes(cfts); 3780 return 0; 3781 } 3782 3783 /** 3784 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem 3785 * @cfts: zero-length name terminated array of cftypes 3786 * 3787 * Unregister @cfts. Files described by @cfts are removed from all 3788 * existing cgroups and all future cgroups won't have them either. This 3789 * function can be called anytime whether @cfts' subsys is attached or not. 3790 * 3791 * Returns 0 on successful unregistration, -ENOENT if @cfts is not 3792 * registered. 3793 */ 3794 int cgroup_rm_cftypes(struct cftype *cfts) 3795 { 3796 int ret; 3797 3798 mutex_lock(&cgroup_mutex); 3799 ret = cgroup_rm_cftypes_locked(cfts); 3800 mutex_unlock(&cgroup_mutex); 3801 return ret; 3802 } 3803 3804 /** 3805 * cgroup_add_cftypes - add an array of cftypes to a subsystem 3806 * @ss: target cgroup subsystem 3807 * @cfts: zero-length name terminated array of cftypes 3808 * 3809 * Register @cfts to @ss. Files described by @cfts are created for all 3810 * existing cgroups to which @ss is attached and all future cgroups will 3811 * have them too. This function can be called anytime whether @ss is 3812 * attached or not. 3813 * 3814 * Returns 0 on successful registration, -errno on failure. Note that this 3815 * function currently returns 0 as long as @cfts registration is successful 3816 * even if some file creation attempts on existing cgroups fail. 3817 */ 3818 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 3819 { 3820 int ret; 3821 3822 if (!cgroup_ssid_enabled(ss->id)) 3823 return 0; 3824 3825 if (!cfts || cfts[0].name[0] == '\0') 3826 return 0; 3827 3828 ret = cgroup_init_cftypes(ss, cfts); 3829 if (ret) 3830 return ret; 3831 3832 mutex_lock(&cgroup_mutex); 3833 3834 list_add_tail(&cfts->node, &ss->cfts); 3835 ret = cgroup_apply_cftypes(cfts, true); 3836 if (ret) 3837 cgroup_rm_cftypes_locked(cfts); 3838 3839 mutex_unlock(&cgroup_mutex); 3840 return ret; 3841 } 3842 3843 /** 3844 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy 3845 * @ss: target cgroup subsystem 3846 * @cfts: zero-length name terminated array of cftypes 3847 * 3848 * Similar to cgroup_add_cftypes() but the added files are only used for 3849 * the default hierarchy. 3850 */ 3851 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 3852 { 3853 struct cftype *cft; 3854 3855 for (cft = cfts; cft && cft->name[0] != '\0'; cft++) 3856 cft->flags |= __CFTYPE_ONLY_ON_DFL; 3857 return cgroup_add_cftypes(ss, cfts); 3858 } 3859 3860 /** 3861 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies 3862 * @ss: target cgroup subsystem 3863 * @cfts: zero-length name terminated array of cftypes 3864 * 3865 * Similar to cgroup_add_cftypes() but the added files are only used for 3866 * the legacy hierarchies. 
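 *
 * A controller typically registers its v1-only interface files once at
 * init time with something like (identifiers hypothetical):
 *
 *	cgroup_add_legacy_cftypes(&my_cgrp_subsys, my_legacy_files);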
3867 */ 3868 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 3869 { 3870 struct cftype *cft; 3871 3872 for (cft = cfts; cft && cft->name[0] != '\0'; cft++) 3873 cft->flags |= __CFTYPE_NOT_ON_DFL; 3874 return cgroup_add_cftypes(ss, cfts); 3875 } 3876 3877 /** 3878 * cgroup_file_notify - generate a file modified event for a cgroup_file 3879 * @cfile: target cgroup_file 3880 * 3881 * @cfile must have been obtained by setting cftype->file_offset. 3882 */ 3883 void cgroup_file_notify(struct cgroup_file *cfile) 3884 { 3885 unsigned long flags; 3886 3887 spin_lock_irqsave(&cgroup_file_kn_lock, flags); 3888 if (cfile->kn) { 3889 unsigned long last = cfile->notified_at; 3890 unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV; 3891 3892 if (time_in_range(jiffies, last, next)) { 3893 timer_reduce(&cfile->notify_timer, next); 3894 } else { 3895 kernfs_notify(cfile->kn); 3896 cfile->notified_at = jiffies; 3897 } 3898 } 3899 spin_unlock_irqrestore(&cgroup_file_kn_lock, flags); 3900 } 3901 3902 /** 3903 * css_next_child - find the next child of a given css 3904 * @pos: the current position (%NULL to initiate traversal) 3905 * @parent: css whose children to walk 3906 * 3907 * This function returns the next child of @parent and should be called 3908 * under either cgroup_mutex or RCU read lock. The only requirement is 3909 * that @parent and @pos are accessible. The next sibling is guaranteed to 3910 * be returned regardless of their states. 3911 * 3912 * If a subsystem synchronizes ->css_online() and the start of iteration, a 3913 * css which finished ->css_online() is guaranteed to be visible in the 3914 * future iterations and will stay visible until the last reference is put. 3915 * A css which hasn't finished ->css_online() or already finished 3916 * ->css_offline() may show up during traversal. It's each subsystem's 3917 * responsibility to synchronize against on/offlining. 3918 */ 3919 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, 3920 struct cgroup_subsys_state *parent) 3921 { 3922 struct cgroup_subsys_state *next; 3923 3924 cgroup_assert_mutex_or_rcu_locked(); 3925 3926 /* 3927 * @pos could already have been unlinked from the sibling list. 3928 * Once a cgroup is removed, its ->sibling.next is no longer 3929 * updated when its next sibling changes. CSS_RELEASED is set when 3930 * @pos is taken off list, at which time its next pointer is valid, 3931 * and, as releases are serialized, the one pointed to by the next 3932 * pointer is guaranteed to not have started release yet. This 3933 * implies that if we observe !CSS_RELEASED on @pos in this RCU 3934 * critical section, the one pointed to by its next pointer is 3935 * guaranteed to not have finished its RCU grace period even if we 3936 * have dropped rcu_read_lock() inbetween iterations. 3937 * 3938 * If @pos has CSS_RELEASED set, its next pointer can't be 3939 * dereferenced; however, as each css is given a monotonically 3940 * increasing unique serial number and always appended to the 3941 * sibling list, the next one can be found by walking the parent's 3942 * children until the first css with higher serial number than 3943 * @pos's. While this path can be slower, it happens iff iteration 3944 * races against release and the race window is very small. 
3945 */ 3946 if (!pos) { 3947 next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling); 3948 } else if (likely(!(pos->flags & CSS_RELEASED))) { 3949 next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling); 3950 } else { 3951 list_for_each_entry_rcu(next, &parent->children, sibling) 3952 if (next->serial_nr > pos->serial_nr) 3953 break; 3954 } 3955 3956 /* 3957 * @next, if not pointing to the head, can be dereferenced and is 3958 * the next sibling. 3959 */ 3960 if (&next->sibling != &parent->children) 3961 return next; 3962 return NULL; 3963 } 3964 3965 /** 3966 * css_next_descendant_pre - find the next descendant for pre-order walk 3967 * @pos: the current position (%NULL to initiate traversal) 3968 * @root: css whose descendants to walk 3969 * 3970 * To be used by css_for_each_descendant_pre(). Find the next descendant 3971 * to visit for pre-order traversal of @root's descendants. @root is 3972 * included in the iteration and the first node to be visited. 3973 * 3974 * While this function requires cgroup_mutex or RCU read locking, it 3975 * doesn't require the whole traversal to be contained in a single critical 3976 * section. This function will return the correct next descendant as long 3977 * as both @pos and @root are accessible and @pos is a descendant of @root. 3978 * 3979 * If a subsystem synchronizes ->css_online() and the start of iteration, a 3980 * css which finished ->css_online() is guaranteed to be visible in the 3981 * future iterations and will stay visible until the last reference is put. 3982 * A css which hasn't finished ->css_online() or already finished 3983 * ->css_offline() may show up during traversal. It's each subsystem's 3984 * responsibility to synchronize against on/offlining. 3985 */ 3986 struct cgroup_subsys_state * 3987 css_next_descendant_pre(struct cgroup_subsys_state *pos, 3988 struct cgroup_subsys_state *root) 3989 { 3990 struct cgroup_subsys_state *next; 3991 3992 cgroup_assert_mutex_or_rcu_locked(); 3993 3994 /* if first iteration, visit @root */ 3995 if (!pos) 3996 return root; 3997 3998 /* visit the first child if exists */ 3999 next = css_next_child(NULL, pos); 4000 if (next) 4001 return next; 4002 4003 /* no child, visit my or the closest ancestor's next sibling */ 4004 while (pos != root) { 4005 next = css_next_child(pos, pos->parent); 4006 if (next) 4007 return next; 4008 pos = pos->parent; 4009 } 4010 4011 return NULL; 4012 } 4013 4014 /** 4015 * css_rightmost_descendant - return the rightmost descendant of a css 4016 * @pos: css of interest 4017 * 4018 * Return the rightmost descendant of @pos. If there's no descendant, @pos 4019 * is returned. This can be used during pre-order traversal to skip 4020 * subtree of @pos. 4021 * 4022 * While this function requires cgroup_mutex or RCU read locking, it 4023 * doesn't require the whole traversal to be contained in a single critical 4024 * section. This function will return the correct rightmost descendant as 4025 * long as @pos is accessible. 
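 *
 * For example, a pre-order walk can prune an entire subtree (sketch;
 * should_skip() and visit() are hypothetical):
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root) {
 *		if (should_skip(pos)) {
 *			pos = css_rightmost_descendant(pos);
 *			continue;
 *		}
 *		visit(pos);
 *	}
 *	rcu_read_unlock();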
4026 */ 4027 struct cgroup_subsys_state * 4028 css_rightmost_descendant(struct cgroup_subsys_state *pos) 4029 { 4030 struct cgroup_subsys_state *last, *tmp; 4031 4032 cgroup_assert_mutex_or_rcu_locked(); 4033 4034 do { 4035 last = pos; 4036 /* ->prev isn't RCU safe, walk ->next till the end */ 4037 pos = NULL; 4038 css_for_each_child(tmp, last) 4039 pos = tmp; 4040 } while (pos); 4041 4042 return last; 4043 } 4044 4045 static struct cgroup_subsys_state * 4046 css_leftmost_descendant(struct cgroup_subsys_state *pos) 4047 { 4048 struct cgroup_subsys_state *last; 4049 4050 do { 4051 last = pos; 4052 pos = css_next_child(NULL, pos); 4053 } while (pos); 4054 4055 return last; 4056 } 4057 4058 /** 4059 * css_next_descendant_post - find the next descendant for post-order walk 4060 * @pos: the current position (%NULL to initiate traversal) 4061 * @root: css whose descendants to walk 4062 * 4063 * To be used by css_for_each_descendant_post(). Find the next descendant 4064 * to visit for post-order traversal of @root's descendants. @root is 4065 * included in the iteration and the last node to be visited. 4066 * 4067 * While this function requires cgroup_mutex or RCU read locking, it 4068 * doesn't require the whole traversal to be contained in a single critical 4069 * section. This function will return the correct next descendant as long 4070 * as both @pos and @root are accessible and @pos is a descendant of 4071 * @root. 4072 * 4073 * If a subsystem synchronizes ->css_online() and the start of iteration, a 4074 * css which finished ->css_online() is guaranteed to be visible in the 4075 * future iterations and will stay visible until the last reference is put. 4076 * A css which hasn't finished ->css_online() or already finished 4077 * ->css_offline() may show up during traversal. It's each subsystem's 4078 * responsibility to synchronize against on/offlining. 4079 */ 4080 struct cgroup_subsys_state * 4081 css_next_descendant_post(struct cgroup_subsys_state *pos, 4082 struct cgroup_subsys_state *root) 4083 { 4084 struct cgroup_subsys_state *next; 4085 4086 cgroup_assert_mutex_or_rcu_locked(); 4087 4088 /* if first iteration, visit leftmost descendant which may be @root */ 4089 if (!pos) 4090 return css_leftmost_descendant(root); 4091 4092 /* if we visited @root, we're done */ 4093 if (pos == root) 4094 return NULL; 4095 4096 /* if there's an unvisited sibling, visit its leftmost descendant */ 4097 next = css_next_child(pos, pos->parent); 4098 if (next) 4099 return css_leftmost_descendant(next); 4100 4101 /* no sibling left, visit parent */ 4102 return pos->parent; 4103 } 4104 4105 /** 4106 * css_has_online_children - does a css have online children 4107 * @css: the target css 4108 * 4109 * Returns %true if @css has any online children; otherwise, %false. This 4110 * function can be called from any context but the caller is responsible 4111 * for synchronizing against on/offlining as necessary. 
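 *
 * For example, cgroup_destroy_locked() below uses this to refuse to
 * remove a cgroup that still has live children:
 *
 *	if (css_has_online_children(&cgrp->self))
 *		return -EBUSY;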
4112 */ 4113 bool css_has_online_children(struct cgroup_subsys_state *css) 4114 { 4115 struct cgroup_subsys_state *child; 4116 bool ret = false; 4117 4118 rcu_read_lock(); 4119 css_for_each_child(child, css) { 4120 if (child->flags & CSS_ONLINE) { 4121 ret = true; 4122 break; 4123 } 4124 } 4125 rcu_read_unlock(); 4126 return ret; 4127 } 4128 4129 static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it) 4130 { 4131 struct list_head *l; 4132 struct cgrp_cset_link *link; 4133 struct css_set *cset; 4134 4135 lockdep_assert_held(&css_set_lock); 4136 4137 /* find the next threaded cset */ 4138 if (it->tcset_pos) { 4139 l = it->tcset_pos->next; 4140 4141 if (l != it->tcset_head) { 4142 it->tcset_pos = l; 4143 return container_of(l, struct css_set, 4144 threaded_csets_node); 4145 } 4146 4147 it->tcset_pos = NULL; 4148 } 4149 4150 /* find the next cset */ 4151 l = it->cset_pos; 4152 l = l->next; 4153 if (l == it->cset_head) { 4154 it->cset_pos = NULL; 4155 return NULL; 4156 } 4157 4158 if (it->ss) { 4159 cset = container_of(l, struct css_set, e_cset_node[it->ss->id]); 4160 } else { 4161 link = list_entry(l, struct cgrp_cset_link, cset_link); 4162 cset = link->cset; 4163 } 4164 4165 it->cset_pos = l; 4166 4167 /* initialize threaded css_set walking */ 4168 if (it->flags & CSS_TASK_ITER_THREADED) { 4169 if (it->cur_dcset) 4170 put_css_set_locked(it->cur_dcset); 4171 it->cur_dcset = cset; 4172 get_css_set(cset); 4173 4174 it->tcset_head = &cset->threaded_csets; 4175 it->tcset_pos = &cset->threaded_csets; 4176 } 4177 4178 return cset; 4179 } 4180 4181 /** 4182 * css_task_iter_advance_css_set - advance a task iterator to the next css_set 4183 * @it: the iterator to advance 4184 * 4185 * Advance @it to the next css_set to walk. 4186 */ 4187 static void css_task_iter_advance_css_set(struct css_task_iter *it) 4188 { 4189 struct css_set *cset; 4190 4191 lockdep_assert_held(&css_set_lock); 4192 4193 /* Advance to the next non-empty css_set */ 4194 do { 4195 cset = css_task_iter_next_css_set(it); 4196 if (!cset) { 4197 it->task_pos = NULL; 4198 return; 4199 } 4200 } while (!css_set_populated(cset)); 4201 4202 if (!list_empty(&cset->tasks)) 4203 it->task_pos = cset->tasks.next; 4204 else 4205 it->task_pos = cset->mg_tasks.next; 4206 4207 it->tasks_head = &cset->tasks; 4208 it->mg_tasks_head = &cset->mg_tasks; 4209 4210 /* 4211 * We don't keep css_sets locked across iteration steps and thus 4212 * need to take steps to ensure that iteration can be resumed after 4213 * the lock is re-acquired. Iteration is performed at two levels - 4214 * css_sets and tasks in them. 4215 * 4216 * Once created, a css_set never leaves its cgroup lists, so a 4217 * pinned css_set is guaranteed to stay put and we can resume 4218 * iteration afterwards. 4219 * 4220 * Tasks may leave @cset across iteration steps. This is resolved 4221 * by registering each iterator with the css_set currently being 4222 * walked and making css_set_move_task() advance iterators whose 4223 * next task is leaving. 4224 */ 4225 if (it->cur_cset) { 4226 list_del(&it->iters_node); 4227 put_css_set_locked(it->cur_cset); 4228 } 4229 get_css_set(cset); 4230 it->cur_cset = cset; 4231 list_add(&it->iters_node, &cset->task_iters); 4232 } 4233 4234 static void css_task_iter_advance(struct css_task_iter *it) 4235 { 4236 struct list_head *next; 4237 4238 lockdep_assert_held(&css_set_lock); 4239 repeat: 4240 if (it->task_pos) { 4241 /* 4242 * Advance iterator to find next entry. cset->tasks is 4243 * consumed first and then ->mg_tasks. 
After ->mg_tasks, 4244 * we move onto the next cset. 4245 */ 4246 next = it->task_pos->next; 4247 4248 if (next == it->tasks_head) 4249 next = it->mg_tasks_head->next; 4250 4251 if (next == it->mg_tasks_head) 4252 css_task_iter_advance_css_set(it); 4253 else 4254 it->task_pos = next; 4255 } else { 4256 /* called from start, proceed to the first cset */ 4257 css_task_iter_advance_css_set(it); 4258 } 4259 4260 /* if PROCS, skip over tasks which aren't group leaders */ 4261 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos && 4262 !thread_group_leader(list_entry(it->task_pos, struct task_struct, 4263 cg_list))) 4264 goto repeat; 4265 } 4266 4267 /** 4268 * css_task_iter_start - initiate task iteration 4269 * @css: the css to walk tasks of 4270 * @flags: CSS_TASK_ITER_* flags 4271 * @it: the task iterator to use 4272 * 4273 * Initiate iteration through the tasks of @css. The caller can call 4274 * css_task_iter_next() to walk through the tasks until the function 4275 * returns NULL. On completion of iteration, css_task_iter_end() must be 4276 * called. 4277 */ 4278 void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, 4279 struct css_task_iter *it) 4280 { 4281 /* no one should try to iterate before mounting cgroups */ 4282 WARN_ON_ONCE(!use_task_css_set_links); 4283 4284 memset(it, 0, sizeof(*it)); 4285 4286 spin_lock_irq(&css_set_lock); 4287 4288 it->ss = css->ss; 4289 it->flags = flags; 4290 4291 if (it->ss) 4292 it->cset_pos = &css->cgroup->e_csets[css->ss->id]; 4293 else 4294 it->cset_pos = &css->cgroup->cset_links; 4295 4296 it->cset_head = it->cset_pos; 4297 4298 css_task_iter_advance(it); 4299 4300 spin_unlock_irq(&css_set_lock); 4301 } 4302 4303 /** 4304 * css_task_iter_next - return the next task for the iterator 4305 * @it: the task iterator being iterated 4306 * 4307 * The "next" function for task iteration. @it should have been 4308 * initialized via css_task_iter_start(). Returns NULL when the iteration 4309 * reaches the end. 4310 */ 4311 struct task_struct *css_task_iter_next(struct css_task_iter *it) 4312 { 4313 if (it->cur_task) { 4314 put_task_struct(it->cur_task); 4315 it->cur_task = NULL; 4316 } 4317 4318 spin_lock_irq(&css_set_lock); 4319 4320 if (it->task_pos) { 4321 it->cur_task = list_entry(it->task_pos, struct task_struct, 4322 cg_list); 4323 get_task_struct(it->cur_task); 4324 css_task_iter_advance(it); 4325 } 4326 4327 spin_unlock_irq(&css_set_lock); 4328 4329 return it->cur_task; 4330 } 4331 4332 /** 4333 * css_task_iter_end - finish task iteration 4334 * @it: the task iterator to finish 4335 * 4336 * Finish task iteration started by css_task_iter_start(). 
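 *
 * The three functions together form the canonical iteration pattern
 * (sketch; process() is hypothetical):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		process(task);
 *	css_task_iter_end(&it);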
4337 */ 4338 void css_task_iter_end(struct css_task_iter *it) 4339 { 4340 if (it->cur_cset) { 4341 spin_lock_irq(&css_set_lock); 4342 list_del(&it->iters_node); 4343 put_css_set_locked(it->cur_cset); 4344 spin_unlock_irq(&css_set_lock); 4345 } 4346 4347 if (it->cur_dcset) 4348 put_css_set(it->cur_dcset); 4349 4350 if (it->cur_task) 4351 put_task_struct(it->cur_task); 4352 } 4353 4354 static void cgroup_procs_release(struct kernfs_open_file *of) 4355 { 4356 if (of->priv) { 4357 css_task_iter_end(of->priv); 4358 kfree(of->priv); 4359 } 4360 } 4361 4362 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos) 4363 { 4364 struct kernfs_open_file *of = s->private; 4365 struct css_task_iter *it = of->priv; 4366 4367 return css_task_iter_next(it); 4368 } 4369 4370 static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, 4371 unsigned int iter_flags) 4372 { 4373 struct kernfs_open_file *of = s->private; 4374 struct cgroup *cgrp = seq_css(s)->cgroup; 4375 struct css_task_iter *it = of->priv; 4376 4377 /* 4378 * When a seq_file is seeked, it's always traversed sequentially 4379 * from position 0, so we can simply keep iterating on !0 *pos. 4380 */ 4381 if (!it) { 4382 if (WARN_ON_ONCE((*pos)++)) 4383 return ERR_PTR(-EINVAL); 4384 4385 it = kzalloc(sizeof(*it), GFP_KERNEL); 4386 if (!it) 4387 return ERR_PTR(-ENOMEM); 4388 of->priv = it; 4389 css_task_iter_start(&cgrp->self, iter_flags, it); 4390 } else if (!(*pos)++) { 4391 css_task_iter_end(it); 4392 css_task_iter_start(&cgrp->self, iter_flags, it); 4393 } 4394 4395 return cgroup_procs_next(s, NULL, NULL); 4396 } 4397 4398 static void *cgroup_procs_start(struct seq_file *s, loff_t *pos) 4399 { 4400 struct cgroup *cgrp = seq_css(s)->cgroup; 4401 4402 /* 4403 * All processes of a threaded subtree belong to the domain cgroup 4404 * of the subtree. Only threads can be distributed across the 4405 * subtree. Reject reads on cgroup.procs in the subtree proper. 4406 * They're always empty anyway. 4407 */ 4408 if (cgroup_is_threaded(cgrp)) 4409 return ERR_PTR(-EOPNOTSUPP); 4410 4411 return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS | 4412 CSS_TASK_ITER_THREADED); 4413 } 4414 4415 static int cgroup_procs_show(struct seq_file *s, void *v) 4416 { 4417 seq_printf(s, "%d\n", task_pid_vnr(v)); 4418 return 0; 4419 } 4420 4421 static int cgroup_procs_write_permission(struct cgroup *src_cgrp, 4422 struct cgroup *dst_cgrp, 4423 struct super_block *sb) 4424 { 4425 struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; 4426 struct cgroup *com_cgrp = src_cgrp; 4427 struct inode *inode; 4428 int ret; 4429 4430 lockdep_assert_held(&cgroup_mutex); 4431 4432 /* find the common ancestor */ 4433 while (!cgroup_is_descendant(dst_cgrp, com_cgrp)) 4434 com_cgrp = cgroup_parent(com_cgrp); 4435 4436 /* %current should be authorized to migrate to the common ancestor */ 4437 inode = kernfs_get_inode(sb, com_cgrp->procs_file.kn); 4438 if (!inode) 4439 return -ENOMEM; 4440 4441 ret = inode_permission(inode, MAY_WRITE); 4442 iput(inode); 4443 if (ret) 4444 return ret; 4445 4446 /* 4447 * If namespaces are delegation boundaries, %current must be able 4448 * to see both source and destination cgroups from its namespace. 
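 *
 * CGRP_ROOT_NS_DELEGATE is normally set by mounting cgroup2 with the
 * "nsdelegate" option, e.g.:
 *
 *	mount -t cgroup2 -o nsdelegate none /sys/fs/cgroup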
4449 */ 4450 if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) && 4451 (!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) || 4452 !cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp))) 4453 return -ENOENT; 4454 4455 return 0; 4456 } 4457 4458 static ssize_t cgroup_procs_write(struct kernfs_open_file *of, 4459 char *buf, size_t nbytes, loff_t off) 4460 { 4461 struct cgroup *src_cgrp, *dst_cgrp; 4462 struct task_struct *task; 4463 ssize_t ret; 4464 4465 dst_cgrp = cgroup_kn_lock_live(of->kn, false); 4466 if (!dst_cgrp) 4467 return -ENODEV; 4468 4469 task = cgroup_procs_write_start(buf, true); 4470 ret = PTR_ERR_OR_ZERO(task); 4471 if (ret) 4472 goto out_unlock; 4473 4474 /* find the source cgroup */ 4475 spin_lock_irq(&css_set_lock); 4476 src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); 4477 spin_unlock_irq(&css_set_lock); 4478 4479 ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, 4480 of->file->f_path.dentry->d_sb); 4481 if (ret) 4482 goto out_finish; 4483 4484 ret = cgroup_attach_task(dst_cgrp, task, true); 4485 4486 out_finish: 4487 cgroup_procs_write_finish(task); 4488 out_unlock: 4489 cgroup_kn_unlock(of->kn); 4490 4491 return ret ?: nbytes; 4492 } 4493 4494 static void *cgroup_threads_start(struct seq_file *s, loff_t *pos) 4495 { 4496 return __cgroup_procs_start(s, pos, 0); 4497 } 4498 4499 static ssize_t cgroup_threads_write(struct kernfs_open_file *of, 4500 char *buf, size_t nbytes, loff_t off) 4501 { 4502 struct cgroup *src_cgrp, *dst_cgrp; 4503 struct task_struct *task; 4504 ssize_t ret; 4505 4506 buf = strstrip(buf); 4507 4508 dst_cgrp = cgroup_kn_lock_live(of->kn, false); 4509 if (!dst_cgrp) 4510 return -ENODEV; 4511 4512 task = cgroup_procs_write_start(buf, false); 4513 ret = PTR_ERR_OR_ZERO(task); 4514 if (ret) 4515 goto out_unlock; 4516 4517 /* find the source cgroup */ 4518 spin_lock_irq(&css_set_lock); 4519 src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); 4520 spin_unlock_irq(&css_set_lock); 4521 4522 /* thread migrations follow the cgroup.procs delegation rule */ 4523 ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, 4524 of->file->f_path.dentry->d_sb); 4525 if (ret) 4526 goto out_finish; 4527 4528 /* and must be contained in the same domain */ 4529 ret = -EOPNOTSUPP; 4530 if (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp) 4531 goto out_finish; 4532 4533 ret = cgroup_attach_task(dst_cgrp, task, false); 4534 4535 out_finish: 4536 cgroup_procs_write_finish(task); 4537 out_unlock: 4538 cgroup_kn_unlock(of->kn); 4539 4540 return ret ?: nbytes; 4541 } 4542 4543 /* cgroup core interface files for the default hierarchy */ 4544 static struct cftype cgroup_base_files[] = { 4545 { 4546 .name = "cgroup.type", 4547 .flags = CFTYPE_NOT_ON_ROOT, 4548 .seq_show = cgroup_type_show, 4549 .write = cgroup_type_write, 4550 }, 4551 { 4552 .name = "cgroup.procs", 4553 .flags = CFTYPE_NS_DELEGATABLE, 4554 .file_offset = offsetof(struct cgroup, procs_file), 4555 .release = cgroup_procs_release, 4556 .seq_start = cgroup_procs_start, 4557 .seq_next = cgroup_procs_next, 4558 .seq_show = cgroup_procs_show, 4559 .write = cgroup_procs_write, 4560 }, 4561 { 4562 .name = "cgroup.threads", 4563 .flags = CFTYPE_NS_DELEGATABLE, 4564 .release = cgroup_procs_release, 4565 .seq_start = cgroup_threads_start, 4566 .seq_next = cgroup_procs_next, 4567 .seq_show = cgroup_procs_show, 4568 .write = cgroup_threads_write, 4569 }, 4570 { 4571 .name = "cgroup.controllers", 4572 .seq_show = cgroup_controllers_show, 4573 }, 4574 { 4575 .name = "cgroup.subtree_control", 4576 .flags = 
CFTYPE_NS_DELEGATABLE, 4577 .seq_show = cgroup_subtree_control_show, 4578 .write = cgroup_subtree_control_write, 4579 }, 4580 { 4581 .name = "cgroup.events", 4582 .flags = CFTYPE_NOT_ON_ROOT, 4583 .file_offset = offsetof(struct cgroup, events_file), 4584 .seq_show = cgroup_events_show, 4585 }, 4586 { 4587 .name = "cgroup.max.descendants", 4588 .seq_show = cgroup_max_descendants_show, 4589 .write = cgroup_max_descendants_write, 4590 }, 4591 { 4592 .name = "cgroup.max.depth", 4593 .seq_show = cgroup_max_depth_show, 4594 .write = cgroup_max_depth_write, 4595 }, 4596 { 4597 .name = "cgroup.stat", 4598 .seq_show = cgroup_stat_show, 4599 }, 4600 { 4601 .name = "cpu.stat", 4602 .flags = CFTYPE_NOT_ON_ROOT, 4603 .seq_show = cpu_stat_show, 4604 }, 4605 #ifdef CONFIG_PSI 4606 { 4607 .name = "io.pressure", 4608 .flags = CFTYPE_NOT_ON_ROOT, 4609 .seq_show = cgroup_io_pressure_show, 4610 }, 4611 { 4612 .name = "memory.pressure", 4613 .flags = CFTYPE_NOT_ON_ROOT, 4614 .seq_show = cgroup_memory_pressure_show, 4615 }, 4616 { 4617 .name = "cpu.pressure", 4618 .flags = CFTYPE_NOT_ON_ROOT, 4619 .seq_show = cgroup_cpu_pressure_show, 4620 }, 4621 #endif 4622 { } /* terminate */ 4623 }; 4624 4625 /* 4626 * css destruction is a four-stage process. 4627 * 4628 * 1. Destruction starts. Killing of the percpu_ref is initiated. 4629 * Implemented in kill_css(). 4630 * 4631 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs 4632 * and thus css_tryget_online() is guaranteed to fail, the css can be 4633 * offlined by invoking offline_css(). After offlining, the base ref is 4634 * put. Implemented in css_killed_work_fn(). 4635 * 4636 * 3. When the percpu_ref reaches zero, the only possible remaining 4637 * accessors are inside RCU read sections. css_release() schedules the 4638 * RCU callback. 4639 * 4640 * 4. After the grace period, the css can be freed. Implemented in 4641 * css_free_work_fn(). 4642 * 4643 * It is actually hairier because both steps 2 and 4 require process context 4644 * and thus involve punting to css->destroy_work, adding two additional 4645 * steps to the already complex sequence. 4646 */ 4647 static void css_free_rwork_fn(struct work_struct *work) 4648 { 4649 struct cgroup_subsys_state *css = container_of(to_rcu_work(work), 4650 struct cgroup_subsys_state, destroy_rwork); 4651 struct cgroup_subsys *ss = css->ss; 4652 struct cgroup *cgrp = css->cgroup; 4653 4654 percpu_ref_exit(&css->refcnt); 4655 4656 if (ss) { 4657 /* css free path */ 4658 struct cgroup_subsys_state *parent = css->parent; 4659 int id = css->id; 4660 4661 ss->css_free(css); 4662 cgroup_idr_remove(&ss->css_idr, id); 4663 cgroup_put(cgrp); 4664 4665 if (parent) 4666 css_put(parent); 4667 } else { 4668 /* cgroup free path */ 4669 atomic_dec(&cgrp->root->nr_cgrps); 4670 cgroup1_pidlist_destroy_all(cgrp); 4671 cancel_work_sync(&cgrp->release_agent_work); 4672 4673 if (cgroup_parent(cgrp)) { 4674 /* 4675 * We get a ref to the parent, and put the ref when 4676 * this cgroup is being freed, so it's guaranteed 4677 * that the parent won't be destroyed before its 4678 * children. 4679 */ 4680 cgroup_put(cgroup_parent(cgrp)); 4681 kernfs_put(cgrp->kn); 4682 psi_cgroup_free(cgrp); 4683 if (cgroup_on_dfl(cgrp)) 4684 cgroup_rstat_exit(cgrp); 4685 kfree(cgrp); 4686 } else { 4687 /* 4688 * This is the root cgroup's refcnt reaching zero, 4689 * which indicates that the root should be 4690 * released. 
4691 */ 4692 cgroup_destroy_root(cgrp->root); 4693 } 4694 } 4695 } 4696 4697 static void css_release_work_fn(struct work_struct *work) 4698 { 4699 struct cgroup_subsys_state *css = 4700 container_of(work, struct cgroup_subsys_state, destroy_work); 4701 struct cgroup_subsys *ss = css->ss; 4702 struct cgroup *cgrp = css->cgroup; 4703 4704 mutex_lock(&cgroup_mutex); 4705 4706 css->flags |= CSS_RELEASED; 4707 list_del_rcu(&css->sibling); 4708 4709 if (ss) { 4710 /* css release path */ 4711 if (!list_empty(&css->rstat_css_node)) { 4712 cgroup_rstat_flush(cgrp); 4713 list_del_rcu(&css->rstat_css_node); 4714 } 4715 4716 cgroup_idr_replace(&ss->css_idr, NULL, css->id); 4717 if (ss->css_released) 4718 ss->css_released(css); 4719 } else { 4720 struct cgroup *tcgrp; 4721 4722 /* cgroup release path */ 4723 TRACE_CGROUP_PATH(release, cgrp); 4724 4725 if (cgroup_on_dfl(cgrp)) 4726 cgroup_rstat_flush(cgrp); 4727 4728 for (tcgrp = cgroup_parent(cgrp); tcgrp; 4729 tcgrp = cgroup_parent(tcgrp)) 4730 tcgrp->nr_dying_descendants--; 4731 4732 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); 4733 cgrp->id = -1; 4734 4735 /* 4736 * There are two control paths which try to determine 4737 * cgroup from dentry without going through kernfs - 4738 * cgroupstats_build() and css_tryget_online_from_dir(). 4739 * Those are supported by RCU protecting clearing of 4740 * cgrp->kn->priv backpointer. 4741 */ 4742 if (cgrp->kn) 4743 RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, 4744 NULL); 4745 4746 cgroup_bpf_put(cgrp); 4747 } 4748 4749 mutex_unlock(&cgroup_mutex); 4750 4751 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); 4752 queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); 4753 } 4754 4755 static void css_release(struct percpu_ref *ref) 4756 { 4757 struct cgroup_subsys_state *css = 4758 container_of(ref, struct cgroup_subsys_state, refcnt); 4759 4760 INIT_WORK(&css->destroy_work, css_release_work_fn); 4761 queue_work(cgroup_destroy_wq, &css->destroy_work); 4762 } 4763 4764 static void init_and_link_css(struct cgroup_subsys_state *css, 4765 struct cgroup_subsys *ss, struct cgroup *cgrp) 4766 { 4767 lockdep_assert_held(&cgroup_mutex); 4768 4769 cgroup_get_live(cgrp); 4770 4771 memset(css, 0, sizeof(*css)); 4772 css->cgroup = cgrp; 4773 css->ss = ss; 4774 css->id = -1; 4775 INIT_LIST_HEAD(&css->sibling); 4776 INIT_LIST_HEAD(&css->children); 4777 INIT_LIST_HEAD(&css->rstat_css_node); 4778 css->serial_nr = css_serial_nr_next++; 4779 atomic_set(&css->online_cnt, 0); 4780 4781 if (cgroup_parent(cgrp)) { 4782 css->parent = cgroup_css(cgroup_parent(cgrp), ss); 4783 css_get(css->parent); 4784 } 4785 4786 if (cgroup_on_dfl(cgrp) && ss->css_rstat_flush) 4787 list_add_rcu(&css->rstat_css_node, &cgrp->rstat_css_list); 4788 4789 BUG_ON(cgroup_css(cgrp, ss)); 4790 } 4791 4792 /* invoke ->css_online() on a new CSS and mark it online if successful */ 4793 static int online_css(struct cgroup_subsys_state *css) 4794 { 4795 struct cgroup_subsys *ss = css->ss; 4796 int ret = 0; 4797 4798 lockdep_assert_held(&cgroup_mutex); 4799 4800 if (ss->css_online) 4801 ret = ss->css_online(css); 4802 if (!ret) { 4803 css->flags |= CSS_ONLINE; 4804 rcu_assign_pointer(css->cgroup->subsys[ss->id], css); 4805 4806 atomic_inc(&css->online_cnt); 4807 if (css->parent) 4808 atomic_inc(&css->parent->online_cnt); 4809 } 4810 return ret; 4811 } 4812 4813 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */ 4814 static void offline_css(struct cgroup_subsys_state *css) 4815 { 4816 struct cgroup_subsys *ss = 
css->ss; 4817 4818 lockdep_assert_held(&cgroup_mutex); 4819 4820 if (!(css->flags & CSS_ONLINE)) 4821 return; 4822 4823 if (ss->css_offline) 4824 ss->css_offline(css); 4825 4826 css->flags &= ~CSS_ONLINE; 4827 RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL); 4828 4829 wake_up_all(&css->cgroup->offline_waitq); 4830 } 4831 4832 /** 4833 * css_create - create a cgroup_subsys_state 4834 * @cgrp: the cgroup the new css will be associated with 4835 * @ss: the subsys of new css 4836 * 4837 * Create a new css associated with @cgrp - @ss pair. On success, the new 4838 * css is online and installed in @cgrp. This function doesn't create the 4839 * interface files. Returns the new css on success, or an ERR_PTR-encoded -errno on failure. 4840 */ 4841 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp, 4842 struct cgroup_subsys *ss) 4843 { 4844 struct cgroup *parent = cgroup_parent(cgrp); 4845 struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss); 4846 struct cgroup_subsys_state *css; 4847 int err; 4848 4849 lockdep_assert_held(&cgroup_mutex); 4850 4851 css = ss->css_alloc(parent_css); 4852 if (!css) 4853 css = ERR_PTR(-ENOMEM); 4854 if (IS_ERR(css)) 4855 return css; 4856 4857 init_and_link_css(css, ss, cgrp); 4858 4859 err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL); 4860 if (err) 4861 goto err_free_css; 4862 4863 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL); 4864 if (err < 0) 4865 goto err_free_css; 4866 css->id = err; 4867 4868 /* @css is ready to be brought online now, make it visible */ 4869 list_add_tail_rcu(&css->sibling, &parent_css->children); 4870 cgroup_idr_replace(&ss->css_idr, css, css->id); 4871 4872 err = online_css(css); 4873 if (err) 4874 goto err_list_del; 4875 4876 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && 4877 cgroup_parent(parent)) { 4878 pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", 4879 current->comm, current->pid, ss->name); 4880 if (!strcmp(ss->name, "memory")) 4881 pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n"); 4882 ss->warned_broken_hierarchy = true; 4883 } 4884 4885 return css; 4886 4887 err_list_del: 4888 list_del_rcu(&css->sibling); 4889 err_free_css: 4890 list_del_rcu(&css->rstat_css_node); 4891 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); 4892 queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); 4893 return ERR_PTR(err); 4894 } 4895 4896 /* 4897 * The returned cgroup is fully initialized including its control mask, but 4898 * it isn't associated with its kernfs_node and doesn't have the control 4899 * mask applied. 4900 */ 4901 static struct cgroup *cgroup_create(struct cgroup *parent) 4902 { 4903 struct cgroup_root *root = parent->root; 4904 struct cgroup *cgrp, *tcgrp; 4905 int level = parent->level + 1; 4906 int ret; 4907 4908 /* allocate the cgroup and its ID, 0 is reserved for the root */ 4909 cgrp = kzalloc(struct_size(cgrp, ancestor_ids, (level + 1)), 4910 GFP_KERNEL); 4911 if (!cgrp) 4912 return ERR_PTR(-ENOMEM); 4913 4914 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL); 4915 if (ret) 4916 goto out_free_cgrp; 4917 4918 if (cgroup_on_dfl(parent)) { 4919 ret = cgroup_rstat_init(cgrp); 4920 if (ret) 4921 goto out_cancel_ref; 4922 } 4923 4924 /* 4925 * Temporarily set the pointer to NULL, so idr_find() won't return 4926 * a half-baked cgroup. 
4927 */ 4928 cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL); 4929 if (cgrp->id < 0) { 4930 ret = -ENOMEM; 4931 goto out_stat_exit; 4932 } 4933 4934 init_cgroup_housekeeping(cgrp); 4935 4936 cgrp->self.parent = &parent->self; 4937 cgrp->root = root; 4938 cgrp->level = level; 4939 4940 ret = psi_cgroup_alloc(cgrp); 4941 if (ret) 4942 goto out_idr_free; 4943 4944 ret = cgroup_bpf_inherit(cgrp); 4945 if (ret) 4946 goto out_psi_free; 4947 4948 for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { 4949 cgrp->ancestor_ids[tcgrp->level] = tcgrp->id; 4950 4951 if (tcgrp != cgrp) 4952 tcgrp->nr_descendants++; 4953 } 4954 4955 if (notify_on_release(parent)) 4956 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); 4957 4958 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) 4959 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); 4960 4961 cgrp->self.serial_nr = css_serial_nr_next++; 4962 4963 /* allocation complete, commit to creation */ 4964 list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children); 4965 atomic_inc(&root->nr_cgrps); 4966 cgroup_get_live(parent); 4967 4968 /* 4969 * @cgrp is now fully operational. If something fails after this 4970 * point, it'll be released via the normal destruction path. 4971 */ 4972 cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id); 4973 4974 /* 4975 * On the default hierarchy, a child doesn't automatically inherit 4976 * subtree_control from the parent. Each is configured manually. 4977 */ 4978 if (!cgroup_on_dfl(cgrp)) 4979 cgrp->subtree_control = cgroup_control(cgrp); 4980 4981 cgroup_propagate_control(cgrp); 4982 4983 return cgrp; 4984 4985 out_psi_free: 4986 psi_cgroup_free(cgrp); 4987 out_idr_free: 4988 cgroup_idr_remove(&root->cgroup_idr, cgrp->id); 4989 out_stat_exit: 4990 if (cgroup_on_dfl(parent)) 4991 cgroup_rstat_exit(cgrp); 4992 out_cancel_ref: 4993 percpu_ref_exit(&cgrp->self.refcnt); 4994 out_free_cgrp: 4995 kfree(cgrp); 4996 return ERR_PTR(ret); 4997 } 4998 4999 static bool cgroup_check_hierarchy_limits(struct cgroup *parent) 5000 { 5001 struct cgroup *cgroup; 5002 int ret = false; 5003 int level = 1; 5004 5005 lockdep_assert_held(&cgroup_mutex); 5006 5007 for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) { 5008 if (cgroup->nr_descendants >= cgroup->max_descendants) 5009 goto fail; 5010 5011 if (level > cgroup->max_depth) 5012 goto fail; 5013 5014 level++; 5015 } 5016 5017 ret = true; 5018 fail: 5019 return ret; 5020 } 5021 5022 int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) 5023 { 5024 struct cgroup *parent, *cgrp; 5025 struct kernfs_node *kn; 5026 int ret; 5027 5028 /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */ 5029 if (strchr(name, '\n')) 5030 return -EINVAL; 5031 5032 parent = cgroup_kn_lock_live(parent_kn, false); 5033 if (!parent) 5034 return -ENODEV; 5035 5036 if (!cgroup_check_hierarchy_limits(parent)) { 5037 ret = -EAGAIN; 5038 goto out_unlock; 5039 } 5040 5041 cgrp = cgroup_create(parent); 5042 if (IS_ERR(cgrp)) { 5043 ret = PTR_ERR(cgrp); 5044 goto out_unlock; 5045 } 5046 5047 /* create the directory */ 5048 kn = kernfs_create_dir(parent->kn, name, mode, cgrp); 5049 if (IS_ERR(kn)) { 5050 ret = PTR_ERR(kn); 5051 goto out_destroy; 5052 } 5053 cgrp->kn = kn; 5054 5055 /* 5056 * This extra ref will be put in cgroup_free_fn() and guarantees 5057 * that @cgrp->kn is always accessible. 
5058 */ 5059 kernfs_get(kn); 5060 5061 ret = cgroup_kn_set_ugid(kn); 5062 if (ret) 5063 goto out_destroy; 5064 5065 ret = css_populate_dir(&cgrp->self); 5066 if (ret) 5067 goto out_destroy; 5068 5069 ret = cgroup_apply_control_enable(cgrp); 5070 if (ret) 5071 goto out_destroy; 5072 5073 TRACE_CGROUP_PATH(mkdir, cgrp); 5074 5075 /* let's create and online css's */ 5076 kernfs_activate(kn); 5077 5078 ret = 0; 5079 goto out_unlock; 5080 5081 out_destroy: 5082 cgroup_destroy_locked(cgrp); 5083 out_unlock: 5084 cgroup_kn_unlock(parent_kn); 5085 return ret; 5086 } 5087 5088 /* 5089 * This is called when the refcnt of a css is confirmed to be killed. 5090 * css_tryget_online() is now guaranteed to fail. Tell the subsystem to 5091 * initiate destruction and put the css ref from kill_css(). 5092 */ 5093 static void css_killed_work_fn(struct work_struct *work) 5094 { 5095 struct cgroup_subsys_state *css = 5096 container_of(work, struct cgroup_subsys_state, destroy_work); 5097 5098 mutex_lock(&cgroup_mutex); 5099 5100 do { 5101 offline_css(css); 5102 css_put(css); 5103 /* @css can't go away while we're holding cgroup_mutex */ 5104 css = css->parent; 5105 } while (css && atomic_dec_and_test(&css->online_cnt)); 5106 5107 mutex_unlock(&cgroup_mutex); 5108 } 5109 5110 /* css kill confirmation processing requires process context, bounce */ 5111 static void css_killed_ref_fn(struct percpu_ref *ref) 5112 { 5113 struct cgroup_subsys_state *css = 5114 container_of(ref, struct cgroup_subsys_state, refcnt); 5115 5116 if (atomic_dec_and_test(&css->online_cnt)) { 5117 INIT_WORK(&css->destroy_work, css_killed_work_fn); 5118 queue_work(cgroup_destroy_wq, &css->destroy_work); 5119 } 5120 } 5121 5122 /** 5123 * kill_css - destroy a css 5124 * @css: css to destroy 5125 * 5126 * This function initiates destruction of @css by removing cgroup interface 5127 * files and putting its base reference. ->css_offline() will be invoked 5128 * asynchronously once css_tryget_online() is guaranteed to fail and when 5129 * the reference count reaches zero, @css will be released. 5130 */ 5131 static void kill_css(struct cgroup_subsys_state *css) 5132 { 5133 lockdep_assert_held(&cgroup_mutex); 5134 5135 if (css->flags & CSS_DYING) 5136 return; 5137 5138 css->flags |= CSS_DYING; 5139 5140 /* 5141 * This must happen before css is disassociated from its cgroup. 5142 * See seq_css() for details. 5143 */ 5144 css_clear_dir(css); 5145 5146 /* 5147 * Killing would put the base ref, but we need to keep it alive 5148 * until after ->css_offline(). 5149 */ 5150 css_get(css); 5151 5152 /* 5153 * cgroup core guarantees that, by the time ->css_offline() is 5154 * invoked, no new css reference will be given out via 5155 * css_tryget_online(). We can't simply call percpu_ref_kill() and 5156 * proceed to offlining css's because percpu_ref_kill() doesn't 5157 * guarantee that the ref is seen as killed on all CPUs on return. 5158 * 5159 * Use percpu_ref_kill_and_confirm() to get notifications as each 5160 * css is confirmed to be seen as killed on all CPUs. 5161 */ 5162 percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn); 5163 } 5164 5165 /** 5166 * cgroup_destroy_locked - the first stage of cgroup destruction 5167 * @cgrp: cgroup to be destroyed 5168 * 5169 * css's make use of percpu refcnts whose killing latency shouldn't be 5170 * exposed to userland and are RCU protected. Also, cgroup core needs to 5171 * guarantee that css_tryget_online() won't succeed by the time 5172 * ->css_offline() is invoked. 
To satisfy all the requirements, 5173 * destruction is implemented in the following two steps. 5174 * 5175 * s1. Verify @cgrp can be destroyed and mark it dying. Remove all 5176 * userland visible parts and start killing the percpu refcnts of 5177 * css's. Set up so that the next stage will be kicked off once all 5178 * the percpu refcnts are confirmed to be killed. 5179 * 5180 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the 5181 * rest of destruction. Once all cgroup references are gone, the 5182 * cgroup is RCU-freed. 5183 * 5184 * This function implements s1. After this step, @cgrp is gone as far as 5185 * the userland is concerned and a new cgroup with the same name may be 5186 * created. As cgroup doesn't care about the names internally, this 5187 * doesn't cause any problem. 5188 */ 5189 static int cgroup_destroy_locked(struct cgroup *cgrp) 5190 __releases(&cgroup_mutex) __acquires(&cgroup_mutex) 5191 { 5192 struct cgroup *tcgrp, *parent = cgroup_parent(cgrp); 5193 struct cgroup_subsys_state *css; 5194 struct cgrp_cset_link *link; 5195 int ssid; 5196 5197 lockdep_assert_held(&cgroup_mutex); 5198 5199 /* 5200 * Only migration can raise populated from zero and we're already 5201 * holding cgroup_mutex. 5202 */ 5203 if (cgroup_is_populated(cgrp)) 5204 return -EBUSY; 5205 5206 /* 5207 * Make sure there are no live children. We can't test emptiness of 5208 * ->self.children as dead children linger on it while being 5209 * drained; otherwise, "rmdir parent/child parent" may fail. 5210 */ 5211 if (css_has_online_children(&cgrp->self)) 5212 return -EBUSY; 5213 5214 /* 5215 * Mark @cgrp and the associated csets dead. The former prevents 5216 * further task migration and child creation by disabling 5217 * cgroup_lock_live_group(). The latter makes the csets ignored by 5218 * the migration path. 
5219 */ 5220 cgrp->self.flags &= ~CSS_ONLINE; 5221 5222 spin_lock_irq(&css_set_lock); 5223 list_for_each_entry(link, &cgrp->cset_links, cset_link) 5224 link->cset->dead = true; 5225 spin_unlock_irq(&css_set_lock); 5226 5227 /* initiate massacre of all css's */ 5228 for_each_css(css, ssid, cgrp) 5229 kill_css(css); 5230 5231 /* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */ 5232 css_clear_dir(&cgrp->self); 5233 kernfs_remove(cgrp->kn); 5234 5235 if (parent && cgroup_is_threaded(cgrp)) 5236 parent->nr_threaded_children--; 5237 5238 for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) { 5239 tcgrp->nr_descendants--; 5240 tcgrp->nr_dying_descendants++; 5241 } 5242 5243 cgroup1_check_for_release(parent); 5244 5245 /* put the base reference */ 5246 percpu_ref_kill(&cgrp->self.refcnt); 5247 5248 return 0; 5249 }; 5250 5251 int cgroup_rmdir(struct kernfs_node *kn) 5252 { 5253 struct cgroup *cgrp; 5254 int ret = 0; 5255 5256 cgrp = cgroup_kn_lock_live(kn, false); 5257 if (!cgrp) 5258 return 0; 5259 5260 ret = cgroup_destroy_locked(cgrp); 5261 if (!ret) 5262 TRACE_CGROUP_PATH(rmdir, cgrp); 5263 5264 cgroup_kn_unlock(kn); 5265 return ret; 5266 } 5267 5268 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { 5269 .show_options = cgroup_show_options, 5270 .remount_fs = cgroup_remount, 5271 .mkdir = cgroup_mkdir, 5272 .rmdir = cgroup_rmdir, 5273 .show_path = cgroup_show_path, 5274 }; 5275 5276 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early) 5277 { 5278 struct cgroup_subsys_state *css; 5279 5280 pr_debug("Initializing cgroup subsys %s\n", ss->name); 5281 5282 mutex_lock(&cgroup_mutex); 5283 5284 idr_init(&ss->css_idr); 5285 INIT_LIST_HEAD(&ss->cfts); 5286 5287 /* Create the root cgroup state for this subsystem */ 5288 ss->root = &cgrp_dfl_root; 5289 css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss)); 5290 /* We don't handle early failures gracefully */ 5291 BUG_ON(IS_ERR(css)); 5292 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp); 5293 5294 /* 5295 * Root csses are never destroyed and we can't initialize 5296 * percpu_ref during early init. Disable refcnting. 5297 */ 5298 css->flags |= CSS_NO_REF; 5299 5300 if (early) { 5301 /* allocation can't be done safely during early init */ 5302 css->id = 1; 5303 } else { 5304 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL); 5305 BUG_ON(css->id < 0); 5306 } 5307 5308 /* Update the init_css_set to contain a subsys 5309 * pointer to this state - since the subsystem is 5310 * newly registered, all tasks and hence the 5311 * init_css_set is in the subsystem's root cgroup. */ 5312 init_css_set.subsys[ss->id] = css; 5313 5314 have_fork_callback |= (bool)ss->fork << ss->id; 5315 have_exit_callback |= (bool)ss->exit << ss->id; 5316 have_free_callback |= (bool)ss->free << ss->id; 5317 have_canfork_callback |= (bool)ss->can_fork << ss->id; 5318 5319 /* At system boot, before all subsystems have been 5320 * registered, no tasks have been forked, so we don't 5321 * need to invoke fork callbacks here. */ 5322 BUG_ON(!list_empty(&init_task.tasks)); 5323 5324 BUG_ON(online_css(css)); 5325 5326 mutex_unlock(&cgroup_mutex); 5327 } 5328 5329 /** 5330 * cgroup_init_early - cgroup initialization at system boot 5331 * 5332 * Initialize cgroups at system boot, and initialize any 5333 * subsystems that request early init. 
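 *
 * A subsystem requests early init by setting ->early_init in its
 * cgroup_subsys definition (sketch; the callbacks are illustrative):
 *
 *	struct cgroup_subsys my_cgrp_subsys = {
 *		.css_alloc	= my_css_alloc,
 *		.css_free	= my_css_free,
 *		.early_init	= true,
 *	};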
5334 */ 5335 int __init cgroup_init_early(void) 5336 { 5337 static struct cgroup_sb_opts __initdata opts; 5338 struct cgroup_subsys *ss; 5339 int i; 5340 5341 init_cgroup_root(&cgrp_dfl_root, &opts); 5342 cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF; 5343 5344 RCU_INIT_POINTER(init_task.cgroups, &init_css_set); 5345 5346 for_each_subsys(ss, i) { 5347 WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, 5348 "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n", 5349 i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, 5350 ss->id, ss->name); 5351 WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, 5352 "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); 5353 5354 ss->id = i; 5355 ss->name = cgroup_subsys_name[i]; 5356 if (!ss->legacy_name) 5357 ss->legacy_name = cgroup_subsys_name[i]; 5358 5359 if (ss->early_init) 5360 cgroup_init_subsys(ss, true); 5361 } 5362 return 0; 5363 } 5364 5365 static u16 cgroup_disable_mask __initdata; 5366 5367 /** 5368 * cgroup_init - cgroup initialization 5369 * 5370 * Register cgroup filesystem and /proc file, and initialize 5371 * any subsystems that didn't request early init. 5372 */ 5373 int __init cgroup_init(void) 5374 { 5375 struct cgroup_subsys *ss; 5376 int ssid; 5377 5378 BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16); 5379 BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem)); 5380 BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); 5381 BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files)); 5382 5383 cgroup_rstat_boot(); 5384 5385 /* 5386 * The latency of the synchronize_rcu() is too high for cgroups, 5387 * avoid it at the cost of forcing all readers into the slow path. 5388 */ 5389 rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss); 5390 5391 get_user_ns(init_cgroup_ns.user_ns); 5392 5393 mutex_lock(&cgroup_mutex); 5394 5395 /* 5396 * Add init_css_set to the hash table so that dfl_root can link to 5397 * it during init. 5398 */ 5399 hash_add(css_set_table, &init_css_set.hlist, 5400 css_set_hash(init_css_set.subsys)); 5401 5402 BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0)); 5403 5404 mutex_unlock(&cgroup_mutex); 5405 5406 for_each_subsys(ss, ssid) { 5407 if (ss->early_init) { 5408 struct cgroup_subsys_state *css = 5409 init_css_set.subsys[ss->id]; 5410 5411 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, 5412 GFP_KERNEL); 5413 BUG_ON(css->id < 0); 5414 } else { 5415 cgroup_init_subsys(ss, false); 5416 } 5417 5418 list_add_tail(&init_css_set.e_cset_node[ssid], 5419 &cgrp_dfl_root.cgrp.e_csets[ssid]); 5420 5421 /* 5422 * Setting dfl_root subsys_mask needs to consider the 5423 * disabled flag and cftype registration needs kmalloc, 5424 * both of which aren't available during early_init. 
5425 */ 5426 if (cgroup_disable_mask & (1 << ssid)) { 5427 static_branch_disable(cgroup_subsys_enabled_key[ssid]); 5428 printk(KERN_INFO "Disabling %s control group subsystem\n", 5429 ss->name); 5430 continue; 5431 } 5432 5433 if (cgroup1_ssid_disabled(ssid)) 5434 printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n", 5435 ss->name); 5436 5437 cgrp_dfl_root.subsys_mask |= 1 << ss->id; 5438 5439 /* implicit controllers must be threaded too */ 5440 WARN_ON(ss->implicit_on_dfl && !ss->threaded); 5441 5442 if (ss->implicit_on_dfl) 5443 cgrp_dfl_implicit_ss_mask |= 1 << ss->id; 5444 else if (!ss->dfl_cftypes) 5445 cgrp_dfl_inhibit_ss_mask |= 1 << ss->id; 5446 5447 if (ss->threaded) 5448 cgrp_dfl_threaded_ss_mask |= 1 << ss->id; 5449 5450 if (ss->dfl_cftypes == ss->legacy_cftypes) { 5451 WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes)); 5452 } else { 5453 WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes)); 5454 WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes)); 5455 } 5456 5457 if (ss->bind) 5458 ss->bind(init_css_set.subsys[ssid]); 5459 5460 mutex_lock(&cgroup_mutex); 5461 css_populate_dir(init_css_set.subsys[ssid]); 5462 mutex_unlock(&cgroup_mutex); 5463 } 5464 5465 /* init_css_set.subsys[] has been updated, re-hash */ 5466 hash_del(&init_css_set.hlist); 5467 hash_add(css_set_table, &init_css_set.hlist, 5468 css_set_hash(init_css_set.subsys)); 5469 5470 WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup")); 5471 WARN_ON(register_filesystem(&cgroup_fs_type)); 5472 WARN_ON(register_filesystem(&cgroup2_fs_type)); 5473 WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show)); 5474 5475 return 0; 5476 } 5477 5478 static int __init cgroup_wq_init(void) 5479 { 5480 /* 5481 * There isn't much point in executing destruction path in 5482 * parallel. Good chunk is serialized with cgroup_mutex anyway. 5483 * Use 1 for @max_active. 5484 * 5485 * We would prefer to do this in cgroup_init() above, but that 5486 * is called before init_workqueues(): so leave this until after. 5487 */ 5488 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); 5489 BUG_ON(!cgroup_destroy_wq); 5490 return 0; 5491 } 5492 core_initcall(cgroup_wq_init); 5493 5494 void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, 5495 char *buf, size_t buflen) 5496 { 5497 struct kernfs_node *kn; 5498 5499 kn = kernfs_get_node_by_id(cgrp_dfl_root.kf_root, id); 5500 if (!kn) 5501 return; 5502 kernfs_path(kn, buf, buflen); 5503 kernfs_put(kn); 5504 } 5505 5506 /* 5507 * proc_cgroup_show() 5508 * - Print task's cgroup paths into seq_file, one line for each hierarchy 5509 * - Used for /proc/<pid>/cgroup. 5510 */ 5511 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, 5512 struct pid *pid, struct task_struct *tsk) 5513 { 5514 char *buf; 5515 int retval; 5516 struct cgroup_root *root; 5517 5518 retval = -ENOMEM; 5519 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5520 if (!buf) 5521 goto out; 5522 5523 mutex_lock(&cgroup_mutex); 5524 spin_lock_irq(&css_set_lock); 5525 5526 for_each_root(root) { 5527 struct cgroup_subsys *ss; 5528 struct cgroup *cgrp; 5529 int ssid, count = 0; 5530 5531 if (root == &cgrp_dfl_root && !cgrp_dfl_visible) 5532 continue; 5533 5534 seq_printf(m, "%d:", root->hierarchy_id); 5535 if (root != &cgrp_dfl_root) 5536 for_each_subsys(ss, ssid) 5537 if (root->subsys_mask & (1 << ssid)) 5538 seq_printf(m, "%s%s", count++ ? "," : "", 5539 ss->legacy_name); 5540 if (strlen(root->name)) 5541 seq_printf(m, "%sname=%s", count ? 
"," : "", 5542 root->name); 5543 seq_putc(m, ':'); 5544 5545 cgrp = task_cgroup_from_root(tsk, root); 5546 5547 /* 5548 * On traditional hierarchies, all zombie tasks show up as 5549 * belonging to the root cgroup. On the default hierarchy, 5550 * while a zombie doesn't show up in "cgroup.procs" and 5551 * thus can't be migrated, its /proc/PID/cgroup keeps 5552 * reporting the cgroup it belonged to before exiting. If 5553 * the cgroup is removed before the zombie is reaped, 5554 * " (deleted)" is appended to the cgroup path. 5555 */ 5556 if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) { 5557 retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX, 5558 current->nsproxy->cgroup_ns); 5559 if (retval >= PATH_MAX) 5560 retval = -ENAMETOOLONG; 5561 if (retval < 0) 5562 goto out_unlock; 5563 5564 seq_puts(m, buf); 5565 } else { 5566 seq_puts(m, "/"); 5567 } 5568 5569 if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp)) 5570 seq_puts(m, " (deleted)\n"); 5571 else 5572 seq_putc(m, '\n'); 5573 } 5574 5575 retval = 0; 5576 out_unlock: 5577 spin_unlock_irq(&css_set_lock); 5578 mutex_unlock(&cgroup_mutex); 5579 kfree(buf); 5580 out: 5581 return retval; 5582 } 5583 5584 /** 5585 * cgroup_fork - initialize cgroup related fields during copy_process() 5586 * @child: pointer to task_struct of forking parent process. 5587 * 5588 * A task is associated with the init_css_set until cgroup_post_fork() 5589 * attaches it to the parent's css_set. Empty cg_list indicates that 5590 * @child isn't holding reference to its css_set. 5591 */ 5592 void cgroup_fork(struct task_struct *child) 5593 { 5594 RCU_INIT_POINTER(child->cgroups, &init_css_set); 5595 INIT_LIST_HEAD(&child->cg_list); 5596 } 5597 5598 /** 5599 * cgroup_can_fork - called on a new task before the process is exposed 5600 * @child: the task in question. 5601 * 5602 * This calls the subsystem can_fork() callbacks. If the can_fork() callback 5603 * returns an error, the fork aborts with that error code. This allows for 5604 * a cgroup subsystem to conditionally allow or deny new forks. 5605 */ 5606 int cgroup_can_fork(struct task_struct *child) 5607 { 5608 struct cgroup_subsys *ss; 5609 int i, j, ret; 5610 5611 do_each_subsys_mask(ss, i, have_canfork_callback) { 5612 ret = ss->can_fork(child); 5613 if (ret) 5614 goto out_revert; 5615 } while_each_subsys_mask(); 5616 5617 return 0; 5618 5619 out_revert: 5620 for_each_subsys(ss, j) { 5621 if (j >= i) 5622 break; 5623 if (ss->cancel_fork) 5624 ss->cancel_fork(child); 5625 } 5626 5627 return ret; 5628 } 5629 5630 /** 5631 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork() 5632 * @child: the task in question 5633 * 5634 * This calls the cancel_fork() callbacks if a fork failed *after* 5635 * cgroup_can_fork() succeded. 5636 */ 5637 void cgroup_cancel_fork(struct task_struct *child) 5638 { 5639 struct cgroup_subsys *ss; 5640 int i; 5641 5642 for_each_subsys(ss, i) 5643 if (ss->cancel_fork) 5644 ss->cancel_fork(child); 5645 } 5646 5647 /** 5648 * cgroup_post_fork - called on a new task after adding it to the task list 5649 * @child: the task in question 5650 * 5651 * Adds the task to the list running through its css_set if necessary and 5652 * call the subsystem fork() callbacks. Has to be after the task is 5653 * visible on the task list in case we race with the first call to 5654 * cgroup_task_iter_start() - to guarantee that the new task ends up on its 5655 * list. 
5656 */ 5657 void cgroup_post_fork(struct task_struct *child) 5658 { 5659 struct cgroup_subsys *ss; 5660 int i; 5661 5662 /* 5663 * This may race against cgroup_enable_task_cg_lists(). As that 5664 * function sets use_task_css_set_links before grabbing 5665 * tasklist_lock and we just went through tasklist_lock to add 5666 * @child, it's guaranteed that either we see the set 5667 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees 5668 * @child during its iteration. 5669 * 5670 * If we won the race, @child is associated with %current's 5671 * css_set. Grabbing css_set_lock guarantees both that the 5672 * association is stable, and, on completion of the parent's 5673 * migration, @child is visible in the source of migration or 5674 * already in the destination cgroup. This guarantee is necessary 5675 * when implementing operations which need to migrate all tasks of 5676 * a cgroup to another. 5677 * 5678 * Note that if we lose to cgroup_enable_task_cg_lists(), @child 5679 * will remain in init_css_set. This is safe because all tasks are 5680 * in the init_css_set before cg_links is enabled and there's no 5681 * operation which transfers all tasks out of init_css_set. 5682 */ 5683 if (use_task_css_set_links) { 5684 struct css_set *cset; 5685 5686 spin_lock_irq(&css_set_lock); 5687 cset = task_css_set(current); 5688 if (list_empty(&child->cg_list)) { 5689 get_css_set(cset); 5690 cset->nr_tasks++; 5691 css_set_move_task(child, NULL, cset, false); 5692 } 5693 spin_unlock_irq(&css_set_lock); 5694 } 5695 5696 /* 5697 * Call ss->fork(). This must happen after @child is linked on 5698 * css_set; otherwise, @child might change state between ->fork() 5699 * and addition to css_set. 5700 */ 5701 do_each_subsys_mask(ss, i, have_fork_callback) { 5702 ss->fork(child); 5703 } while_each_subsys_mask(); 5704 } 5705 5706 /** 5707 * cgroup_exit - detach cgroup from exiting task 5708 * @tsk: pointer to task_struct of exiting process 5709 * 5710 * Description: Detach cgroup from @tsk and release it. 5711 * 5712 * Note that cgroups marked notify_on_release force every task in 5713 * them to take the global cgroup_mutex when exiting. 5714 * This could impact scaling on very large systems. Be reluctant to 5715 * use notify_on_release cgroups where very high task exit scaling 5716 * is required on large systems. 5717 * 5718 * We set the exiting task's cgroup to the root cgroup (top_cgroup). We 5719 * call cgroup_exit() while the task is still competent to handle 5720 * notify_on_release(), then leave the task attached to the root cgroup in 5721 * each hierarchy for the remainder of its exit. No need to bother with 5722 * init_css_set refcnting. init_css_set never goes away and we can't race 5723 * with the migration path - PF_EXITING is visible to the migration path. 5724 */ 5725 void cgroup_exit(struct task_struct *tsk) 5726 { 5727 struct cgroup_subsys *ss; 5728 struct css_set *cset; 5729 int i; 5730 5731 /* 5732 * Unlink @tsk from its css_set. As the migration path can't race 5733 * with us, we can check css_set and cg_list without synchronization. 
5734 */ 5735 cset = task_css_set(tsk); 5736 5737 if (!list_empty(&tsk->cg_list)) { 5738 spin_lock_irq(&css_set_lock); 5739 css_set_move_task(tsk, cset, NULL, false); 5740 cset->nr_tasks--; 5741 spin_unlock_irq(&css_set_lock); 5742 } else { 5743 get_css_set(cset); 5744 } 5745 5746 /* see cgroup_post_fork() for details */ 5747 do_each_subsys_mask(ss, i, have_exit_callback) { 5748 ss->exit(tsk); 5749 } while_each_subsys_mask(); 5750 } 5751 5752 void cgroup_free(struct task_struct *task) 5753 { 5754 struct css_set *cset = task_css_set(task); 5755 struct cgroup_subsys *ss; 5756 int ssid; 5757 5758 do_each_subsys_mask(ss, ssid, have_free_callback) { 5759 ss->free(task); 5760 } while_each_subsys_mask(); 5761 5762 put_css_set(cset); 5763 } 5764 5765 static int __init cgroup_disable(char *str) 5766 { 5767 struct cgroup_subsys *ss; 5768 char *token; 5769 int i; 5770 5771 while ((token = strsep(&str, ",")) != NULL) { 5772 if (!*token) 5773 continue; 5774 5775 for_each_subsys(ss, i) { 5776 if (strcmp(token, ss->name) && 5777 strcmp(token, ss->legacy_name)) 5778 continue; 5779 cgroup_disable_mask |= 1 << i; 5780 } 5781 } 5782 return 1; 5783 } 5784 __setup("cgroup_disable=", cgroup_disable); 5785 5786 void __init __weak enable_debug_cgroup(void) { } 5787 5788 static int __init enable_cgroup_debug(char *str) 5789 { 5790 cgroup_debug = true; 5791 enable_debug_cgroup(); 5792 return 1; 5793 } 5794 __setup("cgroup_debug", enable_cgroup_debug); 5795 5796 /** 5797 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry 5798 * @dentry: directory dentry of interest 5799 * @ss: subsystem of interest 5800 * 5801 * If @dentry is a directory for a cgroup which has @ss enabled on it, try 5802 * to get the corresponding css and return it. If such css doesn't exist 5803 * or can't be pinned, an ERR_PTR value is returned. 5804 */ 5805 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, 5806 struct cgroup_subsys *ss) 5807 { 5808 struct kernfs_node *kn = kernfs_node_from_dentry(dentry); 5809 struct file_system_type *s_type = dentry->d_sb->s_type; 5810 struct cgroup_subsys_state *css = NULL; 5811 struct cgroup *cgrp; 5812 5813 /* is @dentry a cgroup dir? */ 5814 if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) || 5815 !kn || kernfs_type(kn) != KERNFS_DIR) 5816 return ERR_PTR(-EBADF); 5817 5818 rcu_read_lock(); 5819 5820 /* 5821 * This path doesn't originate from kernfs and @kn could already 5822 * have been or be removed at any point. @kn->priv is RCU 5823 * protected for this access. See css_release_work_fn() for details. 5824 */ 5825 cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); 5826 if (cgrp) 5827 css = cgroup_css(cgrp, ss); 5828 5829 if (!css || !css_tryget_online(css)) 5830 css = ERR_PTR(-ENOENT); 5831 5832 rcu_read_unlock(); 5833 return css; 5834 } 5835 5836 /** 5837 * css_from_id - lookup css by id 5838 * @id: the cgroup id 5839 * @ss: cgroup subsys to be looked into 5840 * 5841 * Returns the css if there's valid one with @id, otherwise returns NULL. 5842 * Should be called under rcu_read_lock(). 5843 */ 5844 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) 5845 { 5846 WARN_ON_ONCE(!rcu_read_lock_held()); 5847 return idr_find(&ss->css_idr, id); 5848 } 5849 5850 /** 5851 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path 5852 * @path: path on the default hierarchy 5853 * 5854 * Find the cgroup at @path on the default hierarchy, increment its 5855 * reference count and return it. 
/**
 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
 * @path: path on the default hierarchy
 *
 * Find the cgroup at @path on the default hierarchy, increment its
 * reference count and return it.  Returns a pointer to the found cgroup on
 * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
 * if @path points to a non-directory.
 */
struct cgroup *cgroup_get_from_path(const char *path)
{
	struct kernfs_node *kn;
	struct cgroup *cgrp;

	mutex_lock(&cgroup_mutex);

	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
	if (kn) {
		if (kernfs_type(kn) == KERNFS_DIR) {
			cgrp = kn->priv;
			cgroup_get_live(cgrp);
		} else {
			cgrp = ERR_PTR(-ENOTDIR);
		}
		kernfs_put(kn);
	} else {
		cgrp = ERR_PTR(-ENOENT);
	}

	mutex_unlock(&cgroup_mutex);
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);

/**
 * cgroup_get_from_fd - get a cgroup pointer from a fd
 * @fd: fd obtained by open(cgroup2_dir)
 *
 * Find the cgroup from a fd which should be obtained
 * by opening a cgroup directory.  Returns a pointer to the
 * cgroup on success.  ERR_PTR is returned if the cgroup
 * cannot be found.
 */
struct cgroup *cgroup_get_from_fd(int fd)
{
	struct cgroup_subsys_state *css;
	struct cgroup *cgrp;
	struct file *f;

	f = fget_raw(fd);
	if (!f)
		return ERR_PTR(-EBADF);

	css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
	fput(f);
	if (IS_ERR(css))
		return ERR_CAST(css);

	cgrp = css->cgroup;
	if (!cgroup_on_dfl(cgrp)) {
		cgroup_put(cgrp);
		return ERR_PTR(-EBADF);
	}

	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
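/*
 * Illustrative sketch: in-kernel users pin a cgroup on the default
 * hierarchy by path and drop the reference when done.  "/foo/bar" is a
 * made-up path relative to the cgroup2 mount root.
 *
 *	struct cgroup *cgrp;
 *
 *	cgrp = cgroup_get_from_path("/foo/bar");
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	...				// use cgrp
 *	cgroup_put(cgrp);
 */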
5954 */ 5955 cgroup_get(sock_cgroup_ptr(skcd)); 5956 return; 5957 } 5958 5959 rcu_read_lock(); 5960 5961 while (true) { 5962 struct css_set *cset; 5963 5964 cset = task_css_set(current); 5965 if (likely(cgroup_tryget(cset->dfl_cgrp))) { 5966 skcd->val = (unsigned long)cset->dfl_cgrp; 5967 break; 5968 } 5969 cpu_relax(); 5970 } 5971 5972 rcu_read_unlock(); 5973 } 5974 5975 void cgroup_sk_free(struct sock_cgroup_data *skcd) 5976 { 5977 cgroup_put(sock_cgroup_ptr(skcd)); 5978 } 5979 5980 #endif /* CONFIG_SOCK_CGROUP_DATA */ 5981 5982 #ifdef CONFIG_CGROUP_BPF 5983 int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, 5984 enum bpf_attach_type type, u32 flags) 5985 { 5986 int ret; 5987 5988 mutex_lock(&cgroup_mutex); 5989 ret = __cgroup_bpf_attach(cgrp, prog, type, flags); 5990 mutex_unlock(&cgroup_mutex); 5991 return ret; 5992 } 5993 int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, 5994 enum bpf_attach_type type, u32 flags) 5995 { 5996 int ret; 5997 5998 mutex_lock(&cgroup_mutex); 5999 ret = __cgroup_bpf_detach(cgrp, prog, type, flags); 6000 mutex_unlock(&cgroup_mutex); 6001 return ret; 6002 } 6003 int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, 6004 union bpf_attr __user *uattr) 6005 { 6006 int ret; 6007 6008 mutex_lock(&cgroup_mutex); 6009 ret = __cgroup_bpf_query(cgrp, attr, uattr); 6010 mutex_unlock(&cgroup_mutex); 6011 return ret; 6012 } 6013 #endif /* CONFIG_CGROUP_BPF */ 6014 6015 #ifdef CONFIG_SYSFS 6016 static ssize_t show_delegatable_files(struct cftype *files, char *buf, 6017 ssize_t size, const char *prefix) 6018 { 6019 struct cftype *cft; 6020 ssize_t ret = 0; 6021 6022 for (cft = files; cft && cft->name[0] != '\0'; cft++) { 6023 if (!(cft->flags & CFTYPE_NS_DELEGATABLE)) 6024 continue; 6025 6026 if (prefix) 6027 ret += snprintf(buf + ret, size - ret, "%s.", prefix); 6028 6029 ret += snprintf(buf + ret, size - ret, "%s\n", cft->name); 6030 6031 if (WARN_ON(ret >= size)) 6032 break; 6033 } 6034 6035 return ret; 6036 } 6037 6038 static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr, 6039 char *buf) 6040 { 6041 struct cgroup_subsys *ss; 6042 int ssid; 6043 ssize_t ret = 0; 6044 6045 ret = show_delegatable_files(cgroup_base_files, buf, PAGE_SIZE - ret, 6046 NULL); 6047 6048 for_each_subsys(ss, ssid) 6049 ret += show_delegatable_files(ss->dfl_cftypes, buf + ret, 6050 PAGE_SIZE - ret, 6051 cgroup_subsys_name[ssid]); 6052 6053 return ret; 6054 } 6055 static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate); 6056 6057 static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr, 6058 char *buf) 6059 { 6060 return snprintf(buf, PAGE_SIZE, "nsdelegate\n"); 6061 } 6062 static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features); 6063 6064 static struct attribute *cgroup_sysfs_attrs[] = { 6065 &cgroup_delegate_attr.attr, 6066 &cgroup_features_attr.attr, 6067 NULL, 6068 }; 6069 6070 static const struct attribute_group cgroup_sysfs_attr_group = { 6071 .attrs = cgroup_sysfs_attrs, 6072 .name = "cgroup", 6073 }; 6074 6075 static int __init cgroup_sysfs_init(void) 6076 { 6077 return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group); 6078 } 6079 subsys_initcall(cgroup_sysfs_init); 6080 #endif /* CONFIG_SYSFS */ 6081