1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Generic process-grouping system.
4 *
5 * Based originally on the cpuset system, extracted by Paul Menage
6 * Copyright (C) 2006 Google, Inc
7 *
8 * Notifications support
9 * Copyright (C) 2009 Nokia Corporation
10 * Author: Kirill A. Shutemov
11 *
12 * Copyright notices from the original cpuset code:
13 * --------------------------------------------------
14 * Copyright (C) 2003 BULL SA.
15 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
16 *
17 * Portions derived from Patrick Mochel's sysfs code.
18 * sysfs is Copyright (c) 2001-3 Patrick Mochel
19 *
20 * 2003-10-10 Written by Simon Derr.
21 * 2003-10-22 Updates by Stephen Hemminger.
22 * 2004 May-July Rework by Paul Jackson.
23 * ---------------------------------------------------
24 */
25
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28 #include "cgroup-internal.h"
29
30 #include <linux/bpf-cgroup.h>
31 #include <linux/cred.h>
32 #include <linux/errno.h>
33 #include <linux/init_task.h>
34 #include <linux/kernel.h>
35 #include <linux/magic.h>
36 #include <linux/mutex.h>
37 #include <linux/mount.h>
38 #include <linux/pagemap.h>
39 #include <linux/proc_fs.h>
40 #include <linux/rcupdate.h>
41 #include <linux/sched.h>
42 #include <linux/sched/task.h>
43 #include <linux/slab.h>
44 #include <linux/spinlock.h>
45 #include <linux/percpu-rwsem.h>
46 #include <linux/string.h>
47 #include <linux/hashtable.h>
48 #include <linux/idr.h>
49 #include <linux/kthread.h>
50 #include <linux/atomic.h>
51 #include <linux/cpuset.h>
52 #include <linux/proc_ns.h>
53 #include <linux/nsproxy.h>
54 #include <linux/file.h>
55 #include <linux/fs_parser.h>
56 #include <linux/sched/cputime.h>
57 #include <linux/sched/deadline.h>
58 #include <linux/psi.h>
59 #include <linux/nstree.h>
60 #include <linux/irq_work.h>
61 #include <net/sock.h>
62
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/cgroup.h>
65
66 #define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
67 MAX_CFTYPE_NAME + 2)
68 /* let's not notify more than 100 times per second */
69 #define CGROUP_FILE_NOTIFY_MIN_INTV DIV_ROUND_UP(HZ, 100)
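/*
 * Illustration (not part of the original source): DIV_ROUND_UP(HZ, 100)
 * rounds the interval up so the cap still holds for small HZ. With
 * HZ == 1000 the minimum interval is 10 jiffies (10ms, exactly 100/s);
 * with HZ == 250 it is 3 jiffies (12ms, ~83 notifications per second).
 */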
70
71 /*
72 * cgroup_mutex is the master lock. Any modification to cgroup or its
73 * hierarchy must be performed while holding it.
74 *
75 * css_set_lock protects task->cgroups pointer, the list of css_set
76 * objects, and the chain of tasks off each css_set.
77 *
78 * These locks are exported if CONFIG_PROVE_RCU or CONFIG_LOCKDEP so that
79 * accessors in cgroup.h can use them for lockdep annotations.
80 */
81 DEFINE_MUTEX(cgroup_mutex);
82 DEFINE_SPINLOCK(css_set_lock);
83
84 #if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP)
85 EXPORT_SYMBOL_GPL(cgroup_mutex);
86 EXPORT_SYMBOL_GPL(css_set_lock);
87 #endif
88
89 struct blocking_notifier_head cgroup_lifetime_notifier =
90 BLOCKING_NOTIFIER_INIT(cgroup_lifetime_notifier);
91
92 DEFINE_SPINLOCK(trace_cgroup_path_lock);
93 char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
94 static bool cgroup_debug __read_mostly;
95
96 /*
97 * Protects cgroup_idr and css_idr so that IDs can be released without
98 * grabbing cgroup_mutex.
99 */
100 static DEFINE_SPINLOCK(cgroup_idr_lock);
101
102 DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
103
104 #define cgroup_assert_mutex_or_rcu_locked() \
105 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
106 !lockdep_is_held(&cgroup_mutex), \
107 "cgroup_mutex or RCU read lock required");
108
109 /*
110 * cgroup destruction makes heavy use of work items and there can be a lot
111 * of concurrent destructions. Use a separate workqueue so that cgroup
112 * destruction work items don't end up filling up max_active of system_percpu_wq
113 * which may lead to deadlock.
114 *
115 * A cgroup destruction should enqueue work sequentially to:
116 * cgroup_offline_wq: use for css offline work
117 * cgroup_release_wq: use for css release work
118 * cgroup_free_wq: use for free work
119 *
120 * Rationale for using separate workqueues:
121 * The cgroup root free work may depend on completion of other css offline
122 * operations. If all tasks were enqueued to a single workqueue, this could
123 * create a deadlock scenario where:
124 * - Free work waits for other css offline work to complete.
125 * - But other css offline work is queued after free work in the same queue.
126 *
127 * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
128 * 1. umount net_prio
129 * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
130 * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
131 * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
132 * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
133 * which can never complete as it's behind in the same queue and
134 * workqueue's max_active is 1.
135 */
136 static struct workqueue_struct *cgroup_offline_wq;
137 static struct workqueue_struct *cgroup_release_wq;
138 static struct workqueue_struct *cgroup_free_wq;
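/*
 * Minimal sketch (illustrative, not the actual initialization code) of how
 * such dedicated queues are typically allocated, with max_active limited
 * to 1 as assumed by the deadlock example above:
 *
 *	cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
 *	cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
 *	cgroup_free_wq    = alloc_workqueue("cgroup_free", 0, 1);
 */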
139
140 /* generate an array of cgroup subsystem pointers */
141 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
142 struct cgroup_subsys *cgroup_subsys[] = {
143 #include <linux/cgroup_subsys.h>
144 };
145 #undef SUBSYS
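/*
 * For illustration: with CONFIG_CPUSETS enabled, cgroup_subsys.h contains
 * SUBSYS(cpuset), so the initializer above expands roughly to
 *
 *	[cpuset_cgrp_id] = &cpuset_cgrp_subsys,
 *
 * i.e. each entry is indexed by the subsystem ID enum generated from the
 * same include file, keeping the two in sync automatically.
 */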
146
147 /* array of cgroup subsystem names */
148 #define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
149 static const char *cgroup_subsys_name[] = {
150 #include <linux/cgroup_subsys.h>
151 };
152 #undef SUBSYS
153
154 /* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
155 #define SUBSYS(_x) \
156 DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key); \
157 DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key); \
158 EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key); \
159 EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
160 #include <linux/cgroup_subsys.h>
161 #undef SUBSYS
162
163 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
164 static struct static_key_true *cgroup_subsys_enabled_key[] = {
165 #include <linux/cgroup_subsys.h>
166 };
167 #undef SUBSYS
168
169 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
170 static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
171 #include <linux/cgroup_subsys.h>
172 };
173 #undef SUBSYS
174
175 static DEFINE_PER_CPU(struct css_rstat_cpu, root_rstat_cpu);
176 static DEFINE_PER_CPU(struct cgroup_rstat_base_cpu, root_rstat_base_cpu);
177
178 /* the default hierarchy */
179 struct cgroup_root cgrp_dfl_root = {
180 .cgrp.self.rstat_cpu = &root_rstat_cpu,
181 .cgrp.rstat_base_cpu = &root_rstat_base_cpu,
182 };
183 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
184
185 /*
186 * The default hierarchy always exists but is hidden until mounted for the
187 * first time. This is for backward compatibility.
188 */
189 bool cgrp_dfl_visible;
190
191 /* some controllers are not supported in the default hierarchy */
192 static u32 cgrp_dfl_inhibit_ss_mask;
193
194 /* some controllers are implicitly enabled on the default hierarchy */
195 static u32 cgrp_dfl_implicit_ss_mask;
196
197 /* some controllers can be threaded on the default hierarchy */
198 static u32 cgrp_dfl_threaded_ss_mask;
199
200 /* The list of hierarchy roots */
201 LIST_HEAD(cgroup_roots);
202 static int cgroup_root_count;
203
204 /* hierarchy ID allocation and mapping, protected by cgroup_mutex */
205 static DEFINE_IDR(cgroup_hierarchy_idr);
206
207 /*
208 * Assign a monotonically increasing serial number to csses. It guarantees
209 * cgroups with bigger numbers are newer than those with smaller numbers.
210 * Also, as csses are always appended to the parent's ->children list, it
211 * guarantees that sibling csses are always sorted in the ascending serial
212 * number order on the list. Protected by cgroup_mutex.
213 */
214 static u64 css_serial_nr_next = 1;
215
216 /*
217 * These bitmasks identify subsystems with specific features to avoid
218 * having to do iterative checks repeatedly.
219 */
220 static u32 have_fork_callback __read_mostly;
221 static u32 have_exit_callback __read_mostly;
222 static u32 have_release_callback __read_mostly;
223 static u32 have_canfork_callback __read_mostly;
224
225 static bool have_favordynmods __ro_after_init = IS_ENABLED(CONFIG_CGROUP_FAVOR_DYNMODS);
226
227 /*
228 * Write protected by cgroup_mutex and write-lock of cgroup_threadgroup_rwsem,
229 * read protected by either.
230 *
231 * Can only be turned on, but not turned off.
232 */
233 bool cgroup_enable_per_threadgroup_rwsem __read_mostly;
234
235 /* cgroup namespace for init task */
236 struct cgroup_namespace init_cgroup_ns = {
237 .ns = NS_COMMON_INIT(init_cgroup_ns),
238 .user_ns = &init_user_ns,
239 .root_cset = &init_css_set,
240 };
241
242 static struct file_system_type cgroup2_fs_type;
243 static struct cftype cgroup_base_files[];
244 static struct cftype cgroup_psi_files[];
245
246 /* cgroup optional features */
247 enum cgroup_opt_features {
248 #ifdef CONFIG_PSI
249 OPT_FEATURE_PRESSURE,
250 #endif
251 OPT_FEATURE_COUNT
252 };
253
254 static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = {
255 #ifdef CONFIG_PSI
256 "pressure",
257 #endif
258 };
259
260 static u16 cgroup_feature_disable_mask __read_mostly;
261
262 static int cgroup_apply_control(struct cgroup *cgrp);
263 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
264 static void css_task_iter_skip(struct css_task_iter *it,
265 struct task_struct *task);
266 static int cgroup_destroy_locked(struct cgroup *cgrp);
267 static void cgroup_finish_destroy(struct cgroup *cgrp);
268 static void kill_css_sync(struct cgroup_subsys_state *css);
269 static void kill_css_finish(struct cgroup_subsys_state *css);
270 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
271 struct cgroup_subsys *ss);
272 static void css_release(struct percpu_ref *ref);
273 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
274 struct cgroup *cgrp, struct cftype cfts[],
275 bool is_add);
276 static void cgroup_rt_init(void);
277
278 #ifdef CONFIG_DEBUG_CGROUP_REF
279 #define CGROUP_REF_FN_ATTRS noinline
280 #define CGROUP_REF_EXPORT(fn) EXPORT_SYMBOL_GPL(fn);
281 #include <linux/cgroup_refcnt.h>
282 #endif
283
284 /**
285 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
286 * @ssid: subsys ID of interest
287 *
288 * cgroup_subsys_enabled() can only be used with literal subsys names, which
289 * is fine for individual subsystems but unsuitable for cgroup core. This is
290 * a slower, static_key_enabled()-based test indexed by @ssid.
291 */
292 bool cgroup_ssid_enabled(int ssid)
293 {
294 if (!CGROUP_HAS_SUBSYS_CONFIG)
295 return false;
296
297 return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
298 }
299
300 /**
301 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
302 * @cgrp: the cgroup of interest
303 *
304 * The default hierarchy is the v2 interface of cgroup and this function
305 * can be used to test whether a cgroup is on the default hierarchy for
306 * cases where a subsystem should behave differently depending on the
307 * interface version.
308 *
309 * List of changed behaviors:
310 *
311 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
312 * and "name" are disallowed.
313 *
314 * - When mounting an existing superblock, mount options should match.
315 *
316 * - rename(2) is disallowed.
317 *
318 * - "tasks" is removed. Everything should be at process granularity. Use
319 * "cgroup.procs" instead.
320 *
321 * - "cgroup.procs" is not sorted. pids will be unique unless they got
322 * recycled in-between reads.
323 *
324 * - "release_agent" and "notify_on_release" are removed. Replacement
325 * notification mechanism will be implemented.
326 *
327 * - "cgroup.clone_children" is removed.
328 *
329 * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
330 * and its descendants contain no task; otherwise, 1. The file also
331 * generates kernfs notification which can be monitored through poll and
332 * [di]notify when the value of the file changes.
333 *
334 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
335 * take masks of ancestors with non-empty cpus/mems, instead of being
336 * moved to an ancestor.
337 *
338 * - cpuset: a task can be moved into an empty cpuset, and again it takes
339 * masks of ancestors.
340 *
341 * - blkcg: blk-throttle becomes properly hierarchical.
342 */
343 bool cgroup_on_dfl(const struct cgroup *cgrp)
344 {
345 return cgrp->root == &cgrp_dfl_root;
346 }
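/*
 * Illustrative use (hypothetical helpers): a controller that must behave
 * differently per interface version can simply branch on this test:
 *
 *	if (cgroup_on_dfl(cgrp))
 *		foo_apply_v2_defaults(cgrp);
 *	else
 *		foo_apply_v1_defaults(cgrp);
 *
 * where the foo_apply_*() names are made up for this example.
 */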
347
348 /* IDR wrappers which synchronize using cgroup_idr_lock */
349 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
350 gfp_t gfp_mask)
351 {
352 int ret;
353
354 idr_preload(gfp_mask);
355 spin_lock_bh(&cgroup_idr_lock);
356 ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
357 spin_unlock_bh(&cgroup_idr_lock);
358 idr_preload_end();
359 return ret;
360 }
361
362 static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
363 {
364 void *ret;
365
366 spin_lock_bh(&cgroup_idr_lock);
367 ret = idr_replace(idr, ptr, id);
368 spin_unlock_bh(&cgroup_idr_lock);
369 return ret;
370 }
371
372 static void cgroup_idr_remove(struct idr *idr, int id)
373 {
374 spin_lock_bh(&cgroup_idr_lock);
375 idr_remove(idr, id);
376 spin_unlock_bh(&cgroup_idr_lock);
377 }
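/*
 * Usage sketch (illustrative): cgroup_idr_lock is taken with BH disabled,
 * so the allocation under it must not sleep. idr_preload() performs the
 * sleeping allocation up front and __GFP_DIRECT_RECLAIM is masked off for
 * the locked idr_alloc(). A caller looks roughly like
 *
 *	id = cgroup_idr_alloc(&some_idr, ptr, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	cgroup_idr_remove(&some_idr, id);
 *
 * where some_idr stands in for the cgroup_idr/css_idr style users.
 */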
378
379 static bool cgroup_has_tasks(struct cgroup *cgrp)
380 {
381 return cgrp->nr_populated_csets;
382 }
383
384 static bool cgroup_is_threaded(struct cgroup *cgrp)
385 {
386 return cgrp->dom_cgrp != cgrp;
387 }
388
389 /* can @cgrp host both domain and threaded children? */
390 static bool cgroup_is_mixable(struct cgroup *cgrp)
391 {
392 /*
393 * Root isn't under domain level resource control exempting it from
394 * the no-internal-process constraint, so it can serve as a thread
395 * root and a parent of resource domains at the same time.
396 */
397 return !cgroup_parent(cgrp);
398 }
399
400 /* can @cgrp become a thread root? Should always be true for a thread root */
401 static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
402 {
403 /* mixables don't care */
404 if (cgroup_is_mixable(cgrp))
405 return true;
406
407 /* domain roots can't be nested under threaded */
408 if (cgroup_is_threaded(cgrp))
409 return false;
410
411 /* can only have either domain or threaded children */
412 if (cgrp->nr_populated_domain_children)
413 return false;
414
415 /* and no domain controllers can be enabled */
416 if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
417 return false;
418
419 return true;
420 }
421
422 /* is @cgrp root of a threaded subtree? */
423 static bool cgroup_is_thread_root(struct cgroup *cgrp)
424 {
425 /* thread root should be a domain */
426 if (cgroup_is_threaded(cgrp))
427 return false;
428
429 /* a domain w/ threaded children is a thread root */
430 if (cgrp->nr_threaded_children)
431 return true;
432
433 /*
434 * A domain which has tasks and explicit threaded controllers
435 * enabled is a thread root.
436 */
437 if (cgroup_has_tasks(cgrp) &&
438 (cgrp->subtree_control & cgrp_dfl_threaded_ss_mask))
439 return true;
440
441 return false;
442 }
443
444 /* a domain which isn't connected to the root w/o breakage can't be used */
445 static bool cgroup_is_valid_domain(struct cgroup *cgrp)
446 {
447 /* the cgroup itself can be a thread root */
448 if (cgroup_is_threaded(cgrp))
449 return false;
450
451 /* but the ancestors can't be unless mixable */
452 while ((cgrp = cgroup_parent(cgrp))) {
453 if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp))
454 return false;
455 if (cgroup_is_threaded(cgrp))
456 return false;
457 }
458
459 return true;
460 }
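/*
 * Worked example (illustrative layout):
 *
 *	root
 *	|-- A		thread root (domain with threaded children)
 *	|   `-- T	threaded cgroup
 *	`-- B		plain domain cgroup
 *
 * B is a valid domain. A non-threaded cgroup nested under A or T would not
 * be: its ancestor chain contains a non-root thread root (A) or a threaded
 * cgroup (T), both of which the loop above rejects.
 */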
461
462 /* subsystems visibly enabled on a cgroup */
463 static u32 cgroup_control(struct cgroup *cgrp)
464 {
465 struct cgroup *parent = cgroup_parent(cgrp);
466 u32 root_ss_mask = cgrp->root->subsys_mask;
467
468 if (parent) {
469 u32 ss_mask = parent->subtree_control;
470
471 /* threaded cgroups can only have threaded controllers */
472 if (cgroup_is_threaded(cgrp))
473 ss_mask &= cgrp_dfl_threaded_ss_mask;
474 return ss_mask;
475 }
476
477 if (cgroup_on_dfl(cgrp))
478 root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
479 cgrp_dfl_implicit_ss_mask);
480 return root_ss_mask;
481 }
482
483 /* subsystems enabled on a cgroup */
484 static u32 cgroup_ss_mask(struct cgroup *cgrp)
485 {
486 struct cgroup *parent = cgroup_parent(cgrp);
487
488 if (parent) {
489 u32 ss_mask = parent->subtree_ss_mask;
490
491 /* threaded cgroups can only have threaded controllers */
492 if (cgroup_is_threaded(cgrp))
493 ss_mask &= cgrp_dfl_threaded_ss_mask;
494 return ss_mask;
495 }
496
497 return cgrp->root->subsys_mask;
498 }
499
500 /**
501 * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
502 * @cgrp: the cgroup of interest
503 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
504 *
505 * Similar to cgroup_css() but returns the effective css, which is defined
506 * as the matching css of the nearest ancestor including self which has @ss
507 * enabled. If @ss is associated with the hierarchy @cgrp is on, this
508 * function is guaranteed to return non-NULL css.
509 */
510 static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
511 struct cgroup_subsys *ss)
512 {
513 lockdep_assert_held(&cgroup_mutex);
514
515 if (!ss)
516 return &cgrp->self;
517
518 /*
519 * This function is used while updating css associations and thus
520 * can't test the csses directly. Test ss_mask.
521 */
522 while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
523 cgrp = cgroup_parent(cgrp);
524 if (!cgrp)
525 return NULL;
526 }
527
528 return cgroup_css(cgrp, ss);
529 }
530
531 /**
532 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
533 * @cgrp: the cgroup of interest
534 * @ss: the subsystem of interest
535 *
536 * Find and get the effective css of @cgrp for @ss. The effective css is
537 * defined as the matching css of the nearest ancestor including self which
538 * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
539 * the root css is returned, so this function always returns a valid css.
540 *
541 * The returned css is not guaranteed to be online, and therefore it is the
542 * caller's responsibility to try to get a reference to it.
543 */
544 struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
545 struct cgroup_subsys *ss)
546 {
547 struct cgroup_subsys_state *css;
548
549 if (!CGROUP_HAS_SUBSYS_CONFIG)
550 return NULL;
551
552 do {
553 css = cgroup_css(cgrp, ss);
554
555 if (css)
556 return css;
557 cgrp = cgroup_parent(cgrp);
558 } while (cgrp);
559
560 return init_css_set.subsys[ss->id];
561 }
562
563 /**
564 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
565 * @cgrp: the cgroup of interest
566 * @ss: the subsystem of interest
567 *
568 * Find and get the effective css of @cgrp for @ss. The effective css is
569 * defined as the matching css of the nearest ancestor including self which
570 * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
571 * the root css is returned, so this function always returns a valid css.
572 * The returned css must be put using css_put().
573 */
574 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
575 struct cgroup_subsys *ss)
576 {
577 struct cgroup_subsys_state *css;
578
579 if (!CGROUP_HAS_SUBSYS_CONFIG)
580 return NULL;
581
582 rcu_read_lock();
583
584 do {
585 css = cgroup_css(cgrp, ss);
586
587 if (css && css_tryget_online(css))
588 goto out_unlock;
589 cgrp = cgroup_parent(cgrp);
590 } while (cgrp);
591
592 css = init_css_set.subsys[ss->id];
593 css_get(css);
594 out_unlock:
595 rcu_read_unlock();
596 return css;
597 }
598 EXPORT_SYMBOL_GPL(cgroup_get_e_css);
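/*
 * Illustrative pairing (hypothetical caller): the reference obtained here
 * must be dropped with css_put() once the caller is done with it, e.g.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
 *	if (css) {
 *		...
 *		css_put(css);
 *	}
 *
 * (the NULL check only matters when CGROUP_HAS_SUBSYS_CONFIG is off).
 */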
599
600 static void cgroup_get_live(struct cgroup *cgrp)
601 {
602 WARN_ON_ONCE(cgroup_is_dead(cgrp));
603 cgroup_get(cgrp);
604 }
605
606 /**
607 * __cgroup_task_count - count the number of tasks in a cgroup. The caller
608 * is responsible for taking the css_set_lock.
609 * @cgrp: the cgroup in question
610 */
611 int __cgroup_task_count(const struct cgroup *cgrp)
612 {
613 int count = 0;
614 struct cgrp_cset_link *link;
615
616 lockdep_assert_held(&css_set_lock);
617
618 list_for_each_entry(link, &cgrp->cset_links, cset_link)
619 count += link->cset->nr_tasks;
620
621 return count;
622 }
623
624 /**
625 * cgroup_task_count - count the number of tasks in a cgroup.
626 * @cgrp: the cgroup in question
627 */
628 int cgroup_task_count(const struct cgroup *cgrp)
629 {
630 int count;
631
632 spin_lock_irq(&css_set_lock);
633 count = __cgroup_task_count(cgrp);
634 spin_unlock_irq(&css_set_lock);
635
636 return count;
637 }
638
639 static struct cgroup *kn_priv(struct kernfs_node *kn)
640 {
641 struct kernfs_node *parent;
642 /*
643 * The parent cannot be replaced due to KERNFS_ROOT_INVARIANT_PARENT.
644 * Therefore it is always safe to dereference this pointer outside of a
645 * RCU section.
646 */
647 parent = rcu_dereference_check(kn->__parent,
648 kernfs_root_flags(kn) & KERNFS_ROOT_INVARIANT_PARENT);
649 return parent->priv;
650 }
651
652 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
653 {
654 struct cgroup *cgrp = kn_priv(of->kn);
655 struct cftype *cft = of_cft(of);
656
657 /*
658 * This is an open-coded and unprotected implementation of cgroup_css().
659 * seq_css() is only called from a kernfs file operation which has
660 * an active reference on the file. Because all the subsystem
661 * files are drained before a css is disassociated from a cgroup,
662 * the matching css from the cgroup's subsys table is guaranteed to
663 * be and stay valid until the enclosing operation is complete.
664 */
665 if (CGROUP_HAS_SUBSYS_CONFIG && cft->ss)
666 return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
667 else
668 return &cgrp->self;
669 }
670 EXPORT_SYMBOL_GPL(of_css);
671
672 /**
673 * for_each_css - iterate all css's of a cgroup
674 * @css: the iteration cursor
675 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
676 * @cgrp: the target cgroup to iterate css's of
677 *
678 * Should be called under cgroup_mutex.
679 */
680 #define for_each_css(css, ssid, cgrp) \
681 for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
682 if (!((css) = rcu_dereference_check( \
683 (cgrp)->subsys[(ssid)], \
684 lockdep_is_held(&cgroup_mutex)))) { } \
685 else
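/*
 * Illustrative use (assuming cgroup_mutex is held as noted above):
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	for_each_css(css, ssid, cgrp)
 *		pr_debug("subsys %d online=%d\n", ssid,
 *			 !!(css->flags & CSS_ONLINE));
 */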
686
687 /**
688 * do_each_subsys_mask - filter for_each_subsys with a bitmask
689 * @ss: the iteration cursor
690 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
691 * @ss_mask: the bitmask
692 *
693 * The block will only run for cases where the ssid-th bit (1 << ssid) of
694 * @ss_mask is set.
695 */
696 #define do_each_subsys_mask(ss, ssid, ss_mask) do { \
697 unsigned long __ss_mask = (ss_mask); \
698 if (!CGROUP_HAS_SUBSYS_CONFIG) { \
699 (ssid) = 0; \
700 break; \
701 } \
702 for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) { \
703 (ss) = cgroup_subsys[ssid]; \
704 {
705
706 #define while_each_subsys_mask() \
707 } \
708 } \
709 } while (false)
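/*
 * Illustrative use: iterate only the subsystems with a fork callback,
 * mirroring how the have_fork_callback mask defined earlier is consumed:
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	do_each_subsys_mask(ss, ssid, have_fork_callback) {
 *		ss->fork(child);
 *	} while_each_subsys_mask();
 *
 * where child is a struct task_struct * supplied by the caller.
 */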
710
711 /*
712 * The default css_set - used by init and its children prior to any
713 * hierarchies being mounted. It contains a pointer to the root state
714 * for each subsystem. Also used to anchor the list of css_sets. Not
715 * reference-counted, to improve performance when child cgroups
716 * haven't been created.
717 */
718 struct css_set init_css_set = {
719 .refcount = REFCOUNT_INIT(1),
720 .dom_cset = &init_css_set,
721 .tasks = LIST_HEAD_INIT(init_css_set.tasks),
722 .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
723 .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
724 .task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
725 .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
726 .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
727 .mg_src_preload_node = LIST_HEAD_INIT(init_css_set.mg_src_preload_node),
728 .mg_dst_preload_node = LIST_HEAD_INIT(init_css_set.mg_dst_preload_node),
729 .mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
730
731 /*
732 * The following field is re-initialized when this cset gets linked
733 * in cgroup_init(). However, let's initialize the field
734 * statically too so that the default cgroup can be accessed safely
735 * early during boot.
736 */
737 .dfl_cgrp = &cgrp_dfl_root.cgrp,
738 };
739
740 static int css_set_count = 1; /* 1 for init_css_set */
741
742 static bool css_set_threaded(struct css_set *cset)
743 {
744 return cset->dom_cset != cset;
745 }
746
747 /**
748 * css_set_populated - does a css_set contain any tasks?
749 * @cset: target css_set
750 *
751 * css_set_populated() should be the same as !!cset->nr_tasks at steady
752 * state. However, css_set_populated() can be called while a task is being
753 * added to or removed from the linked list before the nr_tasks is
754 * properly updated. Hence, we can't just look at ->nr_tasks here.
755 */
756 static bool css_set_populated(struct css_set *cset)
757 {
758 lockdep_assert_held(&css_set_lock);
759
760 return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
761 }
762
763 /**
764 * cgroup_update_populated - update the populated count of a cgroup
765 * @cgrp: the target cgroup
766 * @populated: inc or dec populated count
767 *
768 * One of the css_sets associated with @cgrp is either getting its first
769 * task or losing the last. Update @cgrp->nr_populated_* accordingly. The
770 * count is propagated towards root so that a given cgroup's
771 * nr_populated_children is zero iff none of its descendants contain any
772 * tasks.
773 *
774 * @cgrp's interface file "cgroup.populated" is zero if both
775 * @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and
776 * 1 otherwise. When the sum changes from or to zero, userland is notified
777 * that the content of the interface file has changed. This can be used to
778 * detect when @cgrp and its descendants become populated or empty.
779 */
780 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
781 {
782 struct cgroup *child = NULL;
783 int adj = populated ? 1 : -1;
784
785 lockdep_assert_held(&css_set_lock);
786
787 do {
788 bool was_populated = cgroup_is_populated(cgrp);
789
790 if (!child) {
791 cgrp->nr_populated_csets += adj;
792 } else {
793 if (cgroup_is_threaded(child))
794 cgrp->nr_populated_threaded_children += adj;
795 else
796 cgrp->nr_populated_domain_children += adj;
797 }
798
799 if (was_populated == cgroup_is_populated(cgrp))
800 break;
801
802 /*
803 * Subtree just emptied below an offlined cgrp. Fire deferred
804 * destroy. The transition is one-shot.
805 */
806 if (was_populated && !css_is_online(&cgrp->self)) {
807 cgroup_get(cgrp);
808 WARN_ON_ONCE(!queue_work(cgroup_offline_wq,
809 &cgrp->finish_destroy_work));
810 }
811
812 cgroup1_check_for_release(cgrp);
813 TRACE_CGROUP_PATH(notify_populated, cgrp,
814 cgroup_is_populated(cgrp));
815 cgroup_file_notify(&cgrp->events_file);
816
817 child = cgrp;
818 cgrp = cgroup_parent(cgrp);
819 } while (cgrp);
820 }
821
822 /**
823 * css_set_update_populated - update populated state of a css_set
824 * @cset: target css_set
825 * @populated: whether @cset is populated or depopulated
826 *
827 * @cset is either getting the first task or losing the last. Update the
828 * populated counters of all associated cgroups accordingly.
829 */
830 static void css_set_update_populated(struct css_set *cset, bool populated)
831 {
832 struct cgrp_cset_link *link;
833
834 lockdep_assert_held(&css_set_lock);
835
836 list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
837 cgroup_update_populated(link->cgrp, populated);
838 }
839
840 /*
841 * @task is leaving, advance task iterators which are pointing to it so
842 * that they can resume at the next position. Advancing an iterator might
843 * remove it from the list, use safe walk. See css_task_iter_skip() for
844 * details.
845 */
846 static void css_set_skip_task_iters(struct css_set *cset,
847 struct task_struct *task)
848 {
849 struct css_task_iter *it, *pos;
850
851 list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
852 css_task_iter_skip(it, task);
853 }
854
855 /**
856 * css_set_move_task - move a task from one css_set to another
857 * @task: task being moved
858 * @from_cset: css_set @task currently belongs to (may be NULL)
859 * @to_cset: new css_set @task is being moved to (may be NULL)
860 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
861 *
862 * Move @task from @from_cset to @to_cset. If @task didn't belong to any
863 * css_set, @from_cset can be NULL. If @task is being disassociated
864 * instead of moved, @to_cset can be NULL.
865 *
866 * This function automatically handles populated counter updates and
867 * css_task_iter adjustments but the caller is responsible for managing
868 * @from_cset and @to_cset's reference counts.
869 */
870 static void css_set_move_task(struct task_struct *task,
871 struct css_set *from_cset, struct css_set *to_cset,
872 bool use_mg_tasks)
873 {
874 lockdep_assert_held(&css_set_lock);
875
876 if (to_cset && !css_set_populated(to_cset))
877 css_set_update_populated(to_cset, true);
878
879 if (from_cset) {
880 WARN_ON_ONCE(list_empty(&task->cg_list));
881
882 css_set_skip_task_iters(from_cset, task);
883 list_del_init(&task->cg_list);
884 if (!css_set_populated(from_cset))
885 css_set_update_populated(from_cset, false);
886 } else {
887 WARN_ON_ONCE(!list_empty(&task->cg_list));
888 }
889
890 if (to_cset) {
891 /*
892 * We are synchronized through cgroup_threadgroup_rwsem
893 * against PF_EXITING setting such that we can't race
894 * against cgroup_task_dead()/cgroup_task_free() dropping
895 * the css_set.
896 */
897 WARN_ON_ONCE(task->flags & PF_EXITING);
898
899 cgroup_move_task(task, to_cset);
900 list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
901 &to_cset->tasks);
902 }
903 }
904
905 /*
906 * hash table for css_sets. This improves the performance of finding
907 * an existing css_set. This hash doesn't (currently) take into
908 * account cgroups in empty hierarchies.
909 */
910 #define CSS_SET_HASH_BITS 7
911 static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
912
913 static unsigned long css_set_hash(struct cgroup_subsys_state **css)
914 {
915 unsigned long key = 0UL;
916 struct cgroup_subsys *ss;
917 int i;
918
919 for_each_subsys(ss, i)
920 key += (unsigned long)css[i];
921 key = (key >> 16) ^ key;
922
923 return key;
924 }
925
926 void put_css_set_locked(struct css_set *cset)
927 {
928 struct cgrp_cset_link *link, *tmp_link;
929 struct cgroup_subsys *ss;
930 int ssid;
931
932 lockdep_assert_held(&css_set_lock);
933
934 if (!refcount_dec_and_test(&cset->refcount))
935 return;
936
937 WARN_ON_ONCE(!list_empty(&cset->threaded_csets));
938
939 /* This css_set is dead. Unlink it and release cgroup and css refs */
940 for_each_subsys(ss, ssid) {
941 list_del(&cset->e_cset_node[ssid]);
942 css_put(cset->subsys[ssid]);
943 }
944 hash_del(&cset->hlist);
945 css_set_count--;
946
947 list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
948 list_del(&link->cset_link);
949 list_del(&link->cgrp_link);
950 if (cgroup_parent(link->cgrp))
951 cgroup_put(link->cgrp);
952 kfree(link);
953 }
954
955 if (css_set_threaded(cset)) {
956 list_del(&cset->threaded_csets_node);
957 put_css_set_locked(cset->dom_cset);
958 }
959
960 kfree_rcu(cset, rcu_head);
961 }
962
963 /**
964 * compare_css_sets - helper function for find_existing_css_set().
965 * @cset: candidate css_set being tested
966 * @old_cset: existing css_set for a task
967 * @new_cgrp: cgroup that's being entered by the task
968 * @template: desired set of css pointers in css_set (pre-calculated)
969 *
970 * Returns true if "cset" matches "old_cset" except for the hierarchy
971 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
972 */
973 static bool compare_css_sets(struct css_set *cset,
974 struct css_set *old_cset,
975 struct cgroup *new_cgrp,
976 struct cgroup_subsys_state *template[])
977 {
978 struct cgroup *new_dfl_cgrp;
979 struct list_head *l1, *l2;
980
981 /*
982 * On the default hierarchy, there can be csets which are
983 * associated with the same set of cgroups but different csses.
984 * Let's first ensure that csses match.
985 */
986 if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
987 return false;
988
989
990 /* @cset's domain should match the default cgroup's */
991 if (cgroup_on_dfl(new_cgrp))
992 new_dfl_cgrp = new_cgrp;
993 else
994 new_dfl_cgrp = old_cset->dfl_cgrp;
995
996 if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp)
997 return false;
998
999 /*
1000 * Compare cgroup pointers in order to distinguish between
1001 * different cgroups in hierarchies. As different cgroups may
1002 * share the same effective css, this comparison is always
1003 * necessary.
1004 */
1005 l1 = &cset->cgrp_links;
1006 l2 = &old_cset->cgrp_links;
1007 while (1) {
1008 struct cgrp_cset_link *link1, *link2;
1009 struct cgroup *cgrp1, *cgrp2;
1010
1011 l1 = l1->next;
1012 l2 = l2->next;
1013 /* See if we reached the end - both lists are equal length. */
1014 if (l1 == &cset->cgrp_links) {
1015 BUG_ON(l2 != &old_cset->cgrp_links);
1016 break;
1017 } else {
1018 BUG_ON(l2 == &old_cset->cgrp_links);
1019 }
1020 /* Locate the cgroups associated with these links. */
1021 link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
1022 link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
1023 cgrp1 = link1->cgrp;
1024 cgrp2 = link2->cgrp;
1025 /* Hierarchies should be linked in the same order. */
1026 BUG_ON(cgrp1->root != cgrp2->root);
1027
1028 /*
1029 * If this hierarchy is the hierarchy of the cgroup
1030 * that's changing, then we need to check that this
1031 * css_set points to the new cgroup; if it's any other
1032 * hierarchy, then this css_set should point to the
1033 * same cgroup as the old css_set.
1034 */
1035 if (cgrp1->root == new_cgrp->root) {
1036 if (cgrp1 != new_cgrp)
1037 return false;
1038 } else {
1039 if (cgrp1 != cgrp2)
1040 return false;
1041 }
1042 }
1043 return true;
1044 }
1045
1046 /**
1047 * find_existing_css_set - init css array and find the matching css_set
1048 * @old_cset: the css_set that we're using before the cgroup transition
1049 * @cgrp: the cgroup that we're moving into
1050 * @template: out param for the new set of csses, should be clear on entry
1051 */
1052 static struct css_set *find_existing_css_set(struct css_set *old_cset,
1053 struct cgroup *cgrp,
1054 struct cgroup_subsys_state **template)
1055 {
1056 struct cgroup_root *root = cgrp->root;
1057 struct cgroup_subsys *ss;
1058 struct css_set *cset;
1059 unsigned long key;
1060 int i;
1061
1062 /*
1063 * Build the set of subsystem state objects that we want to see in the
1064 * new css_set. While subsystems can change globally, the entries here
1065 * won't change, so no need for locking.
1066 */
1067 for_each_subsys(ss, i) {
1068 if (root->subsys_mask & (1UL << i)) {
1069 /*
1070 * @ss is in this hierarchy, so we want the
1071 * effective css from @cgrp.
1072 */
1073 template[i] = cgroup_e_css_by_mask(cgrp, ss);
1074 } else {
1075 /*
1076 * @ss is not in this hierarchy, so we don't want
1077 * to change the css.
1078 */
1079 template[i] = old_cset->subsys[i];
1080 }
1081 }
1082
1083 key = css_set_hash(template);
1084 hash_for_each_possible(css_set_table, cset, hlist, key) {
1085 if (!compare_css_sets(cset, old_cset, cgrp, template))
1086 continue;
1087
1088 /* This css_set matches what we need */
1089 return cset;
1090 }
1091
1092 /* No existing cgroup group matched */
1093 return NULL;
1094 }
1095
1096 static void free_cgrp_cset_links(struct list_head *links_to_free)
1097 {
1098 struct cgrp_cset_link *link, *tmp_link;
1099
1100 list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
1101 list_del(&link->cset_link);
1102 kfree(link);
1103 }
1104 }
1105
1106 /**
1107 * allocate_cgrp_cset_links - allocate cgrp_cset_links
1108 * @count: the number of links to allocate
1109 * @tmp_links: list_head the allocated links are put on
1110 *
1111 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
1112 * through ->cset_link. Returns 0 on success or -errno.
1113 */
1114 static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
1115 {
1116 struct cgrp_cset_link *link;
1117 int i;
1118
1119 INIT_LIST_HEAD(tmp_links);
1120
1121 for (i = 0; i < count; i++) {
1122 link = kzalloc_obj(*link);
1123 if (!link) {
1124 free_cgrp_cset_links(tmp_links);
1125 return -ENOMEM;
1126 }
1127 list_add(&link->cset_link, tmp_links);
1128 }
1129 return 0;
1130 }
1131
1132 /**
1133 * link_css_set - a helper function to link a css_set to a cgroup
1134 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
1135 * @cset: the css_set to be linked
1136 * @cgrp: the destination cgroup
1137 */
1138 static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
1139 struct cgroup *cgrp)
1140 {
1141 struct cgrp_cset_link *link;
1142
1143 BUG_ON(list_empty(tmp_links));
1144
1145 if (cgroup_on_dfl(cgrp))
1146 cset->dfl_cgrp = cgrp;
1147
1148 link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
1149 link->cset = cset;
1150 link->cgrp = cgrp;
1151
1152 /*
1153 * Always add links to the tail of the lists so that the lists are
1154 * in chronological order.
1155 */
1156 list_move_tail(&link->cset_link, &cgrp->cset_links);
1157 list_add_tail(&link->cgrp_link, &cset->cgrp_links);
1158
1159 if (cgroup_parent(cgrp))
1160 cgroup_get_live(cgrp);
1161 }
1162
1163 /**
1164 * find_css_set - return a new css_set with one cgroup updated
1165 * @old_cset: the baseline css_set
1166 * @cgrp: the cgroup to be updated
1167 *
1168 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
1169 * substituted into the appropriate hierarchy.
1170 */
1171 static struct css_set *find_css_set(struct css_set *old_cset,
1172 struct cgroup *cgrp)
1173 {
1174 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
1175 struct css_set *cset;
1176 struct list_head tmp_links;
1177 struct cgrp_cset_link *link;
1178 struct cgroup_subsys *ss;
1179 unsigned long key;
1180 int ssid;
1181
1182 lockdep_assert_held(&cgroup_mutex);
1183
1184 /* First see if we already have a cgroup group that matches
1185 * the desired set */
1186 spin_lock_irq(&css_set_lock);
1187 cset = find_existing_css_set(old_cset, cgrp, template);
1188 if (cset)
1189 get_css_set(cset);
1190 spin_unlock_irq(&css_set_lock);
1191
1192 if (cset)
1193 return cset;
1194
1195 cset = kzalloc_obj(*cset);
1196 if (!cset)
1197 return NULL;
1198
1199 /* Allocate all the cgrp_cset_link objects that we'll need */
1200 if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
1201 kfree(cset);
1202 return NULL;
1203 }
1204
1205 refcount_set(&cset->refcount, 1);
1206 cset->dom_cset = cset;
1207 INIT_LIST_HEAD(&cset->tasks);
1208 INIT_LIST_HEAD(&cset->mg_tasks);
1209 INIT_LIST_HEAD(&cset->dying_tasks);
1210 INIT_LIST_HEAD(&cset->task_iters);
1211 INIT_LIST_HEAD(&cset->threaded_csets);
1212 INIT_HLIST_NODE(&cset->hlist);
1213 INIT_LIST_HEAD(&cset->cgrp_links);
1214 INIT_LIST_HEAD(&cset->mg_src_preload_node);
1215 INIT_LIST_HEAD(&cset->mg_dst_preload_node);
1216 INIT_LIST_HEAD(&cset->mg_node);
1217
1218 /* Copy the set of subsystem state objects generated in
1219 * find_existing_css_set() */
1220 memcpy(cset->subsys, template, sizeof(cset->subsys));
1221
1222 spin_lock_irq(&css_set_lock);
1223 /* Add reference counts and links from the new css_set. */
1224 list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
1225 struct cgroup *c = link->cgrp;
1226
1227 if (c->root == cgrp->root)
1228 c = cgrp;
1229 link_css_set(&tmp_links, cset, c);
1230 }
1231
1232 BUG_ON(!list_empty(&tmp_links));
1233
1234 css_set_count++;
1235
1236 /* Add @cset to the hash table */
1237 key = css_set_hash(cset->subsys);
1238 hash_add(css_set_table, &cset->hlist, key);
1239
1240 for_each_subsys(ss, ssid) {
1241 struct cgroup_subsys_state *css = cset->subsys[ssid];
1242
1243 list_add_tail(&cset->e_cset_node[ssid],
1244 &css->cgroup->e_csets[ssid]);
1245 css_get(css);
1246 }
1247
1248 spin_unlock_irq(&css_set_lock);
1249
1250 /*
1251 * If @cset should be threaded, look up the matching dom_cset and
1252 * link them up. We first fully initialize @cset then look for the
1253 * dom_cset. It's simpler this way and safe as @cset is guaranteed
1254 * to stay empty until we return.
1255 */
1256 if (cgroup_is_threaded(cset->dfl_cgrp)) {
1257 struct css_set *dcset;
1258
1259 dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
1260 if (!dcset) {
1261 put_css_set(cset);
1262 return NULL;
1263 }
1264
1265 spin_lock_irq(&css_set_lock);
1266 cset->dom_cset = dcset;
1267 list_add_tail(&cset->threaded_csets_node,
1268 &dcset->threaded_csets);
1269 spin_unlock_irq(&css_set_lock);
1270 }
1271
1272 return cset;
1273 }
1274
1275 struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
1276 {
1277 struct cgroup *root_cgrp = kernfs_root_to_node(kf_root)->priv;
1278
1279 return root_cgrp->root;
1280 }
1281
1282 void cgroup_favor_dynmods(struct cgroup_root *root, bool favor)
1283 {
1284 bool favoring = root->flags & CGRP_ROOT_FAVOR_DYNMODS;
1285
1286 /*
1287 * see the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
1288 * favordynmods can flip while a task is between
1289 * cgroup_threadgroup_change_begin() and end(), so down_write the global
1290 * cgroup_threadgroup_rwsem to synchronize them.
1291 *
1292 * Once cgroup_enable_per_threadgroup_rwsem is enabled, holding
1293 * cgroup_threadgroup_rwsem doesn't exclude tasks between
1294 * cgroup_threadgroup_change_begin() and end() and thus it's unsafe to
1295 * turn off. As the scenario is unlikely, simply disallow disabling once
1296 * enabled and print out a warning.
1297 */
1298 percpu_down_write(&cgroup_threadgroup_rwsem);
1299 if (favor && !favoring) {
1300 cgroup_enable_per_threadgroup_rwsem = true;
1301 rcu_sync_enter(&cgroup_threadgroup_rwsem.rss);
1302 root->flags |= CGRP_ROOT_FAVOR_DYNMODS;
1303 } else if (!favor && favoring) {
1304 if (cgroup_enable_per_threadgroup_rwsem)
1305 pr_warn_once("cgroup favordynmods: per threadgroup rwsem mechanism can't be disabled\n");
1306 rcu_sync_exit(&cgroup_threadgroup_rwsem.rss);
1307 root->flags &= ~CGRP_ROOT_FAVOR_DYNMODS;
1308 }
1309 percpu_up_write(&cgroup_threadgroup_rwsem);
1310 }
1311
1312 static int cgroup_init_root_id(struct cgroup_root *root)
1313 {
1314 int id;
1315
1316 lockdep_assert_held(&cgroup_mutex);
1317
1318 id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
1319 if (id < 0)
1320 return id;
1321
1322 root->hierarchy_id = id;
1323 return 0;
1324 }
1325
1326 static void cgroup_exit_root_id(struct cgroup_root *root)
1327 {
1328 lockdep_assert_held(&cgroup_mutex);
1329
1330 idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
1331 }
1332
1333 void cgroup_free_root(struct cgroup_root *root)
1334 {
1335 kfree_rcu(root, rcu);
1336 }
1337
1338 static void cgroup_destroy_root(struct cgroup_root *root)
1339 {
1340 struct cgroup *cgrp = &root->cgrp;
1341 struct cgrp_cset_link *link, *tmp_link;
1342 int ret;
1343
1344 trace_cgroup_destroy_root(root);
1345
1346 cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1347
1348 BUG_ON(atomic_read(&root->nr_cgrps));
1349 BUG_ON(!list_empty(&cgrp->self.children));
1350
1351 ret = blocking_notifier_call_chain(&cgroup_lifetime_notifier,
1352 CGROUP_LIFETIME_OFFLINE, cgrp);
1353 WARN_ON_ONCE(notifier_to_errno(ret));
1354
1355 /* Rebind all subsystems back to the default hierarchy */
1356 WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));
1357
1358 /*
1359 * Release all the links from cset_links to this hierarchy's
1360 * root cgroup
1361 */
1362 spin_lock_irq(&css_set_lock);
1363
1364 list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
1365 list_del(&link->cset_link);
1366 list_del(&link->cgrp_link);
1367 kfree(link);
1368 }
1369
1370 spin_unlock_irq(&css_set_lock);
1371
1372 WARN_ON_ONCE(list_empty(&root->root_list));
1373 list_del_rcu(&root->root_list);
1374 cgroup_root_count--;
1375
1376 if (!have_favordynmods)
1377 cgroup_favor_dynmods(root, false);
1378
1379 cgroup_exit_root_id(root);
1380
1381 cgroup_unlock();
1382
1383 kernfs_destroy_root(root->kf_root);
1384 cgroup_free_root(root);
1385 }
1386
1387 /*
1388 * Returned cgroup is without refcount but it's valid as long as cset pins it.
1389 */
1390 static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
1391 struct cgroup_root *root)
1392 {
1393 struct cgroup *res_cgroup = NULL;
1394
1395 if (cset == &init_css_set) {
1396 res_cgroup = &root->cgrp;
1397 } else if (root == &cgrp_dfl_root) {
1398 res_cgroup = cset->dfl_cgrp;
1399 } else {
1400 struct cgrp_cset_link *link;
1401 lockdep_assert_held(&css_set_lock);
1402
1403 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1404 struct cgroup *c = link->cgrp;
1405
1406 if (c->root == root) {
1407 res_cgroup = c;
1408 break;
1409 }
1410 }
1411 }
1412
1413 /*
1414 * If cgroup_mutex is not held, the cgrp_cset_link will be freed
1415 * before we remove the cgroup root from the root_list. Consequently,
1416 * when accessing a cgroup root, the cset_link may have already been
1417 * freed, resulting in a NULL res_cgroup. However, by holding the
1418 * cgroup_mutex, we ensure that res_cgroup can't be NULL.
1419 * If we don't hold cgroup_mutex in the caller, we must do the NULL
1420 * check.
1421 */
1422 return res_cgroup;
1423 }
1424
1425 /*
1426 * look up cgroup associated with current task's cgroup namespace on the
1427 * specified hierarchy
1428 */
1429 static struct cgroup *
1430 current_cgns_cgroup_from_root(struct cgroup_root *root)
1431 {
1432 struct cgroup *res = NULL;
1433 struct css_set *cset;
1434
1435 lockdep_assert_held(&css_set_lock);
1436
1437 rcu_read_lock();
1438
1439 cset = current->nsproxy->cgroup_ns->root_cset;
1440 res = __cset_cgroup_from_root(cset, root);
1441
1442 rcu_read_unlock();
1443
1444 /*
1445 * The namespace_sem is held by current, so the root cgroup can't
1446 * be umounted. Therefore, we can ensure that the res is non-NULL.
1447 */
1448 WARN_ON_ONCE(!res);
1449 return res;
1450 }
1451
1452 /*
1453 * Look up cgroup associated with current task's cgroup namespace on the default
1454 * hierarchy.
1455 *
1456 * Unlike current_cgns_cgroup_from_root(), this doesn't need locks:
1457 * - Internal rcu_read_lock is unnecessary because we don't dereference any rcu
1458 * pointers.
1459 * - css_set_lock is not needed because we just read cset->dfl_cgrp.
1460 * - As a bonus, the returned cgrp is pinned by current because a task cannot
1461 * switch its cgroup_ns asynchronously.
1462 */
1463 static struct cgroup *current_cgns_cgroup_dfl(void)
1464 {
1465 struct css_set *cset;
1466
1467 if (current->nsproxy) {
1468 cset = current->nsproxy->cgroup_ns->root_cset;
1469 return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
1470 } else {
1471 /*
1472 * NOTE: This function may be called from bpf_cgroup_from_id()
1473 * on a task which has already passed exit_nsproxy_namespaces()
1474 * and nsproxy == NULL. Fall back to cgrp_dfl_root which will
1475 * make all cgroups visible for lookups.
1476 */
1477 return &cgrp_dfl_root.cgrp;
1478 }
1479 }
1480
1481 /* look up cgroup associated with given css_set on the specified hierarchy */
1482 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
1483 struct cgroup_root *root)
1484 {
1485 lockdep_assert_held(&css_set_lock);
1486
1487 return __cset_cgroup_from_root(cset, root);
1488 }
1489
1490 /*
1491 * Return the cgroup for "task" from the given hierarchy. Must be
1492 * called with css_set_lock held to prevent task's groups from being modified.
1493 * Must be called with either cgroup_mutex or rcu read lock to prevent the
1494 * cgroup root from being destroyed.
1495 */
1496 struct cgroup *task_cgroup_from_root(struct task_struct *task,
1497 struct cgroup_root *root)
1498 {
1499 /*
1500 * No need to lock the task - since we hold css_set_lock the
1501 * task can't change groups.
1502 */
1503 return cset_cgroup_from_root(task_css_set(task), root);
1504 }
1505
1506 /*
1507 * A task must hold cgroup_mutex to modify cgroups.
1508 *
1509 * Any task can increment and decrement the count field without lock.
1510 * So in general, code holding cgroup_mutex can't rely on the count
1511 * field not changing. However, if the count goes to zero, then only
1512 * cgroup_attach_task() can increment it again. Because a count of zero
1513 * means that no tasks are currently attached, therefore there is no
1514 * way a task attached to that cgroup can fork (the other way to
1515 * increment the count). So code holding cgroup_mutex can safely
1516 * assume that if the count is zero, it will stay zero. Similarly, if
1517 * a task holds cgroup_mutex on a cgroup with zero count, it
1518 * knows that the cgroup won't be removed, as cgroup_rmdir()
1519 * needs that mutex.
1520 *
1521 * A cgroup can only be deleted if both its 'count' of using tasks
1522 * is zero, and its list of 'children' cgroups is empty. Since all
1523 * tasks in the system use _some_ cgroup, and since there is always at
1524 * least one task in the system (init, pid == 1), therefore, root cgroup
1525 * always has either children cgroups and/or using tasks. So we don't
1526 * need a special hack to ensure that root cgroup cannot be deleted.
1527 *
1528 * P.S. One more locking exception. RCU is used to guard the
1529 * update of a task's cgroup pointer by cgroup_attach_task().
1530 */
1531
1532 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
1533
1534 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1535 char *buf)
1536 {
1537 struct cgroup_subsys *ss = cft->ss;
1538
1539 if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
1540 !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
1541 const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";
1542
1543 snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
1544 dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1545 cft->name);
1546 } else {
1547 strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1548 }
1549 return buf;
1550 }
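/*
 * Illustrative results: the memory controller's "max" file on the default
 * hierarchy comes out as "memory.max", while core files (cft->ss == NULL)
 * and hierarchies mounted with "noprefix" keep the bare cft->name, e.g.
 * "cgroup.procs".
 */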
1551
1552 /**
1553 * cgroup_file_mode - deduce file mode of a control file
1554 * @cft: the control file in question
1555 *
1556 * S_IRUGO for read, S_IWUSR for write.
1557 */
1558 static umode_t cgroup_file_mode(const struct cftype *cft)
1559 {
1560 umode_t mode = 0;
1561
1562 if (cft->read_u64 || cft->read_s64 || cft->seq_show)
1563 mode |= S_IRUGO;
1564
1565 if (cft->write_u64 || cft->write_s64 || cft->write) {
1566 if (cft->flags & CFTYPE_WORLD_WRITABLE)
1567 mode |= S_IWUGO;
1568 else
1569 mode |= S_IWUSR;
1570 }
1571
1572 return mode;
1573 }
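/*
 * Example (illustrative): a cftype with both .seq_show and .write and
 * without CFTYPE_WORLD_WRITABLE gets S_IRUGO | S_IWUSR (0644); one with
 * only .seq_show gets 0444; adding CFTYPE_WORLD_WRITABLE to a writable
 * file yields 0666.
 */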
1574
1575 /**
1576 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
1577 * @subtree_control: the new subtree_control mask to consider
1578 * @this_ss_mask: available subsystems
1579 *
1580 * On the default hierarchy, a subsystem may request other subsystems to be
1581 * enabled together through its ->depends_on mask. In such cases, more
1582 * subsystems than specified in "cgroup.subtree_control" may be enabled.
1583 *
1584 * This function calculates which subsystems need to be enabled if
1585 * @subtree_control is to be applied while restricted to @this_ss_mask.
1586 */
1587 static u32 cgroup_calc_subtree_ss_mask(u32 subtree_control, u32 this_ss_mask)
1588 {
1589 u32 cur_ss_mask = subtree_control;
1590 struct cgroup_subsys *ss;
1591 int ssid;
1592
1593 lockdep_assert_held(&cgroup_mutex);
1594
1595 cur_ss_mask |= cgrp_dfl_implicit_ss_mask;
1596
1597 while (true) {
1598 u32 new_ss_mask = cur_ss_mask;
1599
1600 do_each_subsys_mask(ss, ssid, cur_ss_mask) {
1601 new_ss_mask |= ss->depends_on;
1602 } while_each_subsys_mask();
1603
1604 /*
1605 * Mask out subsystems which aren't available. This can
1606 * happen only if some depended-upon subsystems were bound
1607 * to non-default hierarchies.
1608 */
1609 new_ss_mask &= this_ss_mask;
1610
1611 if (new_ss_mask == cur_ss_mask)
1612 break;
1613 cur_ss_mask = new_ss_mask;
1614 }
1615
1616 return cur_ss_mask;
1617 }
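/*
 * Worked example (hypothetical subsystems A, B, C where A depends on B and
 * B depends on C): starting from subtree_control = {A}, the first pass
 * yields {A, B}, the second {A, B, C}, and a third adds nothing, so the
 * loop stops. If C is bound to another hierarchy it is absent from
 * @this_ss_mask and gets masked out, leaving {A, B}.
 */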
1618
1619 /**
1620 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
1621 * @kn: the kernfs_node being serviced
1622 *
1623 * This helper undoes cgroup_kn_lock_live() and should be invoked before
1624 * the method finishes if locking succeeded. Note that once this function
1625 * returns the cgroup returned by cgroup_kn_lock_live() may become
1626 * inaccessible any time. If the caller intends to continue to access the
1627 * cgroup, it should pin it before invoking this function.
1628 */
1629 void cgroup_kn_unlock(struct kernfs_node *kn)
1630 {
1631 struct cgroup *cgrp;
1632
1633 if (kernfs_type(kn) == KERNFS_DIR)
1634 cgrp = kn->priv;
1635 else
1636 cgrp = kn_priv(kn);
1637
1638 cgroup_unlock();
1639
1640 kernfs_unbreak_active_protection(kn);
1641 cgroup_put(cgrp);
1642 }
1643
1644 /**
1645 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
1646 * @kn: the kernfs_node being serviced
1647 * @drain_offline: perform offline draining on the cgroup
1648 *
1649 * This helper is to be used by a cgroup kernfs method currently servicing
1650 * @kn. It breaks the active protection, performs cgroup locking and
1651 * verifies that the associated cgroup is alive. Returns the cgroup if
1652 * alive; otherwise, %NULL. A successful return should be undone by a
1653 * matching cgroup_kn_unlock() invocation. If @drain_offline is %true, the
1654 * cgroup is drained of offlining csses before return.
1655 *
1656 * Any cgroup kernfs method implementation which requires locking the
1657 * associated cgroup should use this helper. It avoids nesting cgroup
1658 * locking under kernfs active protection and allows all kernfs operations
1659 * including self-removal.
1660 */
1661 struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
1662 {
1663 struct cgroup *cgrp;
1664
1665 if (kernfs_type(kn) == KERNFS_DIR)
1666 cgrp = kn->priv;
1667 else
1668 cgrp = kn_priv(kn);
1669
1670 /*
1671 * We're gonna grab cgroup_mutex which nests outside kernfs
1672 * active_ref. A cgroup liveness check alone provides enough
1673 * protection against removal. Ensure @cgrp stays accessible and
1674 * break the active_ref protection.
1675 */
1676 if (!cgroup_tryget(cgrp))
1677 return NULL;
1678 kernfs_break_active_protection(kn);
1679
1680 if (drain_offline)
1681 cgroup_lock_and_drain_offline(cgrp);
1682 else
1683 cgroup_lock();
1684
1685 if (!cgroup_is_dead(cgrp))
1686 return cgrp;
1687
1688 cgroup_kn_unlock(kn);
1689 return NULL;
1690 }
1691
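/* Remove @cft's interface file from @cgrp and cancel any pending notification. */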
1692 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1693 {
1694 char name[CGROUP_FILE_NAME_MAX];
1695
1696 lockdep_assert_held(&cgroup_mutex);
1697
1698 if (cft->file_offset) {
1699 struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1700 struct cgroup_file *cfile = (void *)css + cft->file_offset;
1701
1702 spin_lock_irq(&cfile->lock);
1703 WRITE_ONCE(cfile->kn, NULL);
1704 spin_unlock_irq(&cfile->lock);
1705
1706 timer_delete_sync(&cfile->notify_timer);
1707 }
1708
1709 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1710 }
1711
1712 /**
1713 * css_clear_dir - remove subsys files in a cgroup directory
1714 * @css: target css
1715 */
1716 static void css_clear_dir(struct cgroup_subsys_state *css)
1717 {
1718 struct cgroup *cgrp = css->cgroup;
1719 struct cftype *cfts;
1720
1721 if (!(css->flags & CSS_VISIBLE))
1722 return;
1723
1724 css->flags &= ~CSS_VISIBLE;
1725
1726 if (css_is_self(css)) {
1727 if (cgroup_on_dfl(cgrp)) {
1728 cgroup_addrm_files(css, cgrp,
1729 cgroup_base_files, false);
1730 if (cgroup_psi_enabled())
1731 cgroup_addrm_files(css, cgrp,
1732 cgroup_psi_files, false);
1733 } else {
1734 cgroup_addrm_files(css, cgrp,
1735 cgroup1_base_files, false);
1736 }
1737 } else {
1738 list_for_each_entry(cfts, &css->ss->cfts, node)
1739 cgroup_addrm_files(css, cgrp, cfts, false);
1740 }
1741 }
1742
1743 /**
1744 * css_populate_dir - create subsys files in a cgroup directory
1745 * @css: target css
1746 *
1747 * On failure, no file is added.
1748 */
1749 static int css_populate_dir(struct cgroup_subsys_state *css)
1750 {
1751 struct cgroup *cgrp = css->cgroup;
1752 struct cftype *cfts, *failed_cfts;
1753 int ret;
1754
1755 if (css->flags & CSS_VISIBLE)
1756 return 0;
1757
1758 if (css_is_self(css)) {
1759 if (cgroup_on_dfl(cgrp)) {
1760 ret = cgroup_addrm_files(css, cgrp,
1761 cgroup_base_files, true);
1762 if (ret < 0)
1763 return ret;
1764
1765 if (cgroup_psi_enabled()) {
1766 ret = cgroup_addrm_files(css, cgrp,
1767 cgroup_psi_files, true);
1768 if (ret < 0) {
1769 cgroup_addrm_files(css, cgrp,
1770 cgroup_base_files, false);
1771 return ret;
1772 }
1773 }
1774 } else {
1775 ret = cgroup_addrm_files(css, cgrp,
1776 cgroup1_base_files, true);
1777 if (ret < 0)
1778 return ret;
1779 }
1780 } else {
1781 list_for_each_entry(cfts, &css->ss->cfts, node) {
1782 ret = cgroup_addrm_files(css, cgrp, cfts, true);
1783 if (ret < 0) {
1784 failed_cfts = cfts;
1785 goto err;
1786 }
1787 }
1788 }
1789
1790 css->flags |= CSS_VISIBLE;
1791
1792 return 0;
1793 err:
1794 list_for_each_entry(cfts, &css->ss->cfts, node) {
1795 if (cfts == failed_cfts)
1796 break;
1797 cgroup_addrm_files(css, cgrp, cfts, false);
1798 }
1799 return ret;
1800 }
1801
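/*
 * Move the controllers in @ss_mask to @dst_root. Controllers currently on
 * the default hierarchy are disabled there in one batch; each css is then
 * rebound, its css_sets are moved to the destination's e_cset lists (fixing
 * up in-flight iterators), and control is re-applied on @dst_root's cgroup.
 */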
1802 int rebind_subsystems(struct cgroup_root *dst_root, u32 ss_mask)
1803 {
1804 struct cgroup *dcgrp = &dst_root->cgrp;
1805 struct cgroup_subsys *ss;
1806 int ssid, ret;
1807 u32 dfl_disable_ss_mask = 0;
1808
1809 lockdep_assert_held(&cgroup_mutex);
1810
1811 do_each_subsys_mask(ss, ssid, ss_mask) {
1812 /*
1813 * If @ss has non-root csses attached to it, can't move.
1814 * If @ss is an implicit controller, it is exempt from this
1815 * rule and can be stolen.
1816 */
1817 if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
1818 !ss->implicit_on_dfl)
1819 return -EBUSY;
1820
1821 /* can't move between two non-dummy roots either */
1822 if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1823 return -EBUSY;
1824
1825 /*
1826 * Collect ssid's that need to be disabled from default
1827 * hierarchy.
1828 */
1829 if (ss->root == &cgrp_dfl_root)
1830 dfl_disable_ss_mask |= 1 << ssid;
1831
1832 } while_each_subsys_mask();
1833
1834 if (dfl_disable_ss_mask) {
1835 struct cgroup *scgrp = &cgrp_dfl_root.cgrp;
1836
1837 /*
1838 * Controllers from default hierarchy that need to be rebound
1839 * are all disabled together in one go.
1840 */
1841 cgrp_dfl_root.subsys_mask &= ~dfl_disable_ss_mask;
1842 WARN_ON(cgroup_apply_control(scgrp));
1843 cgroup_finalize_control(scgrp, 0);
1844 }
1845
1846 do_each_subsys_mask(ss, ssid, ss_mask) {
1847 struct cgroup_root *src_root = ss->root;
1848 struct cgroup *scgrp = &src_root->cgrp;
1849 struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
1850 struct css_set *cset, *cset_pos;
1851 struct css_task_iter *it;
1852
1853 WARN_ON(!css || cgroup_css(dcgrp, ss));
1854
1855 if (src_root != &cgrp_dfl_root) {
1856 /* disable from the source */
1857 src_root->subsys_mask &= ~(1 << ssid);
1858 WARN_ON(cgroup_apply_control(scgrp));
1859 cgroup_finalize_control(scgrp, 0);
1860 }
1861
1862 /* rebind */
1863 RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
1864 rcu_assign_pointer(dcgrp->subsys[ssid], css);
1865 ss->root = dst_root;
1866
1867 spin_lock_irq(&css_set_lock);
1868 css->cgroup = dcgrp;
1869 WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
1870 list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
1871 e_cset_node[ss->id]) {
1872 list_move_tail(&cset->e_cset_node[ss->id],
1873 &dcgrp->e_csets[ss->id]);
1874 /*
1875 * All css_sets of scgrp are moved to dcgrp in the same order;
1876 * patch in-flight iterators so they keep iterating correctly.
1877 * Since an iterator is always advanced right away and finishes
1878 * when it->cset_pos meets it->cset_head, updating it->cset_head
1879 * is enough here.
1880 */
1881 list_for_each_entry(it, &cset->task_iters, iters_node)
1882 if (it->cset_head == &scgrp->e_csets[ss->id])
1883 it->cset_head = &dcgrp->e_csets[ss->id];
1884 }
1885 spin_unlock_irq(&css_set_lock);
1886
1887 /* default hierarchy doesn't enable controllers by default */
1888 dst_root->subsys_mask |= 1 << ssid;
1889 if (dst_root == &cgrp_dfl_root) {
1890 static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
1891 } else {
1892 dcgrp->subtree_control |= 1 << ssid;
1893 static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
1894 }
1895
1896 ret = cgroup_apply_control(dcgrp);
1897 if (ret)
1898 pr_warn("partial failure to rebind %s controller (err=%d)\n",
1899 ss->name, ret);
1900
1901 if (ss->bind)
1902 ss->bind(css);
1903 } while_each_subsys_mask();
1904
1905 kernfs_activate(dcgrp->kn);
1906 return 0;
1907 }
1908
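/*
 * Emit the path of @kf_node relative to the current task's cgroup namespace
 * root into @sf, escaping whitespace and backslashes. A kernfs -E2BIG is
 * reported as -ERANGE.
 */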
1909 int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
1910 struct kernfs_root *kf_root)
1911 {
1912 int len = 0;
1913 char *buf = NULL;
1914 struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
1915 struct cgroup *ns_cgroup;
1916
1917 buf = kmalloc(PATH_MAX, GFP_KERNEL);
1918 if (!buf)
1919 return -ENOMEM;
1920
1921 spin_lock_irq(&css_set_lock);
1922 ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
1923 len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
1924 spin_unlock_irq(&css_set_lock);
1925
1926 if (len == -E2BIG)
1927 len = -ERANGE;
1928 else if (len > 0) {
1929 seq_escape(sf, buf, " \t\n\\");
1930 len = 0;
1931 }
1932 kfree(buf);
1933 return len;
1934 }
1935
1936 enum cgroup2_param {
1937 Opt_nsdelegate,
1938 Opt_favordynmods,
1939 Opt_memory_localevents,
1940 Opt_memory_recursiveprot,
1941 Opt_memory_hugetlb_accounting,
1942 Opt_pids_localevents,
1943 nr__cgroup2_params
1944 };
1945
1946 static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
1947 fsparam_flag("nsdelegate", Opt_nsdelegate),
1948 fsparam_flag("favordynmods", Opt_favordynmods),
1949 fsparam_flag("memory_localevents", Opt_memory_localevents),
1950 fsparam_flag("memory_recursiveprot", Opt_memory_recursiveprot),
1951 fsparam_flag("memory_hugetlb_accounting", Opt_memory_hugetlb_accounting),
1952 fsparam_flag("pids_localevents", Opt_pids_localevents),
1953 {}
1954 };
1955
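/* Parse one cgroup2 mount option and record the matching root flag. */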
1956 static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param)
1957 {
1958 struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
1959 struct fs_parse_result result;
1960 int opt;
1961
1962 opt = fs_parse(fc, cgroup2_fs_parameters, param, &result);
1963 if (opt < 0)
1964 return opt;
1965
1966 switch (opt) {
1967 case Opt_nsdelegate:
1968 ctx->flags |= CGRP_ROOT_NS_DELEGATE;
1969 return 0;
1970 case Opt_favordynmods:
1971 ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
1972 return 0;
1973 case Opt_memory_localevents:
1974 ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
1975 return 0;
1976 case Opt_memory_recursiveprot:
1977 ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
1978 return 0;
1979 case Opt_memory_hugetlb_accounting:
1980 ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1981 return 0;
1982 case Opt_pids_localevents:
1983 ctx->flags |= CGRP_ROOT_PIDS_LOCAL_EVENTS;
1984 return 0;
1985 }
1986 return -EINVAL;
1987 }
1988
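/* Return the cgroup_of_peak record embedded in @of's private file context. */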
1989 struct cgroup_of_peak *of_peak(struct kernfs_open_file *of)
1990 {
1991 struct cgroup_file_ctx *ctx = of->priv;
1992
1993 return &ctx->peak;
1994 }
1995
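/*
 * Apply (re)mount flags to the default hierarchy. Only a task in the init
 * cgroup namespace may change them; each flag in @root_flags is set or
 * cleared on cgrp_dfl_root accordingly.
 */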
1996 static void apply_cgroup_root_flags(unsigned int root_flags)
1997 {
1998 if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
1999 if (root_flags & CGRP_ROOT_NS_DELEGATE)
2000 cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
2001 else
2002 cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
2003
2004 cgroup_favor_dynmods(&cgrp_dfl_root,
2005 root_flags & CGRP_ROOT_FAVOR_DYNMODS);
2006
2007 if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
2008 cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
2009 else
2010 cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
2011
2012 if (root_flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
2013 cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
2014 else
2015 cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
2016
2017 if (root_flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
2018 cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
2019 else
2020 cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
2021
2022 if (root_flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
2023 cgrp_dfl_root.flags |= CGRP_ROOT_PIDS_LOCAL_EVENTS;
2024 else
2025 cgrp_dfl_root.flags &= ~CGRP_ROOT_PIDS_LOCAL_EVENTS;
2026 }
2027 }
2028
2029 static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
2030 {
2031 if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
2032 seq_puts(seq, ",nsdelegate");
2033 if (cgrp_dfl_root.flags & CGRP_ROOT_FAVOR_DYNMODS)
2034 seq_puts(seq, ",favordynmods");
2035 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
2036 seq_puts(seq, ",memory_localevents");
2037 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
2038 seq_puts(seq, ",memory_recursiveprot");
2039 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)
2040 seq_puts(seq, ",memory_hugetlb_accounting");
2041 if (cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS)
2042 seq_puts(seq, ",pids_localevents");
2043 return 0;
2044 }
2045
2046 static int cgroup_reconfigure(struct fs_context *fc)
2047 {
2048 struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2049
2050 apply_cgroup_root_flags(ctx->flags);
2051 return 0;
2052 }
2053
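/* Deferred final destruction of a cgroup, performed with cgroup_mutex held. */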
2054 static void cgroup_finish_destroy_work_fn(struct work_struct *work)
2055 {
2056 struct cgroup *cgrp = container_of(work, struct cgroup, finish_destroy_work);
2057
2058 cgroup_lock();
2059 cgroup_finish_destroy(cgrp);
2060 cgroup_unlock();
2061 cgroup_put(cgrp);
2062 }
2063
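/* Initialize the bookkeeping fields of a newly allocated cgroup. */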
2064 static void init_cgroup_housekeeping(struct cgroup *cgrp)
2065 {
2066 struct cgroup_subsys *ss;
2067 int ssid;
2068
2069 INIT_LIST_HEAD(&cgrp->self.sibling);
2070 INIT_LIST_HEAD(&cgrp->self.children);
2071 INIT_LIST_HEAD(&cgrp->cset_links);
2072 INIT_LIST_HEAD(&cgrp->pidlists);
2073 mutex_init(&cgrp->pidlist_mutex);
2074 cgrp->self.cgroup = cgrp;
2075 cgrp->self.flags |= CSS_ONLINE;
2076 cgrp->dom_cgrp = cgrp;
2077 cgrp->max_descendants = INT_MAX;
2078 cgrp->max_depth = INT_MAX;
2079 prev_cputime_init(&cgrp->prev_cputime);
2080
2081 for_each_subsys(ss, ssid)
2082 INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
2083
2084 #ifdef CONFIG_CGROUP_BPF
2085 for (int i = 0; i < ARRAY_SIZE(cgrp->bpf.revisions); i++)
2086 cgrp->bpf.revisions[i] = 1;
2087 #endif
2088
2089 init_waitqueue_head(&cgrp->offline_waitq);
2090 INIT_WORK(&cgrp->finish_destroy_work, cgroup_finish_destroy_work_fn);
2091 INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent);
2092 }
2093
2094 void init_cgroup_root(struct cgroup_fs_context *ctx)
2095 {
2096 struct cgroup_root *root = ctx->root;
2097 struct cgroup *cgrp = &root->cgrp;
2098
2099 INIT_LIST_HEAD_RCU(&root->root_list);
2100 atomic_set(&root->nr_cgrps, 1);
2101 cgrp->root = root;
2102 init_cgroup_housekeeping(cgrp);
2103
2104 /* DYNMODS must be modified through cgroup_favor_dynmods() */
2105 root->flags = ctx->flags & ~CGRP_ROOT_FAVOR_DYNMODS;
2106 if (ctx->release_agent)
2107 strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX);
2108 if (ctx->name)
2109 strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN);
2110 if (ctx->cpuset_clone_children)
2111 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
2112 }
2113
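/*
 * Set up @root as a live hierarchy: initialize the root cgroup's refcnt,
 * create its kernfs root and base files, rebind the controllers in @ss_mask
 * to it and link the root cgroup into every existing css_set.
 */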
2114 int cgroup_setup_root(struct cgroup_root *root, u32 ss_mask)
2115 {
2116 LIST_HEAD(tmp_links);
2117 struct cgroup *root_cgrp = &root->cgrp;
2118 struct kernfs_syscall_ops *kf_sops;
2119 struct css_set *cset;
2120 int i, ret;
2121
2122 lockdep_assert_held(&cgroup_mutex);
2123
2124 ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
2125 0, GFP_KERNEL);
2126 if (ret)
2127 goto out;
2128
2129 /*
2130 * We're accessing css_set_count without locking css_set_lock here,
2131 * but that's OK - it can only be increased by someone holding
2132 * cgroup_lock, and that's us. Later rebinding may disable
2133 * controllers on the default hierarchy and thus create new csets,
2134 * which can't be more than the existing ones. Allocate 2x.
2135 */
2136 ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
2137 if (ret)
2138 goto cancel_ref;
2139
2140 ret = cgroup_init_root_id(root);
2141 if (ret)
2142 goto cancel_ref;
2143
2144 kf_sops = root == &cgrp_dfl_root ?
2145 &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops;
2146
2147 root->kf_root = kernfs_create_root(kf_sops,
2148 KERNFS_ROOT_CREATE_DEACTIVATED |
2149 KERNFS_ROOT_SUPPORT_EXPORTOP |
2150 KERNFS_ROOT_SUPPORT_USER_XATTR |
2151 KERNFS_ROOT_INVARIANT_PARENT,
2152 root_cgrp);
2153 if (IS_ERR(root->kf_root)) {
2154 ret = PTR_ERR(root->kf_root);
2155 goto exit_root_id;
2156 }
2157 root_cgrp->kn = kernfs_root_to_node(root->kf_root);
2158 WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1);
2159 root_cgrp->ancestors[0] = root_cgrp;
2160
2161 ret = css_populate_dir(&root_cgrp->self);
2162 if (ret)
2163 goto destroy_root;
2164
2165 ret = css_rstat_init(&root_cgrp->self);
2166 if (ret)
2167 goto destroy_root;
2168
2169 ret = rebind_subsystems(root, ss_mask);
2170 if (ret)
2171 goto exit_stats;
2172
2173 ret = blocking_notifier_call_chain(&cgroup_lifetime_notifier,
2174 CGROUP_LIFETIME_ONLINE, root_cgrp);
2175 WARN_ON_ONCE(notifier_to_errno(ret));
2176
2177 trace_cgroup_setup_root(root);
2178
2179 /*
2180 * There must be no failure case after here, since rebinding takes
2181 * care of subsystems' refcounts, which are explicitly dropped in
2182 * the failure exit path.
2183 */
2184 list_add_rcu(&root->root_list, &cgroup_roots);
2185 cgroup_root_count++;
2186
2187 /*
2188 * Link the root cgroup in this hierarchy into all the css_set
2189 * objects.
2190 */
2191 spin_lock_irq(&css_set_lock);
2192 hash_for_each(css_set_table, i, cset, hlist) {
2193 link_css_set(&tmp_links, cset, root_cgrp);
2194 if (css_set_populated(cset))
2195 cgroup_update_populated(root_cgrp, true);
2196 }
2197 spin_unlock_irq(&css_set_lock);
2198
2199 BUG_ON(!list_empty(&root_cgrp->self.children));
2200 BUG_ON(atomic_read(&root->nr_cgrps) != 1);
2201
2202 ret = 0;
2203 goto out;
2204
2205 exit_stats:
2206 css_rstat_exit(&root_cgrp->self);
2207 destroy_root:
2208 kernfs_destroy_root(root->kf_root);
2209 root->kf_root = NULL;
2210 exit_root_id:
2211 cgroup_exit_root_id(root);
2212 cancel_ref:
2213 percpu_ref_exit(&root_cgrp->self.refcnt);
2214 out:
2215 free_cgrp_cset_links(&tmp_links);
2216 return ret;
2217 }
2218
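/*
 * Common mount path shared by cgroup1 and cgroup2. In a non-init cgroup
 * namespace, the returned root dentry is remapped to the namespace's root
 * cgroup.
 */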
2219 int cgroup_do_get_tree(struct fs_context *fc)
2220 {
2221 struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2222 int ret;
2223
2224 ctx->kfc.root = ctx->root->kf_root;
2225 if (fc->fs_type == &cgroup2_fs_type)
2226 ctx->kfc.magic = CGROUP2_SUPER_MAGIC;
2227 else
2228 ctx->kfc.magic = CGROUP_SUPER_MAGIC;
2229 ret = kernfs_get_tree(fc);
2230
2231 /*
2232 * In a non-init cgroup namespace, instead of the root cgroup's dentry,
2233 * we return the dentry corresponding to the cgroupns->root_cgrp.
2234 */
2235 if (!ret && ctx->ns != &init_cgroup_ns) {
2236 struct dentry *nsdentry;
2237 struct super_block *sb = fc->root->d_sb;
2238 struct cgroup *cgrp;
2239
2240 cgroup_lock();
2241 spin_lock_irq(&css_set_lock);
2242
2243 cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
2244
2245 spin_unlock_irq(&css_set_lock);
2246 cgroup_unlock();
2247
2248 nsdentry = kernfs_node_dentry(cgrp->kn, sb);
2249 dput(fc->root);
2250 if (IS_ERR(nsdentry)) {
2251 deactivate_locked_super(sb);
2252 ret = PTR_ERR(nsdentry);
2253 nsdentry = NULL;
2254 }
2255 fc->root = nsdentry;
2256 }
2257
2258 if (!ctx->kfc.new_sb_created)
2259 cgroup_put(&ctx->root->cgrp);
2260
2261 return ret;
2262 }
2263
2264 /*
2265 * Destroy a cgroup filesystem context.
2266 */
2267 static void cgroup_fs_context_free(struct fs_context *fc)
2268 {
2269 struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2270
2271 kfree(ctx->name);
2272 kfree(ctx->release_agent);
2273 put_cgroup_ns(ctx->ns);
2274 kernfs_free_fs_context(fc);
2275 kfree(ctx);
2276 }
2277
2278 static int cgroup_get_tree(struct fs_context *fc)
2279 {
2280 struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2281 int ret;
2282
2283 WRITE_ONCE(cgrp_dfl_visible, true);
2284 cgroup_get_live(&cgrp_dfl_root.cgrp);
2285 ctx->root = &cgrp_dfl_root;
2286
2287 ret = cgroup_do_get_tree(fc);
2288 if (!ret)
2289 apply_cgroup_root_flags(ctx->flags);
2290 return ret;
2291 }
2292
2293 static const struct fs_context_operations cgroup_fs_context_ops = {
2294 .free = cgroup_fs_context_free,
2295 .parse_param = cgroup2_parse_param,
2296 .get_tree = cgroup_get_tree,
2297 .reconfigure = cgroup_reconfigure,
2298 };
2299
2300 static const struct fs_context_operations cgroup1_fs_context_ops = {
2301 .free = cgroup_fs_context_free,
2302 .parse_param = cgroup1_parse_param,
2303 .get_tree = cgroup1_get_tree,
2304 .reconfigure = cgroup1_reconfigure,
2305 };
2306
2307 /*
2308 * Initialise the cgroup filesystem creation/reconfiguration context. Notably,
2309 * we select the namespace we're going to use.
2310 */
2311 static int cgroup_init_fs_context(struct fs_context *fc)
2312 {
2313 struct cgroup_fs_context *ctx;
2314
2315 ctx = kzalloc_obj(struct cgroup_fs_context);
2316 if (!ctx)
2317 return -ENOMEM;
2318
2319 ctx->ns = current->nsproxy->cgroup_ns;
2320 get_cgroup_ns(ctx->ns);
2321 fc->fs_private = &ctx->kfc;
2322 if (fc->fs_type == &cgroup2_fs_type)
2323 fc->ops = &cgroup_fs_context_ops;
2324 else
2325 fc->ops = &cgroup1_fs_context_ops;
2326 put_user_ns(fc->user_ns);
2327 fc->user_ns = get_user_ns(ctx->ns->user_ns);
2328 fc->global = true;
2329
2330 if (have_favordynmods)
2331 ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
2332
2333 return 0;
2334 }
2335
2336 static void cgroup_kill_sb(struct super_block *sb)
2337 {
2338 struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
2339 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
2340
2341 /*
2342 * If @root doesn't have any children, start killing it.
2343 * This prevents new mounts by disabling percpu_ref_tryget_live().
2344 *
2345 * And don't kill the default root.
2346 */
2347 if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
2348 !percpu_ref_is_dying(&root->cgrp.self.refcnt))
2349 percpu_ref_kill(&root->cgrp.self.refcnt);
2350 cgroup_put(&root->cgrp);
2351 kernfs_kill_sb(sb);
2352 }
2353
2354 struct file_system_type cgroup_fs_type = {
2355 .name = "cgroup",
2356 .init_fs_context = cgroup_init_fs_context,
2357 .parameters = cgroup1_fs_parameters,
2358 .kill_sb = cgroup_kill_sb,
2359 .fs_flags = FS_USERNS_MOUNT,
2360 };
2361
2362 static struct file_system_type cgroup2_fs_type = {
2363 .name = "cgroup2",
2364 .init_fs_context = cgroup_init_fs_context,
2365 .parameters = cgroup2_fs_parameters,
2366 .kill_sb = cgroup_kill_sb,
2367 .fs_flags = FS_USERNS_MOUNT,
2368 };
2369
2370 #ifdef CONFIG_CPUSETS_V1
2371 enum cpuset_param {
2372 Opt_cpuset_v2_mode,
2373 };
2374
2375 static const struct fs_parameter_spec cpuset_fs_parameters[] = {
2376 fsparam_flag ("cpuset_v2_mode", Opt_cpuset_v2_mode),
2377 {}
2378 };
2379
2380 static int cpuset_parse_param(struct fs_context *fc, struct fs_parameter *param)
2381 {
2382 struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2383 struct fs_parse_result result;
2384 int opt;
2385
2386 opt = fs_parse(fc, cpuset_fs_parameters, param, &result);
2387 if (opt < 0)
2388 return opt;
2389
2390 switch (opt) {
2391 case Opt_cpuset_v2_mode:
2392 ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
2393 return 0;
2394 }
2395 return -EINVAL;
2396 }
2397
2398 static const struct fs_context_operations cpuset_fs_context_ops = {
2399 .get_tree = cgroup1_get_tree,
2400 .free = cgroup_fs_context_free,
2401 .parse_param = cpuset_parse_param,
2402 };
2403
2404 /*
2405 * This is ugly, but preserves the userspace API for existing cpuset
2406 * users. If someone tries to mount the "cpuset" filesystem, we
2407 * silently switch it to mount "cgroup" instead.
2408 */
2409 static int cpuset_init_fs_context(struct fs_context *fc)
2410 {
2411 char *agent = kstrdup("/sbin/cpuset_release_agent", GFP_USER);
2412 struct cgroup_fs_context *ctx;
2413 int err;
2414
2415 err = cgroup_init_fs_context(fc);
2416 if (err) {
2417 kfree(agent);
2418 return err;
2419 }
2420
2421 fc->ops = &cpuset_fs_context_ops;
2422
2423 ctx = cgroup_fc2context(fc);
2424 ctx->subsys_mask = 1 << cpuset_cgrp_id;
2425 ctx->flags |= CGRP_ROOT_NOPREFIX;
2426 ctx->release_agent = agent;
2427
2428 get_filesystem(&cgroup_fs_type);
2429 put_filesystem(fc->fs_type);
2430 fc->fs_type = &cgroup_fs_type;
2431
2432 return 0;
2433 }
2434
2435 static struct file_system_type cpuset_fs_type = {
2436 .name = "cpuset",
2437 .init_fs_context = cpuset_init_fs_context,
2438 .parameters = cpuset_fs_parameters,
2439 .fs_flags = FS_USERNS_MOUNT,
2440 };
2441 #endif
2442
2443 int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
2444 struct cgroup_namespace *ns)
2445 {
2446 struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
2447
2448 return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
2449 }
2450
2451 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
2452 struct cgroup_namespace *ns)
2453 {
2454 int ret;
2455
2456 cgroup_lock();
2457 spin_lock_irq(&css_set_lock);
2458
2459 ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
2460
2461 spin_unlock_irq(&css_set_lock);
2462 cgroup_unlock();
2463
2464 return ret;
2465 }
2466 EXPORT_SYMBOL_GPL(cgroup_path_ns);
2467
2468 /**
2469 * cgroup_attach_lock - Lock for ->attach()
2470 * @lock_mode: whether to acquire a rwsem and, if so, which one
2471 * @tsk: thread group to lock
2472 *
2473 * cgroup migration sometimes needs to stabilize threadgroups against forks and
2474 * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
2475 * implementations (e.g. cpuset), also need to disable CPU hotplug.
2476 * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
2477 * lead to deadlocks.
2478 *
2479 * Bringing up a CPU may involve creating and destroying tasks which requires
2480 * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
2481 * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
2482 * write-locking threadgroup_rwsem, the locking order is reversed and we end up
2483 * waiting for an on-going CPU hotplug operation which in turn is waiting for
2484 * the threadgroup_rwsem to be released to create new tasks. For more details:
2485 *
2486 * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
2487 *
2488 * Resolve the situation by always acquiring cpus_read_lock() before optionally
2489 * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
2490 * CPU hotplug is disabled on entry.
2491 *
2492 * When favordynmods is enabled, take the per-threadgroup rwsem to reduce
2493 * overhead on dynamic cgroup modifications. See the comment above the
2494 * CGRP_ROOT_FAVOR_DYNMODS definition.
2495 *
2496 * tsk is not NULL only when writing to cgroup.procs.
2497 */
2498 void cgroup_attach_lock(enum cgroup_attach_lock_mode lock_mode,
2499 struct task_struct *tsk)
2500 {
2501 cpus_read_lock();
2502
2503 switch (lock_mode) {
2504 case CGRP_ATTACH_LOCK_NONE:
2505 break;
2506 case CGRP_ATTACH_LOCK_GLOBAL:
2507 percpu_down_write(&cgroup_threadgroup_rwsem);
2508 break;
2509 case CGRP_ATTACH_LOCK_PER_THREADGROUP:
2510 down_write(&tsk->signal->cgroup_threadgroup_rwsem);
2511 break;
2512 default:
2513 pr_warn("cgroup: Unexpected attach lock mode.\n");
2514 break;
2515 }
2516 }
2517
2518 /**
2519 * cgroup_attach_unlock - Undo cgroup_attach_lock()
2520 * @lock_mode: whether to release a rwsem and, if so, which one
2521 * @tsk: thread group to unlock
2522 */
2523 void cgroup_attach_unlock(enum cgroup_attach_lock_mode lock_mode,
2524 struct task_struct *tsk)
2525 {
2526 switch (lock_mode) {
2527 case CGRP_ATTACH_LOCK_NONE:
2528 break;
2529 case CGRP_ATTACH_LOCK_GLOBAL:
2530 percpu_up_write(&cgroup_threadgroup_rwsem);
2531 break;
2532 case CGRP_ATTACH_LOCK_PER_THREADGROUP:
2533 up_write(&tsk->signal->cgroup_threadgroup_rwsem);
2534 break;
2535 default:
2536 pr_warn("cgroup: Unexpected attach lock mode.\n");
2537 break;
2538 }
2539
2540 cpus_read_unlock();
2541 }
2542
2543 /**
2544 * cgroup_migrate_add_task - add a migration target task to a migration context
2545 * @task: target task
2546 * @mgctx: target migration context
2547 *
2548 * Add @task, which is a migration target, to @mgctx->tset. This function
2549 * becomes a noop if @task doesn't need to be migrated. @task's css_set
2550 * should have been added as a migration source and @task->cg_list will be
2551 * moved from the css_set's tasks list to the mg_tasks one.
2552 */
2553 static void cgroup_migrate_add_task(struct task_struct *task,
2554 struct cgroup_mgctx *mgctx)
2555 {
2556 struct css_set *cset;
2557
2558 lockdep_assert_held(&css_set_lock);
2559
2560 /* @task either already exited or can't exit until the end */
2561 if (task->flags & PF_EXITING)
2562 return;
2563
2564 /* cgroup_threadgroup_rwsem protects racing against forks */
2565 WARN_ON_ONCE(list_empty(&task->cg_list));
2566
2567 cset = task_css_set(task);
2568 if (!cset->mg_src_cgrp)
2569 return;
2570
2571 mgctx->tset.nr_tasks++;
2572
2573 css_set_skip_task_iters(cset, task);
2574 list_move_tail(&task->cg_list, &cset->mg_tasks);
2575 if (list_empty(&cset->mg_node))
2576 list_add_tail(&cset->mg_node,
2577 &mgctx->tset.src_csets);
2578 if (list_empty(&cset->mg_dst_cset->mg_node))
2579 list_add_tail(&cset->mg_dst_cset->mg_node,
2580 &mgctx->tset.dst_csets);
2581 }
2582
2583 /**
2584 * cgroup_taskset_first - reset taskset and return the first task
2585 * @tset: taskset of interest
2586 * @dst_cssp: output variable for the destination css
2587 *
2588 * @tset iteration is initialized and the first task is returned.
2589 */
2590 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2591 struct cgroup_subsys_state **dst_cssp)
2592 {
2593 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2594 tset->cur_task = NULL;
2595
2596 return cgroup_taskset_next(tset, dst_cssp);
2597 }
2598
2599 /**
2600 * cgroup_taskset_next - iterate to the next task in taskset
2601 * @tset: taskset of interest
2602 * @dst_cssp: output variable for the destination css
2603 *
2604 * Return the next task in @tset. Iteration must have been initialized
2605 * with cgroup_taskset_first().
2606 */
2607 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2608 struct cgroup_subsys_state **dst_cssp)
2609 {
2610 struct css_set *cset = tset->cur_cset;
2611 struct task_struct *task = tset->cur_task;
2612
2613 while (CGROUP_HAS_SUBSYS_CONFIG && &cset->mg_node != tset->csets) {
2614 if (!task)
2615 task = list_first_entry(&cset->mg_tasks,
2616 struct task_struct, cg_list);
2617 else
2618 task = list_next_entry(task, cg_list);
2619
2620 if (&task->cg_list != &cset->mg_tasks) {
2621 tset->cur_cset = cset;
2622 tset->cur_task = task;
2623
2624 /*
2625 * This function may be called both before and
2626 * after cgroup_migrate_execute(). The two cases
2627 * can be distinguished by looking at whether @cset
2628 * has its ->mg_dst_cset set.
2629 */
2630 if (cset->mg_dst_cset)
2631 *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2632 else
2633 *dst_cssp = cset->subsys[tset->ssid];
2634
2635 return task;
2636 }
2637
2638 cset = list_next_entry(cset, mg_node);
2639 task = NULL;
2640 }
2641
2642 return NULL;
2643 }
2644
2645 /**
2646 * cgroup_migrate_execute - migrate a taskset
2647 * @mgctx: migration context
2648 *
2649 * Migrate tasks in @mgctx as setup by migration preparation functions.
2650 * This function fails iff one of the ->can_attach callbacks fails and
2651 * guarantees that either all or none of the tasks in @mgctx are migrated.
2652 * @mgctx is consumed regardless of success.
2653 */
2654 static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
2655 {
2656 struct cgroup_taskset *tset = &mgctx->tset;
2657 struct cgroup_subsys *ss;
2658 struct task_struct *task, *tmp_task;
2659 struct css_set *cset, *tmp_cset;
2660 int ssid, failed_ssid, ret;
2661
2662 /* check that we can legitimately attach to the cgroup */
2663 if (tset->nr_tasks) {
2664 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2665 if (ss->can_attach) {
2666 tset->ssid = ssid;
2667 ret = ss->can_attach(tset);
2668 if (ret) {
2669 failed_ssid = ssid;
2670 goto out_cancel_attach;
2671 }
2672 }
2673 } while_each_subsys_mask();
2674 }
2675
2676 /*
2677 * Now that we're guaranteed success, proceed to move all tasks to
2678 * the new cgroup. There are no failure cases after here, so this
2679 * is the commit point.
2680 */
2681 spin_lock_irq(&css_set_lock);
2682 list_for_each_entry(cset, &tset->src_csets, mg_node) {
2683 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2684 struct css_set *from_cset = task_css_set(task);
2685 struct css_set *to_cset = cset->mg_dst_cset;
2686
2687 get_css_set(to_cset);
2688 to_cset->nr_tasks++;
2689 css_set_move_task(task, from_cset, to_cset, true);
2690 from_cset->nr_tasks--;
2691 /*
2692 * If the source or destination cgroup is frozen,
2693 * the task might require to change its state.
2694 */
2695 cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp,
2696 to_cset->dfl_cgrp);
2697 put_css_set_locked(from_cset);
2698
2699 }
2700 }
2701 spin_unlock_irq(&css_set_lock);
2702
2703 /*
2704 * Migration is committed, all target tasks are now on dst_csets.
2705 * Nothing is sensitive to fork() after this point. Notify
2706 * controllers that migration is complete.
2707 */
2708 tset->csets = &tset->dst_csets;
2709
2710 if (tset->nr_tasks) {
2711 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2712 if (ss->attach) {
2713 tset->ssid = ssid;
2714 ss->attach(tset);
2715 }
2716 } while_each_subsys_mask();
2717 }
2718
2719 ret = 0;
2720 goto out_release_tset;
2721
2722 out_cancel_attach:
2723 if (tset->nr_tasks) {
2724 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2725 if (ssid == failed_ssid)
2726 break;
2727 if (ss->cancel_attach) {
2728 tset->ssid = ssid;
2729 ss->cancel_attach(tset);
2730 }
2731 } while_each_subsys_mask();
2732 }
2733 out_release_tset:
2734 spin_lock_irq(&css_set_lock);
2735 list_splice_init(&tset->dst_csets, &tset->src_csets);
2736 list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2737 list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2738 list_del_init(&cset->mg_node);
2739 }
2740 spin_unlock_irq(&css_set_lock);
2741
2742 /*
2743 * Re-initialize the cgroup_taskset structure in case it is reused
2744 * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
2745 * iteration.
2746 */
2747 tset->nr_tasks = 0;
2748 tset->csets = &tset->src_csets;
2749 return ret;
2750 }
2751
2752 /**
2753 * cgroup_migrate_vet_dst - verify whether a cgroup can be migration destination
2754 * @dst_cgrp: destination cgroup to test
2755 *
2756 * On the default hierarchy, except for mixable cgroups, (possible) thread
2757 * roots and threaded cgroups, subtree_control must be zero for migration
2758 * destination cgroups with tasks so that child cgroups don't compete
2759 * against tasks.
2760 */
2761 int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp)
2762 {
2763 /* v1 doesn't have any restriction */
2764 if (!cgroup_on_dfl(dst_cgrp))
2765 return 0;
2766
2767 /* verify @dst_cgrp can host resources */
2768 if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp))
2769 return -EOPNOTSUPP;
2770
2771 /*
2772 * If @dst_cgrp is already or can become a thread root or is
2773 * threaded, it doesn't matter.
2774 */
2775 if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp))
2776 return 0;
2777
2778 /* apply no-internal-process constraint */
2779 if (dst_cgrp->subtree_control)
2780 return -EBUSY;
2781
2782 return 0;
2783 }
2784
2785 /**
2786 * cgroup_migrate_finish - cleanup after attach
2787 * @mgctx: migration context
2788 *
2789 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
2790 * those functions for details.
2791 */
2792 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
2793 {
2794 struct css_set *cset, *tmp_cset;
2795
2796 lockdep_assert_held(&cgroup_mutex);
2797
2798 spin_lock_irq(&css_set_lock);
2799
2800 list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets,
2801 mg_src_preload_node) {
2802 cset->mg_src_cgrp = NULL;
2803 cset->mg_dst_cgrp = NULL;
2804 cset->mg_dst_cset = NULL;
2805 list_del_init(&cset->mg_src_preload_node);
2806 put_css_set_locked(cset);
2807 }
2808
2809 list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets,
2810 mg_dst_preload_node) {
2811 cset->mg_src_cgrp = NULL;
2812 cset->mg_dst_cgrp = NULL;
2813 cset->mg_dst_cset = NULL;
2814 list_del_init(&cset->mg_dst_preload_node);
2815 put_css_set_locked(cset);
2816 }
2817
2818 spin_unlock_irq(&css_set_lock);
2819 }
2820
2821 /**
2822 * cgroup_migrate_add_src - add a migration source css_set
2823 * @src_cset: the source css_set to add
2824 * @dst_cgrp: the destination cgroup
2825 * @mgctx: migration context
2826 *
2827 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
2828 * @src_cset and add it to @mgctx->src_csets, which should later be cleaned
2829 * up by cgroup_migrate_finish().
2830 *
2831 * This function may be called without holding cgroup_threadgroup_rwsem
2832 * even if the target is a process. Threads may be created and destroyed
2833 * but as long as cgroup_mutex is not dropped, no new css_set can be put
2834 * into play and the preloaded css_sets are guaranteed to cover all
2835 * migrations.
2836 */
2837 void cgroup_migrate_add_src(struct css_set *src_cset,
2838 struct cgroup *dst_cgrp,
2839 struct cgroup_mgctx *mgctx)
2840 {
2841 struct cgroup *src_cgrp;
2842
2843 lockdep_assert_held(&cgroup_mutex);
2844 lockdep_assert_held(&css_set_lock);
2845
2846 /*
2847 * If ->dead, @src_cset is associated with one or more dead cgroups
2848 * and doesn't contain any migratable tasks. Ignore it early so
2849 * that the rest of migration path doesn't get confused by it.
2850 */
2851 if (src_cset->dead)
2852 return;
2853
2854 if (!list_empty(&src_cset->mg_src_preload_node))
2855 return;
2856
2857 src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2858
2859 WARN_ON(src_cset->mg_src_cgrp);
2860 WARN_ON(src_cset->mg_dst_cgrp);
2861 WARN_ON(!list_empty(&src_cset->mg_tasks));
2862 WARN_ON(!list_empty(&src_cset->mg_node));
2863
2864 src_cset->mg_src_cgrp = src_cgrp;
2865 src_cset->mg_dst_cgrp = dst_cgrp;
2866 get_css_set(src_cset);
2867 list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets);
2868 }
2869
2870 /**
2871 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2872 * @mgctx: migration context
2873 *
2874 * Tasks are about to be moved and all the source css_sets have been
2875 * preloaded to @mgctx->preloaded_src_csets. This function looks up and
2876 * pins all destination css_sets, links each to its source, and appends them
2877 * to @mgctx->preloaded_dst_csets.
2878 *
2879 * This function must be called after cgroup_migrate_add_src() has been
2880 * called on each migration source css_set. After migration is performed
2881 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2882 * @mgctx.
2883 */
2884 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
2885 {
2886 struct css_set *src_cset, *tmp_cset;
2887
2888 lockdep_assert_held(&cgroup_mutex);
2889
2890 /* look up the dst cset for each src cset and link it to src */
2891 list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
2892 mg_src_preload_node) {
2893 struct css_set *dst_cset;
2894 struct cgroup_subsys *ss;
2895 int ssid;
2896
2897 dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
2898 if (!dst_cset)
2899 return -ENOMEM;
2900
2901 WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2902
2903 /*
2904 * If src cset equals dst, it's a noop. Drop the src.
2905 * cgroup_migrate() will skip the cset too. Note that we
2906 * can't handle src == dst as some nodes are used by both.
2907 */
2908 if (src_cset == dst_cset) {
2909 src_cset->mg_src_cgrp = NULL;
2910 src_cset->mg_dst_cgrp = NULL;
2911 list_del_init(&src_cset->mg_src_preload_node);
2912 put_css_set(src_cset);
2913 put_css_set(dst_cset);
2914 continue;
2915 }
2916
2917 src_cset->mg_dst_cset = dst_cset;
2918
2919 if (list_empty(&dst_cset->mg_dst_preload_node))
2920 list_add_tail(&dst_cset->mg_dst_preload_node,
2921 &mgctx->preloaded_dst_csets);
2922 else
2923 put_css_set(dst_cset);
2924
2925 for_each_subsys(ss, ssid)
2926 if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
2927 mgctx->ss_mask |= 1 << ssid;
2928 }
2929
2930 return 0;
2931 }
2932
2933 /**
2934 * cgroup_migrate - migrate a process or task to a cgroup
2935 * @leader: the leader of the process or the task to migrate
2936 * @threadgroup: whether @leader points to the whole process or a single task
2937 * @mgctx: migration context
2938 *
2939 * Migrate a process or task denoted by @leader. If migrating a process,
2940 * the caller must be holding cgroup_threadgroup_rwsem. The caller is also
2941 * responsible for invoking cgroup_migrate_add_src() and
2942 * cgroup_migrate_prepare_dst() on the targets before invoking this
2943 * function and following up with cgroup_migrate_finish().
2944 *
2945 * As long as a controller's ->can_attach() doesn't fail, this function is
2946 * guaranteed to succeed. This means that, excluding ->can_attach()
2947 * failure, when migrating multiple targets, the success or failure can be
2948 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2949 * actually starting to migrate.
2950 */
2951 int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2952 struct cgroup_mgctx *mgctx)
2953 {
2954 struct task_struct *task;
2955
2956 /*
2957 * The following thread iteration should be inside an RCU critical
2958 * section to prevent tasks from being freed while taking the snapshot.
2959 * spin_lock_irq() implies an RCU critical section here.
2960 */
2961 spin_lock_irq(&css_set_lock);
2962 task = leader;
2963 do {
2964 cgroup_migrate_add_task(task, mgctx);
2965 if (!threadgroup)
2966 break;
2967 } while_each_thread(leader, task);
2968 spin_unlock_irq(&css_set_lock);
2969
2970 return cgroup_migrate_execute(mgctx);
2971 }
2972
2973 /**
2974 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2975 * @dst_cgrp: the cgroup to attach to
2976 * @leader: the task or the leader of the threadgroup to be attached
2977 * @threadgroup: attach the whole threadgroup?
2978 *
2979 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2980 */
2981 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
2982 bool threadgroup)
2983 {
2984 DEFINE_CGROUP_MGCTX(mgctx);
2985 struct task_struct *task;
2986 int ret = 0;
2987
2988 /* look up all src csets */
2989 spin_lock_irq(&css_set_lock);
2990 task = leader;
2991 do {
2992 cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
2993 if (!threadgroup)
2994 break;
2995 } while_each_thread(leader, task);
2996 spin_unlock_irq(&css_set_lock);
2997
2998 /* prepare dst csets and commit */
2999 ret = cgroup_migrate_prepare_dst(&mgctx);
3000 if (!ret)
3001 ret = cgroup_migrate(leader, threadgroup, &mgctx);
3002
3003 cgroup_migrate_finish(&mgctx);
3004
3005 if (!ret)
3006 TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup);
3007
3008 return ret;
3009 }
3010
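/*
 * Parse a PID from @buf, resolve it to a task (the thread group leader if
 * @threadgroup), pin the task and take the attach lock selected through
 * @lock_mode. Undone by cgroup_procs_write_finish().
 */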
3011 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
3012 enum cgroup_attach_lock_mode *lock_mode)
3013 {
3014 struct task_struct *tsk;
3015 pid_t pid;
3016
3017 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
3018 return ERR_PTR(-EINVAL);
3019
3020 retry_find_task:
3021 rcu_read_lock();
3022 if (pid) {
3023 tsk = find_task_by_vpid(pid);
3024 if (!tsk) {
3025 tsk = ERR_PTR(-ESRCH);
3026 goto out_unlock_rcu;
3027 }
3028 } else {
3029 tsk = current;
3030 }
3031
3032 if (threadgroup)
3033 tsk = tsk->group_leader;
3034
3035 /*
3036 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
3037 * If userland migrates such a kthread to a non-root cgroup, it can
3038 * become trapped in a cpuset, or an RT kthread may be born in a
3039 * cgroup with no rt_runtime allocated. Just say no.
3040 */
3041 if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
3042 tsk = ERR_PTR(-EINVAL);
3043 goto out_unlock_rcu;
3044 }
3045 get_task_struct(tsk);
3046 rcu_read_unlock();
3047
3048 /*
3049 * If we migrate a single thread, we don't care about threadgroup
3050 * stability. If the thread is `current`, it won't exit(2) out from under
3051 * us or change PID through exec(2). We exclude
3052 * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write callers
3053 * by cgroup_mutex. Therefore, we can skip the global lock.
3054 */
3055 lockdep_assert_held(&cgroup_mutex);
3056
3057 if (pid || threadgroup) {
3058 if (cgroup_enable_per_threadgroup_rwsem)
3059 *lock_mode = CGRP_ATTACH_LOCK_PER_THREADGROUP;
3060 else
3061 *lock_mode = CGRP_ATTACH_LOCK_GLOBAL;
3062 } else {
3063 *lock_mode = CGRP_ATTACH_LOCK_NONE;
3064 }
3065
3066 cgroup_attach_lock(*lock_mode, tsk);
3067
3068 if (threadgroup) {
3069 if (!thread_group_leader(tsk)) {
3070 /*
3071 * A race with de_thread from another thread's exec()
3072 * may strip us of our leadership. If this happens,
3073 * throw this task away and try again.
3074 */
3075 cgroup_attach_unlock(*lock_mode, tsk);
3076 put_task_struct(tsk);
3077 goto retry_find_task;
3078 }
3079 }
3080
3081 return tsk;
3082
3083 out_unlock_rcu:
3084 rcu_read_unlock();
3085 return tsk;
3086 }
3087
3088 void cgroup_procs_write_finish(struct task_struct *task,
3089 enum cgroup_attach_lock_mode lock_mode)
3090 {
3091 cgroup_attach_unlock(lock_mode, task);
3092
3093 /* release reference from cgroup_procs_write_start() */
3094 put_task_struct(task);
3095 }
3096
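/* Print the names of the subsystems in @ss_mask as a space separated list. */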
3097 static void cgroup_print_ss_mask(struct seq_file *seq, u32 ss_mask)
3098 {
3099 struct cgroup_subsys *ss;
3100 bool printed = false;
3101 int ssid;
3102
3103 do_each_subsys_mask(ss, ssid, ss_mask) {
3104 if (printed)
3105 seq_putc(seq, ' ');
3106 seq_puts(seq, ss->name);
3107 printed = true;
3108 } while_each_subsys_mask();
3109 if (printed)
3110 seq_putc(seq, '\n');
3111 }
3112
3113 /* show controllers which are enabled from the parent */
3114 static int cgroup_controllers_show(struct seq_file *seq, void *v)
3115 {
3116 struct cgroup *cgrp = seq_css(seq)->cgroup;
3117
3118 cgroup_print_ss_mask(seq, cgroup_control(cgrp));
3119 return 0;
3120 }
3121
3122 /* show controllers which are enabled for a given cgroup's children */
3123 static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
3124 {
3125 struct cgroup *cgrp = seq_css(seq)->cgroup;
3126
3127 cgroup_print_ss_mask(seq, cgrp->subtree_control);
3128 return 0;
3129 }
3130
3131 /**
3132 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
3133 * @cgrp: root of the subtree to update csses for
3134 *
3135 * @cgrp's control masks have changed and its subtree's css associations
3136 * need to be updated accordingly. This function looks up all css_sets
3137 * which are attached to the subtree, creates the matching updated css_sets
3138 * and migrates the tasks to the new ones.
3139 */
3140 static int cgroup_update_dfl_csses(struct cgroup *cgrp)
3141 {
3142 DEFINE_CGROUP_MGCTX(mgctx);
3143 struct cgroup_subsys_state *d_css;
3144 struct cgroup *dsct;
3145 struct css_set *src_cset;
3146 enum cgroup_attach_lock_mode lock_mode;
3147 bool has_tasks;
3148 int ret;
3149
3150 lockdep_assert_held(&cgroup_mutex);
3151
3152 /* look up all csses currently attached to @cgrp's subtree */
3153 spin_lock_irq(&css_set_lock);
3154 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3155 struct cgrp_cset_link *link;
3156
3157 /*
3158 * Since cgroup_update_dfl_csses() is only called by
3159 * cgroup_apply_control(), the csses associated with the
3160 * given cgrp are not affected by changes made to
3161 * its subtree_control file, so we can skip them.
3162 */
3163 if (dsct == cgrp)
3164 continue;
3165
3166 list_for_each_entry(link, &dsct->cset_links, cset_link)
3167 cgroup_migrate_add_src(link->cset, dsct, &mgctx);
3168 }
3169 spin_unlock_irq(&css_set_lock);
3170
3171 /*
3172 * We need to write-lock threadgroup_rwsem while migrating tasks.
3173 * However, if there are no source csets for @cgrp, changing its
3174 * controllers isn't gonna produce any task migrations and the
3175 * write-locking can be skipped safely.
3176 */
3177 has_tasks = !list_empty(&mgctx.preloaded_src_csets);
3178
3179 if (has_tasks)
3180 lock_mode = CGRP_ATTACH_LOCK_GLOBAL;
3181 else
3182 lock_mode = CGRP_ATTACH_LOCK_NONE;
3183
3184 cgroup_attach_lock(lock_mode, NULL);
3185
3186 /* NULL dst indicates self on default hierarchy */
3187 ret = cgroup_migrate_prepare_dst(&mgctx);
3188 if (ret)
3189 goto out_finish;
3190
3191 spin_lock_irq(&css_set_lock);
3192 list_for_each_entry(src_cset, &mgctx.preloaded_src_csets,
3193 mg_src_preload_node) {
3194 struct task_struct *task, *ntask;
3195
3196 /* all tasks in src_csets need to be migrated */
3197 list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
3198 cgroup_migrate_add_task(task, &mgctx);
3199 }
3200 spin_unlock_irq(&css_set_lock);
3201
3202 ret = cgroup_migrate_execute(&mgctx);
3203 out_finish:
3204 cgroup_migrate_finish(&mgctx);
3205 cgroup_attach_unlock(lock_mode, NULL);
3206 return ret;
3207 }
3208
3209 /**
3210 * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
3211 * @cgrp: root of the target subtree
3212 *
3213 * Because css offlining is asynchronous, userland may try to re-enable a
3214 * controller while the previous css is still around. This function grabs
3215 * cgroup_mutex and drains the previous css instances of @cgrp's subtree.
3216 */
3217 void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
3218 __acquires(&cgroup_mutex)
3219 {
3220 struct cgroup *dsct;
3221 struct cgroup_subsys_state *d_css;
3222 struct cgroup_subsys *ss;
3223 int ssid;
3224
3225 restart:
3226 cgroup_lock();
3227
3228 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3229 for_each_subsys(ss, ssid) {
3230 struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3231 DEFINE_WAIT(wait);
3232
3233 if (!css || !percpu_ref_is_dying(&css->refcnt))
3234 continue;
3235
3236 cgroup_get_live(dsct);
3237 prepare_to_wait(&dsct->offline_waitq, &wait,
3238 TASK_UNINTERRUPTIBLE);
3239
3240 cgroup_unlock();
3241 schedule();
3242 finish_wait(&dsct->offline_waitq, &wait);
3243
3244 cgroup_put(dsct);
3245 goto restart;
3246 }
3247 }
3248 }
3249
3250 /**
3251 * cgroup_save_control - save control masks and dom_cgrp of a subtree
3252 * @cgrp: root of the target subtree
3253 *
3254 * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
3255 * respective old_ prefixed fields for @cgrp's subtree including @cgrp
3256 * itself.
3257 */
3258 static void cgroup_save_control(struct cgroup *cgrp)
3259 {
3260 struct cgroup *dsct;
3261 struct cgroup_subsys_state *d_css;
3262
3263 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3264 dsct->old_subtree_control = dsct->subtree_control;
3265 dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
3266 dsct->old_dom_cgrp = dsct->dom_cgrp;
3267 }
3268 }
3269
3270 /**
3271 * cgroup_propagate_control - refresh control masks of a subtree
3272 * @cgrp: root of the target subtree
3273 *
3274 * For @cgrp and its subtree, ensure ->subtree_ss_mask matches
3275 * ->subtree_control and propagate controller availability through the
3276 * subtree so that descendants don't have unavailable controllers enabled.
3277 */
3278 static void cgroup_propagate_control(struct cgroup *cgrp)
3279 {
3280 struct cgroup *dsct;
3281 struct cgroup_subsys_state *d_css;
3282
3283 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3284 dsct->subtree_control &= cgroup_control(dsct);
3285 dsct->subtree_ss_mask =
3286 cgroup_calc_subtree_ss_mask(dsct->subtree_control,
3287 cgroup_ss_mask(dsct));
3288 }
3289 }
3290
3291 /**
3292 * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
3293 * @cgrp: root of the target subtree
3294 *
3295 * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
3296 * respective old_ prefixed fields for @cgrp's subtree including @cgrp
3297 * itself.
3298 */
3299 static void cgroup_restore_control(struct cgroup *cgrp)
3300 {
3301 struct cgroup *dsct;
3302 struct cgroup_subsys_state *d_css;
3303
3304 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3305 dsct->subtree_control = dsct->old_subtree_control;
3306 dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
3307 dsct->dom_cgrp = dsct->old_dom_cgrp;
3308 }
3309 }
3310
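/*
 * Whether @css's interface files should be visible: true if its controller
 * is enabled through the parent's subtree_control, or if it is an implicit
 * controller enabled on the default hierarchy.
 */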
3311 static bool css_visible(struct cgroup_subsys_state *css)
3312 {
3313 struct cgroup_subsys *ss = css->ss;
3314 struct cgroup *cgrp = css->cgroup;
3315
3316 if (cgroup_control(cgrp) & (1 << ss->id))
3317 return true;
3318 if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
3319 return false;
3320 return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
3321 }
3322
3323 /**
3324 * cgroup_apply_control_enable - enable or show csses according to control
3325 * @cgrp: root of the target subtree
3326 *
3327 * Walk @cgrp's subtree and create new csses or make the existing ones
3328 * visible. A css is created invisible if it's being implicitly enabled
3329 * through dependency. An invisible css is made visible when the userland
3330 * explicitly enables it.
3331 *
3332 * Returns 0 on success, -errno on failure. On failure, csses which have
3333 * been processed already aren't cleaned up. The caller is responsible for
3334 * cleaning up with cgroup_apply_control_disable().
3335 */
3336 static int cgroup_apply_control_enable(struct cgroup *cgrp)
3337 {
3338 struct cgroup *dsct;
3339 struct cgroup_subsys_state *d_css;
3340 struct cgroup_subsys *ss;
3341 int ssid, ret;
3342
3343 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3344 for_each_subsys(ss, ssid) {
3345 struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3346
3347 if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
3348 continue;
3349
3350 if (!css) {
3351 css = css_create(dsct, ss);
3352 if (IS_ERR(css))
3353 return PTR_ERR(css);
3354 }
3355
3356 WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
3357
3358 if (css_visible(css)) {
3359 ret = css_populate_dir(css);
3360 if (ret)
3361 return ret;
3362 }
3363 }
3364 }
3365
3366 return 0;
3367 }
3368
3369 /**
3370 * cgroup_apply_control_disable - kill or hide csses according to control
3371 * @cgrp: root of the target subtree
3372 *
3373 * Walk @cgrp's subtree and kill and hide csses so that they match
3374 * cgroup_ss_mask() and cgroup_visible_mask().
3375 *
3376 * A css is hidden when the userland requests it to be disabled while other
3377 * subsystems still depend on it. A hidden css must not actively control
3378 * resources and must be in the vanilla state if it's made visible again later.
3379 * Controllers which may be depended upon should provide ->css_reset() for
3380 * this purpose.
3381 */
3382 static void cgroup_apply_control_disable(struct cgroup *cgrp)
3383 {
3384 struct cgroup *dsct;
3385 struct cgroup_subsys_state *d_css;
3386 struct cgroup_subsys *ss;
3387 int ssid;
3388
3389 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3390 for_each_subsys(ss, ssid) {
3391 struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3392
3393 if (!css)
3394 continue;
3395
3396 WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
3397
3398 if (css->parent &&
3399 !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
3400 kill_css_sync(css);
3401 kill_css_finish(css);
3402 } else if (!css_visible(css)) {
3403 css_clear_dir(css);
3404 if (ss->css_reset)
3405 ss->css_reset(css);
3406 }
3407 }
3408 }
3409 }
3410
3411 /**
3412 * cgroup_apply_control - apply control mask updates to the subtree
3413 * @cgrp: root of the target subtree
3414 *
3415 * Subsystems can be enabled and disabled in a subtree using the following
3416 * steps.
3417 *
3418 * 1. Call cgroup_save_control() to stash the current state.
3419 * 2. Update ->subtree_control masks in the subtree as desired.
3420 * 3. Call cgroup_apply_control() to apply the changes.
3421 * 4. Optionally perform other related operations.
3422 * 5. Call cgroup_finalize_control() to finish up.
3423 *
3424 * This function implements step 3 and propagates the mask changes
3425 * throughout @cgrp's subtree, updates csses accordingly and performs
3426 * process migrations.
3427 */
3428 static int cgroup_apply_control(struct cgroup *cgrp)
3429 {
3430 int ret;
3431
3432 cgroup_propagate_control(cgrp);
3433
3434 ret = cgroup_apply_control_enable(cgrp);
3435 if (ret)
3436 return ret;
3437
3438 /*
3439 * At this point, cgroup_e_css_by_mask() results reflect the new csses
3440 * making the following cgroup_update_dfl_csses() properly update
3441 * css associations of all tasks in the subtree.
3442 */
3443 return cgroup_update_dfl_csses(cgrp);
3444 }
3445
3446 /**
3447 * cgroup_finalize_control - finalize control mask update
3448 * @cgrp: root of the target subtree
3449 * @ret: the result of the update
3450 *
3451 * Finalize control mask update. See cgroup_apply_control() for more info.
3452 */
3453 static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
3454 {
3455 if (ret) {
3456 cgroup_restore_control(cgrp);
3457 cgroup_propagate_control(cgrp);
3458 }
3459
3460 cgroup_apply_control_disable(cgrp);
3461 }
3462
3463 static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u32 enable)
3464 {
3465 u32 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask;
3466
3467 /* if nothing is getting enabled, nothing to worry about */
3468 if (!enable)
3469 return 0;
3470
3471 /* can @cgrp host any resources? */
3472 if (!cgroup_is_valid_domain(cgrp->dom_cgrp))
3473 return -EOPNOTSUPP;
3474
3475 /* mixables don't care */
3476 if (cgroup_is_mixable(cgrp))
3477 return 0;
3478
3479 if (domain_enable) {
3480 /* can't enable domain controllers inside a thread subtree */
3481 if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp))
3482 return -EOPNOTSUPP;
3483 } else {
3484 /*
3485 * Threaded controllers can handle internal competitions
3486 * and are always allowed inside a (prospective) thread
3487 * subtree.
3488 */
3489 if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp))
3490 return 0;
3491 }
3492
3493 /*
3494 * Controllers can't be enabled for a cgroup with tasks to avoid
3495 * child cgroups competing against tasks.
3496 */
3497 if (cgroup_has_tasks(cgrp))
3498 return -EBUSY;
3499
3500 return 0;
3501 }
3502
3503 /* change the enabled child controllers for a cgroup in the default hierarchy */
3504 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
3505 char *buf, size_t nbytes,
3506 loff_t off)
3507 {
3508 u32 enable = 0, disable = 0;
3509 struct cgroup *cgrp, *child;
3510 struct cgroup_subsys *ss;
3511 char *tok;
3512 int ssid, ret;
3513
3514 /*
3515 * Parse input - space separated list of subsystem names prefixed
3516 * with either + or -.
3517 */
3518 buf = strstrip(buf);
3519 while ((tok = strsep(&buf, " "))) {
3520 if (tok[0] == '\0')
3521 continue;
3522 do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
3523 if (!cgroup_ssid_enabled(ssid) ||
3524 strcmp(tok + 1, ss->name))
3525 continue;
3526
3527 if (*tok == '+') {
3528 enable |= 1 << ssid;
3529 disable &= ~(1 << ssid);
3530 } else if (*tok == '-') {
3531 disable |= 1 << ssid;
3532 enable &= ~(1 << ssid);
3533 } else {
3534 return -EINVAL;
3535 }
3536 break;
3537 } while_each_subsys_mask();
3538 if (ssid == CGROUP_SUBSYS_COUNT)
3539 return -EINVAL;
3540 }
3541
3542 cgrp = cgroup_kn_lock_live(of->kn, true);
3543 if (!cgrp)
3544 return -ENODEV;
3545
3546 for_each_subsys(ss, ssid) {
3547 if (enable & (1 << ssid)) {
3548 if (cgrp->subtree_control & (1 << ssid)) {
3549 enable &= ~(1 << ssid);
3550 continue;
3551 }
3552
3553 if (!(cgroup_control(cgrp) & (1 << ssid))) {
3554 ret = -ENOENT;
3555 goto out_unlock;
3556 }
3557 } else if (disable & (1 << ssid)) {
3558 if (!(cgrp->subtree_control & (1 << ssid))) {
3559 disable &= ~(1 << ssid);
3560 continue;
3561 }
3562
3563 /* a child has it enabled? */
3564 cgroup_for_each_live_child(child, cgrp) {
3565 if (child->subtree_control & (1 << ssid)) {
3566 ret = -EBUSY;
3567 goto out_unlock;
3568 }
3569 }
3570 }
3571 }
3572
3573 if (!enable && !disable) {
3574 ret = 0;
3575 goto out_unlock;
3576 }
3577
3578 ret = cgroup_vet_subtree_control_enable(cgrp, enable);
3579 if (ret)
3580 goto out_unlock;
3581
3582 /* save and update control masks and prepare csses */
3583 cgroup_save_control(cgrp);
3584
3585 cgrp->subtree_control |= enable;
3586 cgrp->subtree_control &= ~disable;
3587
3588 ret = cgroup_apply_control(cgrp);
3589 cgroup_finalize_control(cgrp, ret);
3590 if (ret)
3591 goto out_unlock;
3592
3593 kernfs_activate(cgrp->kn);
3594 out_unlock:
3595 cgroup_kn_unlock(of->kn);
3596 return ret ?: nbytes;
3597 }
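
/*
 * Illustrative sketch (userspace, not part of this file): the write
 * format parsed above is a space separated list of "+name" / "-name"
 * tokens. The cgroup path and helper name below are assumptions made
 * for the example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int example_enable_controllers(const char *cgrp_path)
{
	char file[256];
	int fd, ret;

	snprintf(file, sizeof(file), "%s/cgroup.subtree_control", cgrp_path);
	fd = open(file, O_WRONLY);
	if (fd < 0)
		return -1;

	/* enable memory and disable pids for the children of cgrp_path */
	ret = write(fd, "+memory -pids", strlen("+memory -pids"));
	close(fd);
	return ret < 0 ? -1 : 0;
}
#endif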
3598
3599 /**
3600 * cgroup_enable_threaded - make @cgrp threaded
3601 * @cgrp: the target cgroup
3602 *
3603 * Called when "threaded" is written to the cgroup.type interface file and
3604 * tries to make @cgrp threaded and join the parent's resource domain.
3605 * This function is never called on the root cgroup as cgroup.type doesn't
3606 * exist on it.
3607 */
3608 static int cgroup_enable_threaded(struct cgroup *cgrp)
3609 {
3610 struct cgroup *parent = cgroup_parent(cgrp);
3611 struct cgroup *dom_cgrp = parent->dom_cgrp;
3612 struct cgroup *dsct;
3613 struct cgroup_subsys_state *d_css;
3614 int ret;
3615
3616 lockdep_assert_held(&cgroup_mutex);
3617
3618 /* noop if already threaded */
3619 if (cgroup_is_threaded(cgrp))
3620 return 0;
3621
3622 /*
3623 * If @cgroup is populated or has domain controllers enabled, it
3624 * can't be switched. While the below cgroup_can_be_thread_root()
3625 * test can catch the same conditions, that's only when @parent is
3626 * not mixable, so let's check it explicitly.
3627 */
3628 if (cgroup_is_populated(cgrp) ||
3629 cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
3630 return -EOPNOTSUPP;
3631
3632 /* we're joining the parent's domain, ensure its validity */
3633 if (!cgroup_is_valid_domain(dom_cgrp) ||
3634 !cgroup_can_be_thread_root(dom_cgrp))
3635 return -EOPNOTSUPP;
3636
3637 /*
3638 * The following shouldn't cause actual migrations and should
3639 * always succeed.
3640 */
3641 cgroup_save_control(cgrp);
3642
3643 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
3644 if (dsct == cgrp || cgroup_is_threaded(dsct))
3645 dsct->dom_cgrp = dom_cgrp;
3646
3647 ret = cgroup_apply_control(cgrp);
3648 if (!ret)
3649 parent->nr_threaded_children++;
3650
3651 cgroup_finalize_control(cgrp, ret);
3652 return ret;
3653 }
3654
3655 static int cgroup_type_show(struct seq_file *seq, void *v)
3656 {
3657 struct cgroup *cgrp = seq_css(seq)->cgroup;
3658
3659 if (cgroup_is_threaded(cgrp))
3660 seq_puts(seq, "threaded\n");
3661 else if (!cgroup_is_valid_domain(cgrp))
3662 seq_puts(seq, "domain invalid\n");
3663 else if (cgroup_is_thread_root(cgrp))
3664 seq_puts(seq, "domain threaded\n");
3665 else
3666 seq_puts(seq, "domain\n");
3667
3668 return 0;
3669 }
3670
3671 static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf,
3672 size_t nbytes, loff_t off)
3673 {
3674 struct cgroup *cgrp;
3675 int ret;
3676
3677 /* only switching to threaded mode is supported */
3678 if (strcmp(strstrip(buf), "threaded"))
3679 return -EINVAL;
3680
3681 /* drain dying csses before we re-apply (threaded) subtree control */
3682 cgrp = cgroup_kn_lock_live(of->kn, true);
3683 if (!cgrp)
3684 return -ENOENT;
3685
3686 /* threaded can only be enabled */
3687 ret = cgroup_enable_threaded(cgrp);
3688
3689 cgroup_kn_unlock(of->kn);
3690 return ret ?: nbytes;
3691 }
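
/*
 * Illustrative sketch (userspace, not part of this file): switching a
 * cgroup to threaded mode through cgroup.type and reading the mode back.
 * "threaded" is the only accepted write; the path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_make_threaded(void)
{
	char buf[32];
	int fd;

	fd = open("/sys/fs/cgroup/mygroup/leaf/cgroup.type", O_RDWR);
	if (fd < 0)
		return -1;

	if (write(fd, "threaded", 8) < 0) {	/* EOPNOTSUPP if not allowed */
		close(fd);
		return -1;
	}

	pread(fd, buf, sizeof(buf) - 1, 0);	/* now reads "threaded\n" */
	close(fd);
	return 0;
}
#endif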
3692
3693 static int cgroup_max_descendants_show(struct seq_file *seq, void *v)
3694 {
3695 struct cgroup *cgrp = seq_css(seq)->cgroup;
3696 int descendants = READ_ONCE(cgrp->max_descendants);
3697
3698 if (descendants == INT_MAX)
3699 seq_puts(seq, "max\n");
3700 else
3701 seq_printf(seq, "%d\n", descendants);
3702
3703 return 0;
3704 }
3705
3706 static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of,
3707 char *buf, size_t nbytes, loff_t off)
3708 {
3709 struct cgroup *cgrp;
3710 int descendants;
3711 ssize_t ret;
3712
3713 buf = strstrip(buf);
3714 if (!strcmp(buf, "max")) {
3715 descendants = INT_MAX;
3716 } else {
3717 ret = kstrtoint(buf, 0, &descendants);
3718 if (ret)
3719 return ret;
3720 }
3721
3722 if (descendants < 0)
3723 return -ERANGE;
3724
3725 cgrp = cgroup_kn_lock_live(of->kn, false);
3726 if (!cgrp)
3727 return -ENOENT;
3728
3729 cgrp->max_descendants = descendants;
3730
3731 cgroup_kn_unlock(of->kn);
3732
3733 return nbytes;
3734 }
3735
3736 static int cgroup_max_depth_show(struct seq_file *seq, void *v)
3737 {
3738 struct cgroup *cgrp = seq_css(seq)->cgroup;
3739 int depth = READ_ONCE(cgrp->max_depth);
3740
3741 if (depth == INT_MAX)
3742 seq_puts(seq, "max\n");
3743 else
3744 seq_printf(seq, "%d\n", depth);
3745
3746 return 0;
3747 }
3748
3749 static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of,
3750 char *buf, size_t nbytes, loff_t off)
3751 {
3752 struct cgroup *cgrp;
3753 ssize_t ret;
3754 int depth;
3755
3756 buf = strstrip(buf);
3757 if (!strcmp(buf, "max")) {
3758 depth = INT_MAX;
3759 } else {
3760 ret = kstrtoint(buf, 0, &depth);
3761 if (ret)
3762 return ret;
3763 }
3764
3765 if (depth < 0)
3766 return -ERANGE;
3767
3768 cgrp = cgroup_kn_lock_live(of->kn, false);
3769 if (!cgrp)
3770 return -ENOENT;
3771
3772 cgrp->max_depth = depth;
3773
3774 cgroup_kn_unlock(of->kn);
3775
3776 return nbytes;
3777 }
3778
3779 static int cgroup_events_show(struct seq_file *seq, void *v)
3780 {
3781 struct cgroup *cgrp = seq_css(seq)->cgroup;
3782
3783 seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp));
3784 seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags));
3785
3786 return 0;
3787 }
3788
3789 static int cgroup_stat_show(struct seq_file *seq, void *v)
3790 {
3791 struct cgroup *cgroup = seq_css(seq)->cgroup;
3792 struct cgroup_subsys_state *css;
3793 int dying_cnt[CGROUP_SUBSYS_COUNT];
3794 int ssid;
3795
3796 seq_printf(seq, "nr_descendants %d\n",
3797 cgroup->nr_descendants);
3798
3799 /*
3800 * Show the number of live and dying csses associated with each of
3801 * non-inhibited cgroup subsystems that are bound to cgroup v2.
3802 *
3803 * Without proper lock protection, racing is possible. So the
3804 * numbers may not be consistent when that happens.
3805 */
3806 rcu_read_lock();
3807 for (ssid = 0; ssid < CGROUP_SUBSYS_COUNT; ssid++) {
3808 dying_cnt[ssid] = -1;
3809 if ((BIT(ssid) & cgrp_dfl_inhibit_ss_mask) ||
3810 (cgroup_subsys[ssid]->root != &cgrp_dfl_root))
3811 continue;
3812 css = rcu_dereference_raw(cgroup->subsys[ssid]);
3813 dying_cnt[ssid] = cgroup->nr_dying_subsys[ssid];
3814 seq_printf(seq, "nr_subsys_%s %d\n", cgroup_subsys[ssid]->name,
3815 css ? (css->nr_descendants + 1) : 0);
3816 }
3817
3818 seq_printf(seq, "nr_dying_descendants %d\n",
3819 cgroup->nr_dying_descendants);
3820 for (ssid = 0; ssid < CGROUP_SUBSYS_COUNT; ssid++) {
3821 if (dying_cnt[ssid] >= 0)
3822 seq_printf(seq, "nr_dying_subsys_%s %d\n",
3823 cgroup_subsys[ssid]->name, dying_cnt[ssid]);
3824 }
3825 rcu_read_unlock();
3826 return 0;
3827 }
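
/*
 * Illustrative cgroup.stat output produced by the function above (all
 * values are made up); one nr_subsys_<name> and nr_dying_subsys_<name>
 * pair is printed per non-inhibited controller bound to cgroup v2:
 *
 *   nr_descendants 4
 *   nr_subsys_memory 5
 *   nr_subsys_io 5
 *   nr_dying_descendants 0
 *   nr_dying_subsys_memory 0
 *   nr_dying_subsys_io 0
 */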
3828
3829 static int cgroup_core_local_stat_show(struct seq_file *seq, void *v)
3830 {
3831 struct cgroup *cgrp = seq_css(seq)->cgroup;
3832 unsigned int sequence;
3833 u64 freeze_time;
3834
3835 do {
3836 sequence = read_seqcount_begin(&cgrp->freezer.freeze_seq);
3837 freeze_time = cgrp->freezer.frozen_nsec;
3838 /* Add in current freezer interval if the cgroup is freezing. */
3839 if (test_bit(CGRP_FREEZE, &cgrp->flags))
3840 freeze_time += (ktime_get_ns() -
3841 cgrp->freezer.freeze_start_nsec);
3842 } while (read_seqcount_retry(&cgrp->freezer.freeze_seq, sequence));
3843
3844 do_div(freeze_time, NSEC_PER_USEC);
3845 seq_printf(seq, "frozen_usec %llu\n", freeze_time);
3846
3847 return 0;
3848 }
3849
3850 #ifdef CONFIG_CGROUP_SCHED
3851 /**
3852 * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
3853 * @cgrp: the cgroup of interest
3854 * @ss: the subsystem of interest
3855 *
3856 * Find and get @cgrp's css associated with @ss. If the css doesn't exist
3857 * or is offline, %NULL is returned.
3858 */
3859 static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
3860 struct cgroup_subsys *ss)
3861 {
3862 struct cgroup_subsys_state *css;
3863
3864 rcu_read_lock();
3865 css = cgroup_css(cgrp, ss);
3866 if (css && !css_tryget_online(css))
3867 css = NULL;
3868 rcu_read_unlock();
3869
3870 return css;
3871 }
3872
3873 static int cgroup_extra_stat_show(struct seq_file *seq, int ssid)
3874 {
3875 struct cgroup *cgrp = seq_css(seq)->cgroup;
3876 struct cgroup_subsys *ss = cgroup_subsys[ssid];
3877 struct cgroup_subsys_state *css;
3878 int ret;
3879
3880 if (!ss->css_extra_stat_show)
3881 return 0;
3882
3883 css = cgroup_tryget_css(cgrp, ss);
3884 if (!css)
3885 return 0;
3886
3887 ret = ss->css_extra_stat_show(seq, css);
3888 css_put(css);
3889 return ret;
3890 }
3891
3892 static int cgroup_local_stat_show(struct seq_file *seq,
3893 struct cgroup *cgrp, int ssid)
3894 {
3895 struct cgroup_subsys *ss = cgroup_subsys[ssid];
3896 struct cgroup_subsys_state *css;
3897 int ret;
3898
3899 if (!ss->css_local_stat_show)
3900 return 0;
3901
3902 css = cgroup_tryget_css(cgrp, ss);
3903 if (!css)
3904 return 0;
3905
3906 ret = ss->css_local_stat_show(seq, css);
3907 css_put(css);
3908 return ret;
3909 }
3910 #endif
3911
3912 static int cpu_stat_show(struct seq_file *seq, void *v)
3913 {
3914 int ret = 0;
3915
3916 cgroup_base_stat_cputime_show(seq);
3917 #ifdef CONFIG_CGROUP_SCHED
3918 ret = cgroup_extra_stat_show(seq, cpu_cgrp_id);
3919 #endif
3920 return ret;
3921 }
3922
3923 static int cpu_local_stat_show(struct seq_file *seq, void *v)
3924 {
3925 struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
3926 int ret = 0;
3927
3928 #ifdef CONFIG_CGROUP_SCHED
3929 ret = cgroup_local_stat_show(seq, cgrp, cpu_cgrp_id);
3930 #endif
3931 return ret;
3932 }
3933
3934 #ifdef CONFIG_PSI
3935 static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
3936 {
3937 struct cgroup *cgrp = seq_css(seq)->cgroup;
3938 struct psi_group *psi = cgroup_psi(cgrp);
3939
3940 return psi_show(seq, psi, PSI_IO);
3941 }
3942 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
3943 {
3944 struct cgroup *cgrp = seq_css(seq)->cgroup;
3945 struct psi_group *psi = cgroup_psi(cgrp);
3946
3947 return psi_show(seq, psi, PSI_MEM);
3948 }
3949 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
3950 {
3951 struct cgroup *cgrp = seq_css(seq)->cgroup;
3952 struct psi_group *psi = cgroup_psi(cgrp);
3953
3954 return psi_show(seq, psi, PSI_CPU);
3955 }
3956
3957 static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
3958 size_t nbytes, enum psi_res res)
3959 {
3960 struct cgroup_file_ctx *ctx;
3961 struct psi_trigger *new;
3962 struct cgroup *cgrp;
3963 struct psi_group *psi;
3964 ssize_t ret = 0;
3965
3966 cgrp = cgroup_kn_lock_live(of->kn, false);
3967 if (!cgrp)
3968 return -ENODEV;
3969
3970 ctx = of->priv;
3971 if (!ctx) {
3972 ret = -ENODEV;
3973 goto out_unlock;
3974 }
3975
3976 /* Allow only one trigger per file descriptor */
3977 if (ctx->psi.trigger) {
3978 ret = -EBUSY;
3979 goto out_unlock;
3980 }
3981
3982 psi = cgroup_psi(cgrp);
3983 new = psi_trigger_create(psi, buf, res, of->file, of);
3984 if (IS_ERR(new)) {
3985 ret = PTR_ERR(new);
3986 goto out_unlock;
3987 }
3988
3989 smp_store_release(&ctx->psi.trigger, new);
3990
3991 out_unlock:
3992 cgroup_kn_unlock(of->kn);
3993 if (ret)
3994 return ret;
3995
3996 return nbytes;
3997 }
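
/*
 * Illustrative sketch (userspace, not part of this file): registering a
 * PSI trigger on a cgroup's memory.pressure file and waiting for it to
 * fire. The "some <stall us> <window us>" trigger format and the POLLPRI
 * wakeup follow Documentation/accounting/psi.rst; the cgroup path is an
 * assumption.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

static int example_wait_for_memory_pressure(void)
{
	const char trig[] = "some 150000 1000000";	/* 150ms stall per 1s */
	struct pollfd pfd;
	int fd;

	fd = open("/sys/fs/cgroup/mygroup/memory.pressure", O_RDWR);
	if (fd < 0)
		return -1;

	if (write(fd, trig, strlen(trig)) < 0) {	/* pressure_write() above */
		close(fd);
		return -1;
	}

	pfd.fd = fd;
	pfd.events = POLLPRI;
	poll(&pfd, 1, -1);	/* serviced by cgroup_pressure_poll() below */

	close(fd);		/* cgroup_pressure_release() frees the trigger */
	return 0;
}
#endif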
3998
3999 static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
4000 char *buf, size_t nbytes,
4001 loff_t off)
4002 {
4003 return pressure_write(of, buf, nbytes, PSI_IO);
4004 }
4005
4006 static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
4007 char *buf, size_t nbytes,
4008 loff_t off)
4009 {
4010 return pressure_write(of, buf, nbytes, PSI_MEM);
4011 }
4012
4013 static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
4014 char *buf, size_t nbytes,
4015 loff_t off)
4016 {
4017 return pressure_write(of, buf, nbytes, PSI_CPU);
4018 }
4019
4020 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
4021 static int cgroup_irq_pressure_show(struct seq_file *seq, void *v)
4022 {
4023 struct cgroup *cgrp = seq_css(seq)->cgroup;
4024 struct psi_group *psi = cgroup_psi(cgrp);
4025
4026 return psi_show(seq, psi, PSI_IRQ);
4027 }
4028
4029 static ssize_t cgroup_irq_pressure_write(struct kernfs_open_file *of,
4030 char *buf, size_t nbytes,
4031 loff_t off)
4032 {
4033 return pressure_write(of, buf, nbytes, PSI_IRQ);
4034 }
4035 #endif
4036
4037 static int cgroup_pressure_show(struct seq_file *seq, void *v)
4038 {
4039 struct cgroup *cgrp = seq_css(seq)->cgroup;
4040 struct psi_group *psi = cgroup_psi(cgrp);
4041
4042 seq_printf(seq, "%d\n", psi->enabled);
4043
4044 return 0;
4045 }
4046
4047 static ssize_t cgroup_pressure_write(struct kernfs_open_file *of,
4048 char *buf, size_t nbytes,
4049 loff_t off)
4050 {
4051 ssize_t ret;
4052 int enable;
4053 struct cgroup *cgrp;
4054 struct psi_group *psi;
4055
4056 ret = kstrtoint(strstrip(buf), 0, &enable);
4057 if (ret)
4058 return ret;
4059
4060 if (enable < 0 || enable > 1)
4061 return -ERANGE;
4062
4063 cgrp = cgroup_kn_lock_live(of->kn, false);
4064 if (!cgrp)
4065 return -ENOENT;
4066
4067 psi = cgroup_psi(cgrp);
4068 if (psi->enabled != enable) {
4069 int i;
4070
4071 /* show or hide {cpu,memory,io,irq}.pressure files */
4072 for (i = 0; i < NR_PSI_RESOURCES; i++)
4073 cgroup_file_show(&cgrp->psi_files[i], enable);
4074
4075 psi->enabled = enable;
4076 if (enable)
4077 psi_cgroup_restart(psi);
4078 }
4079
4080 cgroup_kn_unlock(of->kn);
4081
4082 return nbytes;
4083 }
4084
4085 static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
4086 poll_table *pt)
4087 {
4088 struct cgroup_file_ctx *ctx = of->priv;
4089
4090 return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
4091 }
4092
4093 static void cgroup_pressure_release(struct kernfs_open_file *of)
4094 {
4095 struct cgroup_file_ctx *ctx = of->priv;
4096
4097 psi_trigger_destroy(ctx->psi.trigger);
4098 }
4099
4100 bool cgroup_psi_enabled(void)
4101 {
4102 if (static_branch_likely(&psi_disabled))
4103 return false;
4104
4105 return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0;
4106 }
4107
4108 #else /* CONFIG_PSI */
4109 bool cgroup_psi_enabled(void)
4110 {
4111 return false;
4112 }
4113
4114 #endif /* CONFIG_PSI */
4115
4116 static int cgroup_freeze_show(struct seq_file *seq, void *v)
4117 {
4118 struct cgroup *cgrp = seq_css(seq)->cgroup;
4119
4120 seq_printf(seq, "%d\n", cgrp->freezer.freeze);
4121
4122 return 0;
4123 }
4124
4125 static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
4126 char *buf, size_t nbytes, loff_t off)
4127 {
4128 struct cgroup *cgrp;
4129 ssize_t ret;
4130 int freeze;
4131
4132 ret = kstrtoint(strstrip(buf), 0, &freeze);
4133 if (ret)
4134 return ret;
4135
4136 if (freeze < 0 || freeze > 1)
4137 return -ERANGE;
4138
4139 cgrp = cgroup_kn_lock_live(of->kn, false);
4140 if (!cgrp)
4141 return -ENOENT;
4142
4143 cgroup_freeze(cgrp, freeze);
4144
4145 cgroup_kn_unlock(of->kn);
4146
4147 return nbytes;
4148 }
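
/*
 * Illustrative sketch (userspace, not part of this file): freezing a
 * cgroup via cgroup.freeze. Only "0" and "1" are accepted; completion is
 * reported asynchronously through the "frozen" key of cgroup.events. The
 * path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_freeze_cgroup(void)
{
	int fd = open("/sys/fs/cgroup/mygroup/cgroup.freeze", O_WRONLY);

	if (fd < 0)
		return -1;

	write(fd, "1", 1);	/* request freeze */
	close(fd);

	/* poll cgroup.events (POLLPRI) and wait for "frozen 1" */
	return 0;
}
#endif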
4149
4150 static void __cgroup_kill(struct cgroup *cgrp)
4151 {
4152 struct css_task_iter it;
4153 struct task_struct *task;
4154
4155 lockdep_assert_held(&cgroup_mutex);
4156
4157 spin_lock_irq(&css_set_lock);
4158 cgrp->kill_seq++;
4159 spin_unlock_irq(&css_set_lock);
4160
4161 css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
4162 while ((task = css_task_iter_next(&it))) {
4163 /* Ignore kernel threads here. */
4164 if (task->flags & PF_KTHREAD)
4165 continue;
4166
4167 /* Skip tasks that are already dying. */
4168 if (__fatal_signal_pending(task))
4169 continue;
4170
4171 send_sig(SIGKILL, task, 0);
4172 }
4173 css_task_iter_end(&it);
4174 }
4175
4176 static void cgroup_kill(struct cgroup *cgrp)
4177 {
4178 struct cgroup_subsys_state *css;
4179 struct cgroup *dsct;
4180
4181 lockdep_assert_held(&cgroup_mutex);
4182
4183 cgroup_for_each_live_descendant_pre(dsct, css, cgrp)
4184 __cgroup_kill(dsct);
4185 }
4186
4187 static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf,
4188 size_t nbytes, loff_t off)
4189 {
4190 ssize_t ret = 0;
4191 int kill;
4192 struct cgroup *cgrp;
4193
4194 ret = kstrtoint(strstrip(buf), 0, &kill);
4195 if (ret)
4196 return ret;
4197
4198 if (kill != 1)
4199 return -ERANGE;
4200
4201 cgrp = cgroup_kn_lock_live(of->kn, false);
4202 if (!cgrp)
4203 return -ENOENT;
4204
4205 /*
4206 * Killing is a process directed operation, i.e. the whole thread-group
4207 * is taken down, so act like we do for cgroup.procs and only make this
4208 * writable in non-threaded cgroups.
4209 */
4210 if (cgroup_is_threaded(cgrp))
4211 ret = -EOPNOTSUPP;
4212 else
4213 cgroup_kill(cgrp);
4214
4215 cgroup_kn_unlock(of->kn);
4216
4217 return ret ?: nbytes;
4218 }
4219
4220 static int cgroup_file_open(struct kernfs_open_file *of)
4221 {
4222 struct cftype *cft = of_cft(of);
4223 struct cgroup_file_ctx *ctx;
4224 int ret;
4225
4226 ctx = kzalloc_obj(*ctx);
4227 if (!ctx)
4228 return -ENOMEM;
4229
4230 ctx->ns = current->nsproxy->cgroup_ns;
4231 get_cgroup_ns(ctx->ns);
4232 of->priv = ctx;
4233
4234 if (!cft->open)
4235 return 0;
4236
4237 ret = cft->open(of);
4238 if (ret) {
4239 put_cgroup_ns(ctx->ns);
4240 kfree(ctx);
4241 }
4242 return ret;
4243 }
4244
4245 static void cgroup_file_release(struct kernfs_open_file *of)
4246 {
4247 struct cftype *cft = of_cft(of);
4248 struct cgroup_file_ctx *ctx = of->priv;
4249
4250 if (cft->release)
4251 cft->release(of);
4252 put_cgroup_ns(ctx->ns);
4253 kfree(ctx);
4254 of->priv = NULL;
4255 }
4256
4257 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
4258 size_t nbytes, loff_t off)
4259 {
4260 struct cgroup_file_ctx *ctx = of->priv;
4261 struct cgroup *cgrp = kn_priv(of->kn);
4262 struct cftype *cft = of_cft(of);
4263 struct cgroup_subsys_state *css;
4264 int ret;
4265
4266 if (!nbytes)
4267 return 0;
4268
4269 /*
4270 * If namespaces are delegation boundaries, disallow writes to
4271 * files in a non-init namespace root from inside the namespace
4272 * except for the files explicitly marked delegatable -
4273 * eg. cgroup.procs, cgroup.threads and cgroup.subtree_control.
4274 */
4275 if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
4276 !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
4277 ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp)
4278 return -EPERM;
4279
4280 if (cft->write)
4281 return cft->write(of, buf, nbytes, off);
4282
4283 /*
4284 * kernfs guarantees that a file isn't deleted with operations in
4285 * flight, which means that the matching css is and stays alive and
4286 * doesn't need to be pinned. The RCU locking is not necessary
4287 * either. It's just for the convenience of using cgroup_css().
4288 */
4289 rcu_read_lock();
4290 css = cgroup_css(cgrp, cft->ss);
4291 rcu_read_unlock();
4292
4293 if (cft->write_u64) {
4294 unsigned long long v;
4295 ret = kstrtoull(buf, 0, &v);
4296 if (!ret)
4297 ret = cft->write_u64(css, cft, v);
4298 } else if (cft->write_s64) {
4299 long long v;
4300 ret = kstrtoll(buf, 0, &v);
4301 if (!ret)
4302 ret = cft->write_s64(css, cft, v);
4303 } else {
4304 ret = -EINVAL;
4305 }
4306
4307 return ret ?: nbytes;
4308 }
4309
4310 static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
4311 {
4312 struct cftype *cft = of_cft(of);
4313
4314 if (cft->poll)
4315 return cft->poll(of, pt);
4316
4317 return kernfs_generic_poll(of, pt);
4318 }
4319
4320 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
4321 {
4322 return seq_cft(seq)->seq_start(seq, ppos);
4323 }
4324
4325 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
4326 {
4327 return seq_cft(seq)->seq_next(seq, v, ppos);
4328 }
4329
4330 static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
4331 {
4332 if (seq_cft(seq)->seq_stop)
4333 seq_cft(seq)->seq_stop(seq, v);
4334 }
4335
4336 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
4337 {
4338 struct cftype *cft = seq_cft(m);
4339 struct cgroup_subsys_state *css = seq_css(m);
4340
4341 if (cft->seq_show)
4342 return cft->seq_show(m, arg);
4343
4344 if (cft->read_u64)
4345 seq_printf(m, "%llu\n", cft->read_u64(css, cft));
4346 else if (cft->read_s64)
4347 seq_printf(m, "%lld\n", cft->read_s64(css, cft));
4348 else
4349 return -EINVAL;
4350 return 0;
4351 }
4352
4353 static struct kernfs_ops cgroup_kf_single_ops = {
4354 .atomic_write_len = PAGE_SIZE,
4355 .open = cgroup_file_open,
4356 .release = cgroup_file_release,
4357 .write = cgroup_file_write,
4358 .poll = cgroup_file_poll,
4359 .seq_show = cgroup_seqfile_show,
4360 };
4361
4362 static struct kernfs_ops cgroup_kf_ops = {
4363 .atomic_write_len = PAGE_SIZE,
4364 .open = cgroup_file_open,
4365 .release = cgroup_file_release,
4366 .write = cgroup_file_write,
4367 .poll = cgroup_file_poll,
4368 .seq_start = cgroup_seqfile_start,
4369 .seq_next = cgroup_seqfile_next,
4370 .seq_stop = cgroup_seqfile_stop,
4371 .seq_show = cgroup_seqfile_show,
4372 };
4373
4374 static void cgroup_file_notify_timer(struct timer_list *timer)
4375 {
4376 cgroup_file_notify(container_of(timer, struct cgroup_file,
4377 notify_timer));
4378 }
4379
4380 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
4381 struct cftype *cft)
4382 {
4383 char name[CGROUP_FILE_NAME_MAX];
4384 struct kernfs_node *kn;
4385 struct lock_class_key *key = NULL;
4386
4387 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4388 key = &cft->lockdep_key;
4389 #endif
4390 kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
4391 cgroup_file_mode(cft),
4392 current_fsuid(), current_fsgid(),
4393 0, cft->kf_ops, cft,
4394 NULL, key);
4395 if (IS_ERR(kn))
4396 return PTR_ERR(kn);
4397
4398 if (cft->file_offset) {
4399 struct cgroup_file *cfile = (void *)css + cft->file_offset;
4400
4401 timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0);
4402 spin_lock_init(&cfile->lock);
4403 cfile->kn = kn;
4404 }
4405
4406 return 0;
4407 }
4408
4409 /**
4410 * cgroup_addrm_files - add or remove files to a cgroup directory
4411 * @css: the target css
4412 * @cgrp: the target cgroup (usually css->cgroup)
4413 * @cfts: array of cftypes to be added
4414 * @is_add: whether to add or remove
4415 *
4416 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
4417 * For removals, this function never fails.
4418 */
4419 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
4420 struct cgroup *cgrp, struct cftype cfts[],
4421 bool is_add)
4422 {
4423 struct cftype *cft, *cft_end = NULL;
4424 int ret = 0;
4425
4426 lockdep_assert_held(&cgroup_mutex);
4427
4428 restart:
4429 for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
4430 /* does cft->flags tell us to skip this file on @cgrp? */
4431 if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
4432 continue;
4433 if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
4434 continue;
4435 if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
4436 continue;
4437 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
4438 continue;
4439 if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug)
4440 continue;
4441 if (is_add) {
4442 ret = cgroup_add_file(css, cgrp, cft);
4443 if (ret) {
4444 pr_warn("%s: failed to add %s, err=%d\n",
4445 __func__, cft->name, ret);
4446 cft_end = cft;
4447 is_add = false;
4448 goto restart;
4449 }
4450 } else {
4451 cgroup_rm_file(cgrp, cft);
4452 }
4453 }
4454 return ret;
4455 }
4456
4457 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
4458 {
4459 struct cgroup_subsys *ss = cfts[0].ss;
4460 struct cgroup *root = &ss->root->cgrp;
4461 struct cgroup_subsys_state *css;
4462 int ret = 0;
4463
4464 lockdep_assert_held(&cgroup_mutex);
4465
4466 /* add/rm files for all cgroups created before */
4467 css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
4468 struct cgroup *cgrp = css->cgroup;
4469
4470 if (!(css->flags & CSS_VISIBLE))
4471 continue;
4472
4473 ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
4474 if (ret)
4475 break;
4476 }
4477
4478 if (is_add && !ret)
4479 kernfs_activate(root->kn);
4480 return ret;
4481 }
4482
4483 static void cgroup_exit_cftypes(struct cftype *cfts)
4484 {
4485 struct cftype *cft;
4486
4487 for (cft = cfts; cft->name[0] != '\0'; cft++) {
4488 /* free copy for custom atomic_write_len, see cgroup_init_cftypes() */
4489 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
4490 kfree(cft->kf_ops);
4491 cft->kf_ops = NULL;
4492 cft->ss = NULL;
4493
4494 /* revert flags set by cgroup core while adding @cfts */
4495 cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL |
4496 __CFTYPE_ADDED);
4497 }
4498 }
4499
4500 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4501 {
4502 struct cftype *cft;
4503 int ret = 0;
4504
4505 for (cft = cfts; cft->name[0] != '\0'; cft++) {
4506 struct kernfs_ops *kf_ops;
4507
4508 WARN_ON(cft->ss || cft->kf_ops);
4509
4510 if (cft->flags & __CFTYPE_ADDED) {
4511 ret = -EBUSY;
4512 break;
4513 }
4514
4515 if (cft->seq_start)
4516 kf_ops = &cgroup_kf_ops;
4517 else
4518 kf_ops = &cgroup_kf_single_ops;
4519
4520 /*
4521 * Ugh... if @cft wants a custom max_write_len, we need to
4522 * make a copy of kf_ops to set its atomic_write_len.
4523 */
4524 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
4525 kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
4526 if (!kf_ops) {
4527 ret = -ENOMEM;
4528 break;
4529 }
4530 kf_ops->atomic_write_len = cft->max_write_len;
4531 }
4532
4533 cft->kf_ops = kf_ops;
4534 cft->ss = ss;
4535 cft->flags |= __CFTYPE_ADDED;
4536 }
4537
4538 if (ret)
4539 cgroup_exit_cftypes(cfts);
4540 return ret;
4541 }
4542
4543 static void cgroup_rm_cftypes_locked(struct cftype *cfts)
4544 {
4545 lockdep_assert_held(&cgroup_mutex);
4546
4547 list_del(&cfts->node);
4548 cgroup_apply_cftypes(cfts, false);
4549 cgroup_exit_cftypes(cfts);
4550 }
4551
4552 /**
4553 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
4554 * @cfts: zero-length name terminated array of cftypes
4555 *
4556 * Unregister @cfts. Files described by @cfts are removed from all
4557 * existing cgroups and all future cgroups won't have them either. This
4558 * function can be called anytime whether @cfts' subsys is attached or not.
4559 *
4560 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
4561 * registered.
4562 */
4563 int cgroup_rm_cftypes(struct cftype *cfts)
4564 {
4565 if (!cfts || cfts[0].name[0] == '\0')
4566 return 0;
4567
4568 if (!(cfts[0].flags & __CFTYPE_ADDED))
4569 return -ENOENT;
4570
4571 cgroup_lock();
4572 cgroup_rm_cftypes_locked(cfts);
4573 cgroup_unlock();
4574 return 0;
4575 }
4576
4577 /**
4578 * cgroup_add_cftypes - add an array of cftypes to a subsystem
4579 * @ss: target cgroup subsystem
4580 * @cfts: zero-length name terminated array of cftypes
4581 *
4582 * Register @cfts to @ss. Files described by @cfts are created for all
4583 * existing cgroups to which @ss is attached and all future cgroups will
4584 * have them too. This function can be called anytime whether @ss is
4585 * attached or not.
4586 *
4587 * Returns 0 on successful registration, -errno on failure. Note that this
4588 * function currently returns 0 as long as @cfts registration is successful
4589 * even if some file creation attempts on existing cgroups fail.
4590 */
4591 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4592 {
4593 int ret;
4594
4595 if (!cgroup_ssid_enabled(ss->id))
4596 return 0;
4597
4598 if (!cfts || cfts[0].name[0] == '\0')
4599 return 0;
4600
4601 ret = cgroup_init_cftypes(ss, cfts);
4602 if (ret)
4603 return ret;
4604
4605 cgroup_lock();
4606
4607 list_add_tail(&cfts->node, &ss->cfts);
4608 ret = cgroup_apply_cftypes(cfts, true);
4609 if (ret)
4610 cgroup_rm_cftypes_locked(cfts);
4611
4612 cgroup_unlock();
4613 return ret;
4614 }
4615
4616 /**
4617 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
4618 * @ss: target cgroup subsystem
4619 * @cfts: zero-length name terminated array of cftypes
4620 *
4621 * Similar to cgroup_add_cftypes() but the added files are only used for
4622 * the default hierarchy.
4623 */
4624 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4625 {
4626 struct cftype *cft;
4627
4628 for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
4629 cft->flags |= __CFTYPE_ONLY_ON_DFL;
4630 return cgroup_add_cftypes(ss, cfts);
4631 }
4632
4633 /**
4634 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
4635 * @ss: target cgroup subsystem
4636 * @cfts: zero-length name terminated array of cftypes
4637 *
4638 * Similar to cgroup_add_cftypes() but the added files are only used for
4639 * the legacy hierarchies.
4640 */
4641 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4642 {
4643 struct cftype *cft;
4644
4645 for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
4646 cft->flags |= __CFTYPE_NOT_ON_DFL;
4647 return cgroup_add_cftypes(ss, cfts);
4648 }
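
/*
 * Illustrative sketch (not part of the kernel source): a minimal cftype
 * array as a controller would register it. The terminating empty entry
 * is what "zero-length name terminated" above refers to; the subsystem,
 * file name and callback are hypothetical.
 */
#if 0
static u64 example_current_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return 0;	/* hypothetical per-cgroup counter */
}

static struct cftype example_files[] = {
	{
		.name = "current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = example_current_read,
	},
	{ }	/* terminate */
};

/* typically done once from the subsystem's init path: */
/* cgroup_add_dfl_cftypes(&example_cgrp_subsys, example_files); */
#endif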
4649
4650 /**
4651 * cgroup_file_notify - generate a file modified event for a cgroup_file
4652 * @cfile: target cgroup_file
4653 *
4654 * @cfile must have been obtained by setting cftype->file_offset.
4655 */
4656 void cgroup_file_notify(struct cgroup_file *cfile)
4657 {
4658 unsigned long flags, last, next;
4659 struct kernfs_node *kn = NULL;
4660
4661 if (!READ_ONCE(cfile->kn))
4662 return;
4663
4664 last = READ_ONCE(cfile->notified_at);
4665 next = last + CGROUP_FILE_NOTIFY_MIN_INTV;
4666 if (time_in_range(jiffies, last, next)) {
4667 timer_reduce(&cfile->notify_timer, next);
4668 if (timer_pending(&cfile->notify_timer))
4669 return;
4670 }
4671
4672 spin_lock_irqsave(&cfile->lock, flags);
4673 if (cfile->kn) {
4674 kn = cfile->kn;
4675 kernfs_get(kn);
4676 WRITE_ONCE(cfile->notified_at, jiffies);
4677 }
4678 spin_unlock_irqrestore(&cfile->lock, flags);
4679
4680 if (kn) {
4681 kernfs_notify(kn);
4682 kernfs_put(kn);
4683 }
4684 }
4685 EXPORT_SYMBOL_GPL(cgroup_file_notify);
4686
4687 /**
4688 * cgroup_file_show - show or hide a hidden cgroup file
4689 * @cfile: target cgroup_file obtained by setting cftype->file_offset
4690 * @show: whether to show or hide
4691 */
4692 void cgroup_file_show(struct cgroup_file *cfile, bool show)
4693 {
4694 struct kernfs_node *kn;
4695
4696 spin_lock_irq(&cfile->lock);
4697 kn = cfile->kn;
4698 kernfs_get(kn);
4699 spin_unlock_irq(&cfile->lock);
4700
4701 if (kn)
4702 kernfs_show(kn, show);
4703
4704 kernfs_put(kn);
4705 }
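
/*
 * Illustrative sketch (not part of the kernel source): how a controller
 * wires up cftype->file_offset so it can later call cgroup_file_notify()
 * or cgroup_file_show() on the resulting cgroup_file. The struct, file
 * name and seq_show callback are hypothetical.
 */
#if 0
struct example_css {
	struct cgroup_subsys_state css;
	struct cgroup_file events_file;	/* handle for "example.events" */
};

static struct cftype example_event_files[] = {
	{
		.name = "events",
		.file_offset = offsetof(struct example_css, events_file),
		.seq_show = example_events_show,	/* hypothetical */
	},
	{ }	/* terminate */
};

/* later, whenever the reported state changes: */
/* cgroup_file_notify(&ecss->events_file); */
#endif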
4706
4707 /**
4708 * css_next_child - find the next child of a given css
4709 * @pos: the current position (%NULL to initiate traversal)
4710 * @parent: css whose children to walk
4711 *
4712 * This function returns the next child of @parent and should be called
4713 * under either cgroup_mutex or RCU read lock. The only requirement is
4714 * that @parent and @pos are accessible. The next sibling is guaranteed to
4715 * be returned regardless of their states.
4716 *
4717 * If a subsystem synchronizes ->css_online() and the start of iteration, a
4718 * css which finished ->css_online() is guaranteed to be visible in the
4719 * future iterations and will stay visible until the last reference is put.
4720 * A css which hasn't finished ->css_online() or already finished
4721 * ->css_offline() may show up during traversal. It's each subsystem's
4722 * responsibility to synchronize against on/offlining.
4723 */
4724 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
4725 struct cgroup_subsys_state *parent)
4726 {
4727 struct cgroup_subsys_state *next;
4728
4729 cgroup_assert_mutex_or_rcu_locked();
4730
4731 /*
4732 * @pos could already have been unlinked from the sibling list.
4733 * Once a cgroup is removed, its ->sibling.next is no longer
4734 * updated when its next sibling changes. CSS_RELEASED is set when
4735 * @pos is taken off list, at which time its next pointer is valid,
4736 * and, as releases are serialized, the one pointed to by the next
4737 * pointer is guaranteed to not have started release yet. This
4738 * implies that if we observe !CSS_RELEASED on @pos in this RCU
4739 * critical section, the one pointed to by its next pointer is
4740 * guaranteed to not have finished its RCU grace period even if we
4741 * have dropped rcu_read_lock() in-between iterations.
4742 *
4743 * If @pos has CSS_RELEASED set, its next pointer can't be
4744 * dereferenced; however, as each css is given a monotonically
4745 * increasing unique serial number and always appended to the
4746 * sibling list, the next one can be found by walking the parent's
4747 * children until the first css with higher serial number than
4748 * @pos's. While this path can be slower, it happens iff iteration
4749 * races against release and the race window is very small.
4750 */
4751 if (!pos) {
4752 next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
4753 } else if (likely(!(pos->flags & CSS_RELEASED))) {
4754 next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
4755 } else {
4756 list_for_each_entry_rcu(next, &parent->children, sibling,
4757 lockdep_is_held(&cgroup_mutex))
4758 if (next->serial_nr > pos->serial_nr)
4759 break;
4760 }
4761
4762 /*
4763 * @next, if not pointing to the head, can be dereferenced and is
4764 * the next sibling.
4765 */
4766 if (&next->sibling != &parent->children)
4767 return next;
4768 return NULL;
4769 }
4770
4771 /**
4772 * css_next_descendant_pre - find the next descendant for pre-order walk
4773 * @pos: the current position (%NULL to initiate traversal)
4774 * @root: css whose descendants to walk
4775 *
4776 * To be used by css_for_each_descendant_pre(). Find the next descendant
4777 * to visit for pre-order traversal of @root's descendants. @root is
4778 * included in the iteration and the first node to be visited.
4779 *
4780 * While this function requires cgroup_mutex or RCU read locking, it
4781 * doesn't require the whole traversal to be contained in a single critical
4782 * section. Additionally, it isn't necessary to hold onto a reference to @pos.
4783 * This function will return the correct next descendant as long as both @pos
4784 * and @root are accessible and @pos is a descendant of @root.
4785 *
4786 * If a subsystem synchronizes ->css_online() and the start of iteration, a
4787 * css which finished ->css_online() is guaranteed to be visible in the
4788 * future iterations and will stay visible until the last reference is put.
4789 * A css which hasn't finished ->css_online() or already finished
4790 * ->css_offline() may show up during traversal. It's each subsystem's
4791 * responsibility to synchronize against on/offlining.
4792 */
4793 struct cgroup_subsys_state *
4794 css_next_descendant_pre(struct cgroup_subsys_state *pos,
4795 struct cgroup_subsys_state *root)
4796 {
4797 struct cgroup_subsys_state *next;
4798
4799 cgroup_assert_mutex_or_rcu_locked();
4800
4801 /* if first iteration, visit @root */
4802 if (!pos)
4803 return root;
4804
4805 /* visit the first child if exists */
4806 next = css_next_child(NULL, pos);
4807 if (next)
4808 return next;
4809
4810 /* no child, visit my or the closest ancestor's next sibling */
4811 while (pos != root) {
4812 next = css_next_child(pos, pos->parent);
4813 if (next)
4814 return next;
4815 pos = pos->parent;
4816 }
4817
4818 return NULL;
4819 }
4820 EXPORT_SYMBOL_GPL(css_next_descendant_pre);
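
/*
 * Illustrative sketch (not part of the kernel source): a pre-order walk
 * of a subtree under the RCU read lock, as described above. root_css and
 * the per-css work are hypothetical.
 */
#if 0
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root_css) {
		if (!(pos->flags & CSS_ONLINE))
			continue;	/* caller handles on/offline races */
		/* ... inspect pos ... */
	}
	rcu_read_unlock();
#endif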
4821
4822 /**
4823 * css_rightmost_descendant - return the rightmost descendant of a css
4824 * @pos: css of interest
4825 *
4826 * Return the rightmost descendant of @pos. If there's no descendant, @pos
4827 * is returned. This can be used during pre-order traversal to skip
4828 * subtree of @pos.
4829 *
4830 * While this function requires cgroup_mutex or RCU read locking, it
4831 * doesn't require the whole traversal to be contained in a single critical
4832 * section. Additionally, it isn't necessary to hold onto a reference to @pos.
4833 * This function will return the correct rightmost descendant as long as @pos
4834 * is accessible.
4835 */
4836 struct cgroup_subsys_state *
4837 css_rightmost_descendant(struct cgroup_subsys_state *pos)
4838 {
4839 struct cgroup_subsys_state *last, *tmp;
4840
4841 cgroup_assert_mutex_or_rcu_locked();
4842
4843 do {
4844 last = pos;
4845 /* ->prev isn't RCU safe, walk ->next till the end */
4846 pos = NULL;
4847 css_for_each_child(tmp, last)
4848 pos = tmp;
4849 } while (pos);
4850
4851 return last;
4852 }
4853
4854 static struct cgroup_subsys_state *
4855 css_leftmost_descendant(struct cgroup_subsys_state *pos)
4856 {
4857 struct cgroup_subsys_state *last;
4858
4859 do {
4860 last = pos;
4861 pos = css_next_child(NULL, pos);
4862 } while (pos);
4863
4864 return last;
4865 }
4866
4867 /**
4868 * css_next_descendant_post - find the next descendant for post-order walk
4869 * @pos: the current position (%NULL to initiate traversal)
4870 * @root: css whose descendants to walk
4871 *
4872 * To be used by css_for_each_descendant_post(). Find the next descendant
4873 * to visit for post-order traversal of @root's descendants. @root is
4874 * included in the iteration and the last node to be visited.
4875 *
4876 * While this function requires cgroup_mutex or RCU read locking, it
4877 * doesn't require the whole traversal to be contained in a single critical
4878 * section. Additionally, it isn't necessary to hold onto a reference to @pos.
4879 * This function will return the correct next descendant as long as both @pos
4880 * and @root are accessible and @pos is a descendant of @root.
4881 *
4882 * If a subsystem synchronizes ->css_online() and the start of iteration, a
4883 * css which finished ->css_online() is guaranteed to be visible in the
4884 * future iterations and will stay visible until the last reference is put.
4885 * A css which hasn't finished ->css_online() or already finished
4886 * ->css_offline() may show up during traversal. It's each subsystem's
4887 * responsibility to synchronize against on/offlining.
4888 */
4889 struct cgroup_subsys_state *
4890 css_next_descendant_post(struct cgroup_subsys_state *pos,
4891 struct cgroup_subsys_state *root)
4892 {
4893 struct cgroup_subsys_state *next;
4894
4895 cgroup_assert_mutex_or_rcu_locked();
4896
4897 /* if first iteration, visit leftmost descendant which may be @root */
4898 if (!pos)
4899 return css_leftmost_descendant(root);
4900
4901 /* if we visited @root, we're done */
4902 if (pos == root)
4903 return NULL;
4904
4905 /* if there's an unvisited sibling, visit its leftmost descendant */
4906 next = css_next_child(pos, pos->parent);
4907 if (next)
4908 return css_leftmost_descendant(next);
4909
4910 /* no sibling left, visit parent */
4911 return pos->parent;
4912 }
4913
4914 /**
4915 * css_has_online_children - does a css have online children
4916 * @css: the target css
4917 *
4918 * Returns %true if @css has any online children; otherwise, %false. This
4919 * function can be called from any context but the caller is responsible
4920 * for synchronizing against on/offlining as necessary.
4921 */
4922 bool css_has_online_children(struct cgroup_subsys_state *css)
4923 {
4924 struct cgroup_subsys_state *child;
4925 bool ret = false;
4926
4927 rcu_read_lock();
4928 css_for_each_child(child, css) {
4929 if (css_is_online(child)) {
4930 ret = true;
4931 break;
4932 }
4933 }
4934 rcu_read_unlock();
4935 return ret;
4936 }
4937
4938 static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
4939 {
4940 struct list_head *l;
4941 struct cgrp_cset_link *link;
4942 struct css_set *cset;
4943
4944 lockdep_assert_held(&css_set_lock);
4945
4946 /* find the next threaded cset */
4947 if (it->tcset_pos) {
4948 l = it->tcset_pos->next;
4949
4950 if (l != it->tcset_head) {
4951 it->tcset_pos = l;
4952 return container_of(l, struct css_set,
4953 threaded_csets_node);
4954 }
4955
4956 it->tcset_pos = NULL;
4957 }
4958
4959 /* find the next cset */
4960 l = it->cset_pos;
4961 l = l->next;
4962 if (l == it->cset_head) {
4963 it->cset_pos = NULL;
4964 return NULL;
4965 }
4966
4967 if (it->ss) {
4968 cset = container_of(l, struct css_set, e_cset_node[it->ss->id]);
4969 } else {
4970 link = list_entry(l, struct cgrp_cset_link, cset_link);
4971 cset = link->cset;
4972 }
4973
4974 it->cset_pos = l;
4975
4976 /* initialize threaded css_set walking */
4977 if (it->flags & CSS_TASK_ITER_THREADED) {
4978 if (it->cur_dcset)
4979 put_css_set_locked(it->cur_dcset);
4980 it->cur_dcset = cset;
4981 get_css_set(cset);
4982
4983 it->tcset_head = &cset->threaded_csets;
4984 it->tcset_pos = &cset->threaded_csets;
4985 }
4986
4987 return cset;
4988 }
4989
4990 /**
4991 * css_task_iter_advance_css_set - advance a task iterator to the next css_set
4992 * @it: the iterator to advance
4993 *
4994 * Advance @it to the next css_set to walk.
4995 */
4996 static void css_task_iter_advance_css_set(struct css_task_iter *it)
4997 {
4998 struct css_set *cset;
4999
5000 lockdep_assert_held(&css_set_lock);
5001
5002 /* Advance to the next non-empty css_set and find the first non-empty tasks list */
5003 while ((cset = css_task_iter_next_css_set(it))) {
5004 if (!list_empty(&cset->tasks)) {
5005 it->cur_tasks_head = &cset->tasks;
5006 break;
5007 } else if (!list_empty(&cset->mg_tasks)) {
5008 it->cur_tasks_head = &cset->mg_tasks;
5009 break;
5010 } else if (!list_empty(&cset->dying_tasks)) {
5011 it->cur_tasks_head = &cset->dying_tasks;
5012 break;
5013 }
5014 }
5015 if (!cset) {
5016 it->task_pos = NULL;
5017 return;
5018 }
5019 it->task_pos = it->cur_tasks_head->next;
5020
5021 /*
5022 * We don't keep css_sets locked across iteration steps and thus
5023 * need to take steps to ensure that iteration can be resumed after
5024 * the lock is re-acquired. Iteration is performed at two levels -
5025 * css_sets and tasks in them.
5026 *
5027 * Once created, a css_set never leaves its cgroup lists, so a
5028 * pinned css_set is guaranteed to stay put and we can resume
5029 * iteration afterwards.
5030 *
5031 * Tasks may leave @cset across iteration steps. This is resolved
5032 * by registering each iterator with the css_set currently being
5033 * walked and making css_set_move_task() advance iterators whose
5034 * next task is leaving.
5035 */
5036 if (it->cur_cset) {
5037 list_del(&it->iters_node);
5038 put_css_set_locked(it->cur_cset);
5039 }
5040 get_css_set(cset);
5041 it->cur_cset = cset;
5042 list_add(&it->iters_node, &cset->task_iters);
5043 }
5044
5045 static void css_task_iter_skip(struct css_task_iter *it,
5046 struct task_struct *task)
5047 {
5048 lockdep_assert_held(&css_set_lock);
5049
5050 if (it->task_pos == &task->cg_list) {
5051 it->task_pos = it->task_pos->next;
5052 it->flags |= CSS_TASK_ITER_SKIPPED;
5053 }
5054 }
5055
5056 static void css_task_iter_advance(struct css_task_iter *it)
5057 {
5058 struct task_struct *task;
5059
5060 lockdep_assert_held(&css_set_lock);
5061 repeat:
5062 if (it->task_pos) {
5063 /*
5064 * Advance iterator to find next entry. We go through cset
5065 * tasks, mg_tasks and dying_tasks, when consumed we move onto
5066 * the next cset.
5067 */
5068 if (it->flags & CSS_TASK_ITER_SKIPPED)
5069 it->flags &= ~CSS_TASK_ITER_SKIPPED;
5070 else
5071 it->task_pos = it->task_pos->next;
5072
5073 if (it->task_pos == &it->cur_cset->tasks) {
5074 it->cur_tasks_head = &it->cur_cset->mg_tasks;
5075 it->task_pos = it->cur_tasks_head->next;
5076 }
5077 if (it->task_pos == &it->cur_cset->mg_tasks) {
5078 it->cur_tasks_head = &it->cur_cset->dying_tasks;
5079 it->task_pos = it->cur_tasks_head->next;
5080 }
5081 if (it->task_pos == &it->cur_cset->dying_tasks)
5082 css_task_iter_advance_css_set(it);
5083 } else {
5084 /* called from start, proceed to the first cset */
5085 css_task_iter_advance_css_set(it);
5086 }
5087
5088 if (!it->task_pos)
5089 return;
5090
5091 task = list_entry(it->task_pos, struct task_struct, cg_list);
5092 /*
5093 * Hide tasks that are exiting but not yet removed by default. Keep
5094 * zombie leaders with live threads visible. Usages that need to walk
5095 * every existing task can opt out via CSS_TASK_ITER_WITH_DEAD.
5096 */
5097 if (!(it->flags & CSS_TASK_ITER_WITH_DEAD) &&
5098 (task->flags & PF_EXITING) && !atomic_read(&task->signal->live))
5099 goto repeat;
5100
5101 if (it->flags & CSS_TASK_ITER_PROCS) {
5102 /* if PROCS, skip over tasks which aren't group leaders */
5103 if (!thread_group_leader(task))
5104 goto repeat;
5105
5106 /* and dying leaders w/o live member threads */
5107 if (it->cur_tasks_head == &it->cur_cset->dying_tasks &&
5108 !atomic_read(&task->signal->live))
5109 goto repeat;
5110 } else {
5111 /* skip all dying ones */
5112 if (it->cur_tasks_head == &it->cur_cset->dying_tasks)
5113 goto repeat;
5114 }
5115 }
5116
5117 /**
5118 * css_task_iter_start - initiate task iteration
5119 * @css: the css to walk tasks of
5120 * @flags: CSS_TASK_ITER_* flags
5121 * @it: the task iterator to use
5122 *
5123 * Initiate iteration through the tasks of @css. The caller can call
5124 * css_task_iter_next() to walk through the tasks until the function
5125 * returns NULL. On completion of iteration, css_task_iter_end() must be
5126 * called.
5127 */
5128 void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
5129 struct css_task_iter *it)
5130 {
5131 unsigned long irqflags;
5132
5133 memset(it, 0, sizeof(*it));
5134
5135 spin_lock_irqsave(&css_set_lock, irqflags);
5136
5137 it->ss = css->ss;
5138 it->flags = flags;
5139
5140 if (CGROUP_HAS_SUBSYS_CONFIG && it->ss)
5141 it->cset_pos = &css->cgroup->e_csets[css->ss->id];
5142 else
5143 it->cset_pos = &css->cgroup->cset_links;
5144
5145 it->cset_head = it->cset_pos;
5146
5147 css_task_iter_advance(it);
5148
5149 spin_unlock_irqrestore(&css_set_lock, irqflags);
5150 }
5151
5152 /**
5153 * css_task_iter_next - return the next task for the iterator
5154 * @it: the task iterator being iterated
5155 *
5156 * The "next" function for task iteration. @it should have been
5157 * initialized via css_task_iter_start(). Returns NULL when the iteration
5158 * reaches the end.
5159 */
5160 struct task_struct *css_task_iter_next(struct css_task_iter *it)
5161 {
5162 unsigned long irqflags;
5163
5164 if (it->cur_task) {
5165 put_task_struct(it->cur_task);
5166 it->cur_task = NULL;
5167 }
5168
5169 spin_lock_irqsave(&css_set_lock, irqflags);
5170
5171 /* @it may be half-advanced by skips, finish advancing */
5172 if (it->flags & CSS_TASK_ITER_SKIPPED)
5173 css_task_iter_advance(it);
5174
5175 if (it->task_pos) {
5176 it->cur_task = list_entry(it->task_pos, struct task_struct,
5177 cg_list);
5178 get_task_struct(it->cur_task);
5179 css_task_iter_advance(it);
5180 }
5181
5182 spin_unlock_irqrestore(&css_set_lock, irqflags);
5183
5184 return it->cur_task;
5185 }
5186
5187 /**
5188 * css_task_iter_end - finish task iteration
5189 * @it: the task iterator to finish
5190 *
5191 * Finish task iteration started by css_task_iter_start().
5192 */
5193 void css_task_iter_end(struct css_task_iter *it)
5194 {
5195 unsigned long irqflags;
5196
5197 if (it->cur_cset) {
5198 spin_lock_irqsave(&css_set_lock, irqflags);
5199 list_del(&it->iters_node);
5200 put_css_set_locked(it->cur_cset);
5201 spin_unlock_irqrestore(&css_set_lock, irqflags);
5202 }
5203
5204 if (it->cur_dcset)
5205 put_css_set(it->cur_dcset);
5206
5207 if (it->cur_task)
5208 put_task_struct(it->cur_task);
5209 }
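
/*
 * Illustrative sketch (not part of the kernel source): walking every
 * thread of a cgroup with the iterator above, mirroring __cgroup_kill().
 * cgrp and the per-task work are hypothetical.
 */
#if 0
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((task = css_task_iter_next(&it))) {
		/* task is pinned by the iterator until the next call */
		/* ... examine task ... */
	}
	css_task_iter_end(&it);
#endif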
5210
5211 static void cgroup_procs_release(struct kernfs_open_file *of)
5212 {
5213 struct cgroup_file_ctx *ctx = of->priv;
5214
5215 if (ctx->procs.started)
5216 css_task_iter_end(&ctx->procs.iter);
5217 }
5218
5219 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
5220 {
5221 struct kernfs_open_file *of = s->private;
5222 struct cgroup_file_ctx *ctx = of->priv;
5223
5224 if (pos)
5225 (*pos)++;
5226
5227 return css_task_iter_next(&ctx->procs.iter);
5228 }
5229
5230 static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
5231 unsigned int iter_flags)
5232 {
5233 struct kernfs_open_file *of = s->private;
5234 struct cgroup *cgrp = seq_css(s)->cgroup;
5235 struct cgroup_file_ctx *ctx = of->priv;
5236 struct css_task_iter *it = &ctx->procs.iter;
5237
5238 /*
5239 * When a seq_file is seeked, it's always traversed sequentially
5240 * from position 0, so we can simply keep iterating on !0 *pos.
5241 */
5242 if (!ctx->procs.started) {
5243 if (WARN_ON_ONCE((*pos)))
5244 return ERR_PTR(-EINVAL);
5245 css_task_iter_start(&cgrp->self, iter_flags, it);
5246 ctx->procs.started = true;
5247 } else if (!(*pos)) {
5248 css_task_iter_end(it);
5249 css_task_iter_start(&cgrp->self, iter_flags, it);
5250 } else
5251 return it->cur_task;
5252
5253 return cgroup_procs_next(s, NULL, NULL);
5254 }
5255
5256 static void *cgroup_procs_start(struct seq_file *s, loff_t *pos)
5257 {
5258 struct cgroup *cgrp = seq_css(s)->cgroup;
5259
5260 /*
5261 * All processes of a threaded subtree belong to the domain cgroup
5262 * of the subtree. Only threads can be distributed across the
5263 * subtree. Reject reads on cgroup.procs in the subtree proper.
5264 * They're always empty anyway.
5265 */
5266 if (cgroup_is_threaded(cgrp))
5267 return ERR_PTR(-EOPNOTSUPP);
5268
5269 return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS |
5270 CSS_TASK_ITER_THREADED);
5271 }
5272
5273 static int cgroup_procs_show(struct seq_file *s, void *v)
5274 {
5275 seq_printf(s, "%d\n", task_pid_vnr(v));
5276 return 0;
5277 }
5278
5279 static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
5280 {
5281 int ret;
5282 struct inode *inode;
5283
5284 lockdep_assert_held(&cgroup_mutex);
5285
5286 inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
5287 if (!inode)
5288 return -ENOMEM;
5289
5290 ret = inode_permission(&nop_mnt_idmap, inode, MAY_WRITE);
5291 iput(inode);
5292 return ret;
5293 }
5294
5295 static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
5296 struct cgroup *dst_cgrp,
5297 struct super_block *sb,
5298 struct cgroup_namespace *ns)
5299 {
5300 struct cgroup *com_cgrp = src_cgrp;
5301 int ret;
5302
5303 lockdep_assert_held(&cgroup_mutex);
5304
5305 /* find the common ancestor */
5306 while (!cgroup_is_descendant(dst_cgrp, com_cgrp))
5307 com_cgrp = cgroup_parent(com_cgrp);
5308
5309 /* %current should be authorized to migrate to the common ancestor */
5310 ret = cgroup_may_write(com_cgrp, sb);
5311 if (ret)
5312 return ret;
5313
5314 /*
5315 * If namespaces are delegation boundaries, %current must be able
5316 * to see both source and destination cgroups from its namespace.
5317 */
5318 if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) &&
5319 (!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) ||
5320 !cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp)))
5321 return -ENOENT;
5322
5323 return 0;
5324 }
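
/*
 * Illustrative example (paths are hypothetical): moving a task from
 * /sys/fs/cgroup/a/b to /sys/fs/cgroup/a/c passes the check above only if
 * the writer may write "a"'s cgroup.procs, "a" being the common ancestor
 * found by the loop. With the "nsdelegate" mount option, both source and
 * destination must additionally be visible from the writer's cgroup
 * namespace.
 */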
5325
5326 static int cgroup_attach_permissions(struct cgroup *src_cgrp,
5327 struct cgroup *dst_cgrp,
5328 struct super_block *sb, bool threadgroup,
5329 struct cgroup_namespace *ns)
5330 {
5331 int ret = 0;
5332
5333 ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns);
5334 if (ret)
5335 return ret;
5336
5337 ret = cgroup_migrate_vet_dst(dst_cgrp);
5338 if (ret)
5339 return ret;
5340
5341 if (!threadgroup && (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp))
5342 ret = -EOPNOTSUPP;
5343
5344 return ret;
5345 }
5346
5347 static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
5348 bool threadgroup)
5349 {
5350 struct cgroup_file_ctx *ctx = of->priv;
5351 struct cgroup *src_cgrp, *dst_cgrp;
5352 struct task_struct *task;
5353 ssize_t ret;
5354 enum cgroup_attach_lock_mode lock_mode;
5355
5356 dst_cgrp = cgroup_kn_lock_live(of->kn, false);
5357 if (!dst_cgrp)
5358 return -ENODEV;
5359
5360 task = cgroup_procs_write_start(buf, threadgroup, &lock_mode);
5361 ret = PTR_ERR_OR_ZERO(task);
5362 if (ret)
5363 goto out_unlock;
5364
5365 /* find the source cgroup */
5366 spin_lock_irq(&css_set_lock);
5367 src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
5368 spin_unlock_irq(&css_set_lock);
5369
5370 /*
5371 * Process and thread migrations follow same delegation rule. Check
5372 * permissions using the credentials from file open to protect against
5373 * inherited fd attacks.
5374 */
5375 scoped_with_creds(of->file->f_cred)
5376 ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
5377 of->file->f_path.dentry->d_sb,
5378 threadgroup, ctx->ns);
5379 if (ret)
5380 goto out_finish;
5381
5382 ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
5383
5384 out_finish:
5385 cgroup_procs_write_finish(task, lock_mode);
5386 out_unlock:
5387 cgroup_kn_unlock(of->kn);
5388
5389 return ret;
5390 }
5391
5392 static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
5393 char *buf, size_t nbytes, loff_t off)
5394 {
5395 return __cgroup_procs_write(of, buf, true) ?: nbytes;
5396 }
5397
5398 static void *cgroup_threads_start(struct seq_file *s, loff_t *pos)
5399 {
5400 return __cgroup_procs_start(s, pos, 0);
5401 }
5402
5403 static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
5404 char *buf, size_t nbytes, loff_t off)
5405 {
5406 return __cgroup_procs_write(of, buf, false) ?: nbytes;
5407 }
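
/*
 * Illustrative userspace sketch (descriptor and variable names are
 * hypothetical): a PID written to "cgroup.procs" migrates the whole thread
 * group through cgroup_procs_write(), while a TID written to
 * "cgroup.threads" moves just that thread through cgroup_threads_write():
 *
 *	fd = openat(dfd_cgroup, "cgroup.threads", O_WRONLY);
 *	dprintf(fd, "%d\n", tid);
 */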
5408
5409 /* cgroup core interface files for the default hierarchy */
5410 static struct cftype cgroup_base_files[] = {
5411 {
5412 .name = "cgroup.type",
5413 .flags = CFTYPE_NOT_ON_ROOT,
5414 .seq_show = cgroup_type_show,
5415 .write = cgroup_type_write,
5416 },
5417 {
5418 .name = "cgroup.procs",
5419 .flags = CFTYPE_NS_DELEGATABLE,
5420 .file_offset = offsetof(struct cgroup, procs_file),
5421 .release = cgroup_procs_release,
5422 .seq_start = cgroup_procs_start,
5423 .seq_next = cgroup_procs_next,
5424 .seq_show = cgroup_procs_show,
5425 .write = cgroup_procs_write,
5426 },
5427 {
5428 .name = "cgroup.threads",
5429 .flags = CFTYPE_NS_DELEGATABLE,
5430 .release = cgroup_procs_release,
5431 .seq_start = cgroup_threads_start,
5432 .seq_next = cgroup_procs_next,
5433 .seq_show = cgroup_procs_show,
5434 .write = cgroup_threads_write,
5435 },
5436 {
5437 .name = "cgroup.controllers",
5438 .seq_show = cgroup_controllers_show,
5439 },
5440 {
5441 .name = "cgroup.subtree_control",
5442 .flags = CFTYPE_NS_DELEGATABLE,
5443 .seq_show = cgroup_subtree_control_show,
5444 .write = cgroup_subtree_control_write,
5445 },
5446 {
5447 .name = "cgroup.events",
5448 .flags = CFTYPE_NOT_ON_ROOT,
5449 .file_offset = offsetof(struct cgroup, events_file),
5450 .seq_show = cgroup_events_show,
5451 },
5452 {
5453 .name = "cgroup.max.descendants",
5454 .seq_show = cgroup_max_descendants_show,
5455 .write = cgroup_max_descendants_write,
5456 },
5457 {
5458 .name = "cgroup.max.depth",
5459 .seq_show = cgroup_max_depth_show,
5460 .write = cgroup_max_depth_write,
5461 },
5462 {
5463 .name = "cgroup.stat",
5464 .seq_show = cgroup_stat_show,
5465 },
5466 {
5467 .name = "cgroup.stat.local",
5468 .flags = CFTYPE_NOT_ON_ROOT,
5469 .seq_show = cgroup_core_local_stat_show,
5470 },
5471 {
5472 .name = "cgroup.freeze",
5473 .flags = CFTYPE_NOT_ON_ROOT,
5474 .seq_show = cgroup_freeze_show,
5475 .write = cgroup_freeze_write,
5476 },
5477 {
5478 .name = "cgroup.kill",
5479 .flags = CFTYPE_NOT_ON_ROOT,
5480 .write = cgroup_kill_write,
5481 },
5482 {
5483 .name = "cpu.stat",
5484 .seq_show = cpu_stat_show,
5485 },
5486 {
5487 .name = "cpu.stat.local",
5488 .seq_show = cpu_local_stat_show,
5489 },
5490 { } /* terminate */
5491 };
5492
5493 static struct cftype cgroup_psi_files[] = {
5494 #ifdef CONFIG_PSI
5495 {
5496 .name = "io.pressure",
5497 .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
5498 .seq_show = cgroup_io_pressure_show,
5499 .write = cgroup_io_pressure_write,
5500 .poll = cgroup_pressure_poll,
5501 .release = cgroup_pressure_release,
5502 },
5503 {
5504 .name = "memory.pressure",
5505 .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
5506 .seq_show = cgroup_memory_pressure_show,
5507 .write = cgroup_memory_pressure_write,
5508 .poll = cgroup_pressure_poll,
5509 .release = cgroup_pressure_release,
5510 },
5511 {
5512 .name = "cpu.pressure",
5513 .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
5514 .seq_show = cgroup_cpu_pressure_show,
5515 .write = cgroup_cpu_pressure_write,
5516 .poll = cgroup_pressure_poll,
5517 .release = cgroup_pressure_release,
5518 },
5519 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
5520 {
5521 .name = "irq.pressure",
5522 .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
5523 .seq_show = cgroup_irq_pressure_show,
5524 .write = cgroup_irq_pressure_write,
5525 .poll = cgroup_pressure_poll,
5526 .release = cgroup_pressure_release,
5527 },
5528 #endif
5529 {
5530 .name = "cgroup.pressure",
5531 .seq_show = cgroup_pressure_show,
5532 .write = cgroup_pressure_write,
5533 },
5534 #endif /* CONFIG_PSI */
5535 { } /* terminate */
5536 };
5537
5538 /*
5539 * css destruction is four-stage process.
5540 *
5541 * 1. Destruction starts. Killing of the percpu_ref is initiated.
5542 * Implemented in kill_css_finish().
5543 *
5544 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
5545 * and thus css_tryget_online() is guaranteed to fail, the css can be
5546 * offlined by invoking offline_css(). After offlining, the base ref is
5547 * put. Implemented in css_killed_work_fn().
5548 *
5549 * 3. When the percpu_ref reaches zero, the only possible remaining
5550 * accessors are inside RCU read sections. css_release() schedules the
5551 * RCU callback.
5552 *
5553 * 4. After the grace period, the css can be freed. Implemented in
5554 * css_free_rwork_fn().
5555 *
5556 * It is actually hairier because both steps 2 and 4 require process context
5557 * and thus involve punting to css->destroy_work, adding two additional
5558 * steps to the already complex sequence.
5559 */
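/*
 * Rough call-chain sketch of the stages above (an interpretation of this
 * file, not authoritative documentation):
 *
 *	kill_css_finish()
 *	  -> percpu_ref_kill_and_confirm(..., css_killed_ref_fn)	[step 1]
 *	  -> css_killed_work_fn(): offline_css() + css_put()		[step 2]
 *	  -> css_release() -> css_release_work_fn()			[step 3]
 *	  -> css_free_rwork_fn(): freed after an RCU grace period	[step 4]
 */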
5560 static void css_free_rwork_fn(struct work_struct *work)
5561 {
5562 struct cgroup_subsys_state *css = container_of(to_rcu_work(work),
5563 struct cgroup_subsys_state, destroy_rwork);
5564 struct cgroup_subsys *ss = css->ss;
5565 struct cgroup *cgrp = css->cgroup;
5566
5567 percpu_ref_exit(&css->refcnt);
5568 css_rstat_exit(css);
5569
5570 if (!css_is_self(css)) {
5571 /* css free path */
5572 struct cgroup_subsys_state *parent = css->parent;
5573 int id = css->id;
5574
5575 ss->css_free(css);
5576 cgroup_idr_remove(&ss->css_idr, id);
5577 cgroup_put(cgrp);
5578
5579 if (parent)
5580 css_put(parent);
5581 } else {
5582 /* cgroup free path */
5583 atomic_dec(&cgrp->root->nr_cgrps);
5584 if (!cgroup_on_dfl(cgrp))
5585 cgroup1_pidlist_destroy_all(cgrp);
5586 cancel_work_sync(&cgrp->release_agent_work);
5587 bpf_cgrp_storage_free(cgrp);
5588
5589 if (cgroup_parent(cgrp)) {
5590 /*
5591 * We get a ref to the parent, and put the ref when
5592 * this cgroup is being freed, so it's guaranteed
5593 * that the parent won't be destroyed before its
5594 * children.
5595 */
5596 cgroup_put(cgroup_parent(cgrp));
5597 kernfs_put(cgrp->kn);
5598 psi_cgroup_free(cgrp);
5599 kfree(cgrp);
5600 } else {
5601 /*
5602 * This is root cgroup's refcnt reaching zero,
5603 * which indicates that the root should be
5604 * released.
5605 */
5606 cgroup_destroy_root(cgrp->root);
5607 }
5608 }
5609 }
5610
5611 static void css_release_work_fn(struct work_struct *work)
5612 {
5613 struct cgroup_subsys_state *css =
5614 container_of(work, struct cgroup_subsys_state, destroy_work);
5615 struct cgroup_subsys *ss = css->ss;
5616 struct cgroup *cgrp = css->cgroup;
5617
5618 cgroup_lock();
5619
5620 css->flags |= CSS_RELEASED;
5621 list_del_rcu(&css->sibling);
5622
5623 if (!css_is_self(css)) {
5624 struct cgroup *parent_cgrp;
5625
5626 css_rstat_flush(css);
5627
5628 cgroup_idr_replace(&ss->css_idr, NULL, css->id);
5629 if (ss->css_released)
5630 ss->css_released(css);
5631
5632 cgrp->nr_dying_subsys[ss->id]--;
5633 /*
5634 * When a css is released and ready to be freed, its
5635 * nr_descendants must be zero. However, the corresponding
5636 * cgrp->nr_dying_subsys[ss->id] may not be 0 if a subsystem
5637 * is activated and deactivated multiple times with one or
5638 * more of its previous activations leaving behind dying csses.
5639 */
5640 WARN_ON_ONCE(css->nr_descendants);
5641 parent_cgrp = cgroup_parent(cgrp);
5642 while (parent_cgrp) {
5643 parent_cgrp->nr_dying_subsys[ss->id]--;
5644 parent_cgrp = cgroup_parent(parent_cgrp);
5645 }
5646 } else {
5647 struct cgroup *tcgrp;
5648
5649 /* cgroup release path */
5650 TRACE_CGROUP_PATH(release, cgrp);
5651
5652 css_rstat_flush(&cgrp->self);
5653
5654 spin_lock_irq(&css_set_lock);
5655 for (tcgrp = cgroup_parent(cgrp); tcgrp;
5656 tcgrp = cgroup_parent(tcgrp))
5657 tcgrp->nr_dying_descendants--;
5658 spin_unlock_irq(&css_set_lock);
5659
5660 /*
5661 * There are two control paths which try to determine
5662 * cgroup from dentry without going through kernfs -
5663 * cgroupstats_build() and css_tryget_online_from_dir().
5664 * Those are supported by RCU protecting clearing of
5665 * cgrp->kn->priv backpointer.
5666 */
5667 if (cgrp->kn)
5668 RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
5669 NULL);
5670 }
5671
5672 cgroup_unlock();
5673
5674 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
5675 queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
5676 }
5677
5678 static void css_release(struct percpu_ref *ref)
5679 {
5680 struct cgroup_subsys_state *css =
5681 container_of(ref, struct cgroup_subsys_state, refcnt);
5682
5683 INIT_WORK(&css->destroy_work, css_release_work_fn);
5684 queue_work(cgroup_release_wq, &css->destroy_work);
5685 }
5686
5687 static void init_and_link_css(struct cgroup_subsys_state *css,
5688 struct cgroup_subsys *ss, struct cgroup *cgrp)
5689 {
5690 lockdep_assert_held(&cgroup_mutex);
5691
5692 cgroup_get_live(cgrp);
5693
5694 memset(css, 0, sizeof(*css));
5695 css->cgroup = cgrp;
5696 css->ss = ss;
5697 css->id = -1;
5698 INIT_LIST_HEAD(&css->sibling);
5699 INIT_LIST_HEAD(&css->children);
5700 css->serial_nr = css_serial_nr_next++;
5701 atomic_set(&css->online_cnt, 0);
5702
5703 if (cgroup_parent(cgrp)) {
5704 css->parent = cgroup_css(cgroup_parent(cgrp), ss);
5705 css_get(css->parent);
5706 }
5707
5708 BUG_ON(cgroup_css(cgrp, ss));
5709 }
5710
5711 /* invoke ->css_online() on a new CSS and mark it online if successful */
5712 static int online_css(struct cgroup_subsys_state *css)
5713 {
5714 struct cgroup_subsys *ss = css->ss;
5715 int ret = 0;
5716
5717 lockdep_assert_held(&cgroup_mutex);
5718
5719 if (ss->css_online)
5720 ret = ss->css_online(css);
5721 if (!ret) {
5722 css->flags |= CSS_ONLINE;
5723 rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
5724
5725 atomic_inc(&css->online_cnt);
5726 if (css->parent) {
5727 atomic_inc(&css->parent->online_cnt);
5728 while ((css = css->parent))
5729 css->nr_descendants++;
5730 }
5731 }
5732 return ret;
5733 }
5734
5735 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
5736 static void offline_css(struct cgroup_subsys_state *css)
5737 {
5738 struct cgroup_subsys *ss = css->ss;
5739
5740 lockdep_assert_held(&cgroup_mutex);
5741
5742 if (!css_is_online(css))
5743 return;
5744
5745 if (ss->css_offline)
5746 ss->css_offline(css);
5747
5748 css->flags &= ~CSS_ONLINE;
5749 RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
5750
5751 wake_up_all(&css->cgroup->offline_waitq);
5752 }
5753
5754 /**
5755 * css_create - create a cgroup_subsys_state
5756 * @cgrp: the cgroup new css will be associated with
5757 * @ss: the subsys of new css
5758 *
5759 * Create a new css associated with @cgrp - @ss pair. On success, the new
5760 * css is online and installed in @cgrp. This function doesn't create the
5761 * interface files. Returns 0 on success, -errno on failure.
5762 */
5763 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
5764 struct cgroup_subsys *ss)
5765 {
5766 struct cgroup *parent = cgroup_parent(cgrp);
5767 struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
5768 struct cgroup_subsys_state *css;
5769 int err;
5770
5771 lockdep_assert_held(&cgroup_mutex);
5772
5773 css = ss->css_alloc(parent_css);
5774 if (!css)
5775 css = ERR_PTR(-ENOMEM);
5776 if (IS_ERR(css))
5777 return css;
5778
5779 init_and_link_css(css, ss, cgrp);
5780
5781 err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
5782 if (err)
5783 goto err_free_css;
5784
5785 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
5786 if (err < 0)
5787 goto err_free_css;
5788 css->id = err;
5789
5790 err = css_rstat_init(css);
5791 if (err)
5792 goto err_free_css;
5793
5794 /* @css is ready to be brought online now, make it visible */
5795 list_add_tail_rcu(&css->sibling, &parent_css->children);
5796 cgroup_idr_replace(&ss->css_idr, css, css->id);
5797
5798 err = online_css(css);
5799 if (err)
5800 goto err_list_del;
5801
5802 return css;
5803
5804 err_list_del:
5805 list_del_rcu(&css->sibling);
5806 err_free_css:
5807 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
5808 queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
5809 return ERR_PTR(err);
5810 }
5811
5812 /*
5813 * The returned cgroup is fully initialized including its control mask, but
5814 * it doesn't have the control mask applied.
5815 */
5816 static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
5817 umode_t mode)
5818 {
5819 struct cgroup_root *root = parent->root;
5820 struct cgroup *cgrp, *tcgrp;
5821 struct kernfs_node *kn;
5822 int i, level = parent->level + 1;
5823 int ret;
5824
5825 /* allocate the cgroup and its ID, 0 is reserved for the root */
5826 cgrp = kzalloc_flex(*cgrp, _low_ancestors, level);
5827 if (!cgrp)
5828 return ERR_PTR(-ENOMEM);
5829
5830 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
5831 if (ret)
5832 goto out_free_cgrp;
5833
5834 /* create the directory */
5835 kn = kernfs_create_dir_ns(parent->kn, name, mode,
5836 current_fsuid(), current_fsgid(),
5837 cgrp, NULL);
5838 if (IS_ERR(kn)) {
5839 ret = PTR_ERR(kn);
5840 goto out_cancel_ref;
5841 }
5842 cgrp->kn = kn;
5843
5844 init_cgroup_housekeeping(cgrp);
5845
5846 cgrp->self.parent = &parent->self;
5847 cgrp->root = root;
5848 cgrp->level = level;
5849
5850 /*
5851 * Now that init_cgroup_housekeeping() has been called and cgrp->self
5852 * is setup, it is safe to perform rstat initialization on it.
5853 */
5854 ret = css_rstat_init(&cgrp->self);
5855 if (ret)
5856 goto out_kernfs_remove;
5857
5858 ret = psi_cgroup_alloc(cgrp);
5859 if (ret)
5860 goto out_stat_exit;
5861
5862 for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
5863 cgrp->ancestors[tcgrp->level] = tcgrp;
5864
5865 /*
5866 * New cgroup inherits effective freeze counter, and
5867 * if the parent has to be frozen, the child has too.
5868 */
5869 cgrp->freezer.e_freeze = parent->freezer.e_freeze;
5870 seqcount_spinlock_init(&cgrp->freezer.freeze_seq, &css_set_lock);
5871 if (cgrp->freezer.e_freeze) {
5872 /*
5873 * Set the CGRP_FREEZE flag, so when a process will be
5874 * attached to the child cgroup, it will become frozen.
5875 * At this point the new cgroup is unpopulated, so we can
5876 * consider it frozen immediately.
5877 */
5878 set_bit(CGRP_FREEZE, &cgrp->flags);
5879 cgrp->freezer.freeze_start_nsec = ktime_get_ns();
5880 set_bit(CGRP_FROZEN, &cgrp->flags);
5881 }
5882
5883 if (notify_on_release(parent))
5884 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
5885
5886 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
5887 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
5888
5889 cgrp->self.serial_nr = css_serial_nr_next++;
5890
5891 ret = blocking_notifier_call_chain_robust(&cgroup_lifetime_notifier,
5892 CGROUP_LIFETIME_ONLINE,
5893 CGROUP_LIFETIME_OFFLINE, cgrp);
5894 ret = notifier_to_errno(ret);
5895 if (ret)
5896 goto out_psi_free;
5897
5898 /* allocation complete, commit to creation */
5899 spin_lock_irq(&css_set_lock);
5900 for (i = 0; i < level; i++) {
5901 tcgrp = cgrp->ancestors[i];
5902 tcgrp->nr_descendants++;
5903
5904 /*
5905 * If the new cgroup is frozen, all ancestor cgroups get a new
5906 * frozen descendant, but their state can't change because of
5907 * this.
5908 */
5909 if (cgrp->freezer.e_freeze)
5910 tcgrp->freezer.nr_frozen_descendants++;
5911 }
5912 spin_unlock_irq(&css_set_lock);
5913
5914 list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
5915 atomic_inc(&root->nr_cgrps);
5916 cgroup_get_live(parent);
5917
5918 /*
5919 * On the default hierarchy, a child doesn't automatically inherit
5920 * subtree_control from the parent. Each is configured manually.
5921 */
5922 if (!cgroup_on_dfl(cgrp))
5923 cgrp->subtree_control = cgroup_control(cgrp);
5924
5925 cgroup_propagate_control(cgrp);
5926
5927 return cgrp;
5928
5929 out_psi_free:
5930 psi_cgroup_free(cgrp);
5931 out_stat_exit:
5932 css_rstat_exit(&cgrp->self);
5933 out_kernfs_remove:
5934 kernfs_remove(cgrp->kn);
5935 out_cancel_ref:
5936 percpu_ref_exit(&cgrp->self.refcnt);
5937 out_free_cgrp:
5938 kfree(cgrp);
5939 return ERR_PTR(ret);
5940 }
5941
5942 static bool cgroup_check_hierarchy_limits(struct cgroup *parent)
5943 {
5944 struct cgroup *cgroup;
5945 int ret = false;
5946 int level = 0;
5947
5948 lockdep_assert_held(&cgroup_mutex);
5949
5950 for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) {
5951 if (cgroup->nr_descendants >= cgroup->max_descendants)
5952 goto fail;
5953
5954 if (level >= cgroup->max_depth)
5955 goto fail;
5956
5957 level++;
5958 }
5959
5960 ret = true;
5961 fail:
5962 return ret;
5963 }
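
/*
 * Example of the limits enforced above (values are illustrative): with
 * cgroup.max.depth set to 2 on "a", creating "a/b/c" succeeds but creating
 * "a/b/c/d" fails, and cgroup_mkdir() below reports -EAGAIN. A parent whose
 * nr_descendants has already reached cgroup.max.descendants likewise
 * rejects new children.
 */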
5964
5965 int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
5966 {
5967 struct cgroup *parent, *cgrp;
5968 int ret;
5969
5970 /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
5971 if (strchr(name, '\n'))
5972 return -EINVAL;
5973
5974 parent = cgroup_kn_lock_live(parent_kn, false);
5975 if (!parent)
5976 return -ENODEV;
5977
5978 if (!cgroup_check_hierarchy_limits(parent)) {
5979 ret = -EAGAIN;
5980 goto out_unlock;
5981 }
5982
5983 cgrp = cgroup_create(parent, name, mode);
5984 if (IS_ERR(cgrp)) {
5985 ret = PTR_ERR(cgrp);
5986 goto out_unlock;
5987 }
5988
5989 /*
5990 * This extra ref will be put in css_free_rwork_fn() and guarantees
5991 * that @cgrp->kn is always accessible.
5992 */
5993 kernfs_get(cgrp->kn);
5994
5995 ret = css_populate_dir(&cgrp->self);
5996 if (ret)
5997 goto out_destroy;
5998
5999 ret = cgroup_apply_control_enable(cgrp);
6000 if (ret)
6001 goto out_destroy;
6002
6003 TRACE_CGROUP_PATH(mkdir, cgrp);
6004
6005 /* let's create and online css's */
6006 kernfs_activate(cgrp->kn);
6007
6008 ret = 0;
6009 goto out_unlock;
6010
6011 out_destroy:
6012 cgroup_destroy_locked(cgrp);
6013 out_unlock:
6014 cgroup_kn_unlock(parent_kn);
6015 return ret;
6016 }
6017
6018 /*
6019 * This is called when the refcnt of a css is confirmed to be killed.
6020 * css_tryget_online() is now guaranteed to fail. Tell the subsystem to
6021 * initiate destruction and put the css ref from kill_css_finish().
6022 */
6023 static void css_killed_work_fn(struct work_struct *work)
6024 {
6025 struct cgroup_subsys_state *css;
6026
6027 css = container_of(to_rcu_work(work), struct cgroup_subsys_state, destroy_rwork);
6028
6029 cgroup_lock();
6030
6031 do {
6032 offline_css(css);
6033 css_put(css);
6034 /* @css can't go away while we're holding cgroup_mutex */
6035 css = css->parent;
6036 } while (css && atomic_dec_and_test(&css->online_cnt));
6037
6038 cgroup_unlock();
6039 }
6040
6041 /* css kill confirmation processing requires process context, bounce */
6042 static void css_killed_ref_fn(struct percpu_ref *ref)
6043 {
6044 struct cgroup_subsys_state *css =
6045 container_of(ref, struct cgroup_subsys_state, refcnt);
6046
6047 if (atomic_dec_and_test(&css->online_cnt)) {
6048 INIT_RCU_WORK(&css->destroy_rwork, css_killed_work_fn);
6049 queue_rcu_work(cgroup_offline_wq, &css->destroy_rwork);
6050 }
6051 }
6052
6053 /**
6054 * kill_css_sync - synchronous half of css teardown
6055 * @css: css being killed
6056 *
6057 * See cgroup_destroy_locked().
6058 */
6059 static void kill_css_sync(struct cgroup_subsys_state *css)
6060 {
6061 struct cgroup_subsys *ss = css->ss;
6062
6063 lockdep_assert_held(&cgroup_mutex);
6064
6065 if (css->flags & CSS_DYING)
6066 return;
6067
6068 /*
6069 * Call css_killed(), if defined, before setting the CSS_DYING flag
6070 */
6071 if (css->ss->css_killed)
6072 css->ss->css_killed(css);
6073
6074 css->flags |= CSS_DYING;
6075
6076 /*
6077 * This must happen before css is disassociated with its cgroup.
6078 * See seq_css() for details.
6079 */
6080 css_clear_dir(css);
6081
6082 css->cgroup->nr_dying_subsys[ss->id]++;
6083 /*
6084 * Parent css and cgroup cannot be freed until after the freeing
6085 * of child css, see css_free_rwork_fn().
6086 */
6087 while ((css = css->parent)) {
6088 css->nr_descendants--;
6089 css->cgroup->nr_dying_subsys[ss->id]++;
6090 }
6091 }
6092
6093 /**
6094 * kill_css_finish - deferred half of css teardown
6095 * @css: css being killed
6096 *
6097 * See cgroup_destroy_locked().
6098 */
6099 static void kill_css_finish(struct cgroup_subsys_state *css)
6100 {
6101 lockdep_assert_held(&cgroup_mutex);
6102
6103 /*
6104 * Skip on re-entry: cgroup_apply_control_disable() may have killed @css
6105 * earlier. cgroup_destroy_locked() can still walk it because
6106 * offline_css() (which NULLs cgrp->subsys[ssid]) runs async.
6107 */
6108 if (percpu_ref_is_dying(&css->refcnt))
6109 return;
6110
6111 /*
6112 * Killing would put the base ref, but we need to keep it alive until
6113 * after ->css_offline().
6114 */
6115 css_get(css);
6116
6117 /*
6118 * cgroup core guarantees that, by the time ->css_offline() is invoked,
6119 * no new css reference will be given out via css_tryget_online(). We
6120 * can't simply call percpu_ref_kill() and proceed to offlining css's
6121 * because percpu_ref_kill() doesn't guarantee that the ref is seen as
6122 * killed on all CPUs on return.
6123 *
6124 * Use percpu_ref_kill_and_confirm() to get notifications as each css is
6125 * confirmed to be seen as killed on all CPUs.
6126 */
6127 percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
6128 }
6129
6130 /**
6131 * cgroup_destroy_locked - destroy @cgrp (called on rmdir)
6132 * @cgrp: cgroup to be destroyed
6133 *
6134 * Tear down @cgrp on behalf of rmdir. Constraints:
6135 *
6136 * - Userspace: rmdir must succeed when cgroup.procs and friends are empty.
6137 *
6138 * - Kernel: subsystem ->css_offline() must not run while any task in @cgrp's
6139 * subtree is still doing kernel work. A task hidden from cgroup.procs (past
6140 * exit_signals() with signal->live cleared) can still schedule, allocate, and
6141 * consume resources until its final context switch. Dying descendants in the
6142 * subtree can host such tasks too.
6143 *
6144 * - Kernel: css_tryget_online() must fail by the time ->css_offline() runs.
6145 *
6146 * The destruction runs in three parts:
6147 *
6148 * - This function: synchronous user-visible state teardown plus kill_css_sync()
6149 * on each subsystem css.
6150 *
6151 * - cgroup_finish_destroy(): kicks the percpu_ref kill via kill_css_finish() on
6152 * each subsystem css. Fires once @cgrp's subtree is fully drained, either
6153 * inline here or from cgroup_update_populated().
6154 *
6155 * - The percpu_ref kill chain: css_killed_ref_fn -> css_killed_work_fn ->
6156 * ->css_offline() -> release/free.
6157 *
6158 * Return 0 on success, -EBUSY if a userspace-visible task or an online child
6159 * remains.
6160 */
6161 static int cgroup_destroy_locked(struct cgroup *cgrp)
6162 {
6163 struct cgroup *tcgrp, *parent = cgroup_parent(cgrp);
6164 struct cgroup_subsys_state *css;
6165 struct cgrp_cset_link *link;
6166 struct css_task_iter it;
6167 struct task_struct *task;
6168 int ssid, ret;
6169
6170 lockdep_assert_held(&cgroup_mutex);
6171
6172 css_task_iter_start(&cgrp->self, 0, &it);
6173 task = css_task_iter_next(&it);
6174 css_task_iter_end(&it);
6175 if (task)
6176 return -EBUSY;
6177
6178 /*
6179 * Make sure there's no live children. We can't test emptiness of
6180 * ->self.children as dead children linger on it while being
6181 * drained; otherwise, "rmdir parent/child parent" may fail.
6182 */
6183 if (css_has_online_children(&cgrp->self))
6184 return -EBUSY;
6185
6186 /*
6187 * Mark @cgrp and the associated csets dead. The former prevents
6188 * further task migration and child creation by disabling
6189 * cgroup_kn_lock_live(). The latter makes the csets ignored by
6190 * the migration path.
6191 */
6192 cgrp->self.flags &= ~CSS_ONLINE;
6193
6194 spin_lock_irq(&css_set_lock);
6195 list_for_each_entry(link, &cgrp->cset_links, cset_link)
6196 link->cset->dead = true;
6197 spin_unlock_irq(&css_set_lock);
6198
6199 for_each_css(css, ssid, cgrp)
6200 kill_css_sync(css);
6201
6202 /* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */
6203 css_clear_dir(&cgrp->self);
6204 kernfs_remove(cgrp->kn);
6205
6206 if (cgroup_is_threaded(cgrp))
6207 parent->nr_threaded_children--;
6208
6209 spin_lock_irq(&css_set_lock);
6210 for (tcgrp = parent; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
6211 tcgrp->nr_descendants--;
6212 tcgrp->nr_dying_descendants++;
6213 /*
6214 * If the dying cgroup is frozen, decrease frozen descendants
6215 * counters of ancestor cgroups.
6216 */
6217 if (test_bit(CGRP_FROZEN, &cgrp->flags))
6218 tcgrp->freezer.nr_frozen_descendants--;
6219 }
6220 spin_unlock_irq(&css_set_lock);
6221
6222 cgroup1_check_for_release(parent);
6223
6224 ret = blocking_notifier_call_chain(&cgroup_lifetime_notifier,
6225 CGROUP_LIFETIME_OFFLINE, cgrp);
6226 WARN_ON_ONCE(notifier_to_errno(ret));
6227
6228 /* put the base reference */
6229 percpu_ref_kill(&cgrp->self.refcnt);
6230
6231 if (!cgroup_is_populated(cgrp))
6232 cgroup_finish_destroy(cgrp);
6233
6234 return 0;
6235 };
6236
6237 /**
6238 * cgroup_finish_destroy - deferred half of @cgrp destruction
6239 * @cgrp: cgroup whose subtree just became empty
6240 *
6241 * See cgroup_destroy_locked() for the rationale.
6242 */
6243 static void cgroup_finish_destroy(struct cgroup *cgrp)
6244 {
6245 struct cgroup_subsys_state *css;
6246 int ssid;
6247
6248 lockdep_assert_held(&cgroup_mutex);
6249
6250 for_each_css(css, ssid, cgrp)
6251 kill_css_finish(css);
6252 }
6253
6254 int cgroup_rmdir(struct kernfs_node *kn)
6255 {
6256 struct cgroup *cgrp;
6257 int ret = 0;
6258
6259 cgrp = cgroup_kn_lock_live(kn, false);
6260 if (!cgrp)
6261 return 0;
6262
6263 ret = cgroup_destroy_locked(cgrp);
6264 if (!ret)
6265 TRACE_CGROUP_PATH(rmdir, cgrp);
6266
6267 cgroup_kn_unlock(kn);
6268 return ret;
6269 }
6270
6271 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
6272 .show_options = cgroup_show_options,
6273 .mkdir = cgroup_mkdir,
6274 .rmdir = cgroup_rmdir,
6275 .show_path = cgroup_show_path,
6276 };
6277
6278 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
6279 {
6280 struct cgroup_subsys_state *css;
6281
6282 pr_debug("Initializing cgroup subsys %s\n", ss->name);
6283
6284 cgroup_lock();
6285
6286 idr_init(&ss->css_idr);
6287 INIT_LIST_HEAD(&ss->cfts);
6288
6289 /* Create the root cgroup state for this subsystem */
6290 ss->root = &cgrp_dfl_root;
6291 css = ss->css_alloc(NULL);
6292 /* We don't handle early failures gracefully */
6293 BUG_ON(IS_ERR(css));
6294 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
6295
6296 /*
6297 * Root csses are never destroyed and we can't initialize
6298 * percpu_ref during early init. Disable refcnting.
6299 */
6300 css->flags |= CSS_NO_REF;
6301
6302 if (early) {
6303 /* allocation can't be done safely during early init */
6304 css->id = 1;
6305 } else {
6306 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
6307 BUG_ON(css->id < 0);
6308
6309 BUG_ON(ss_rstat_init(ss));
6310 BUG_ON(css_rstat_init(css));
6311 }
6312
6313 /* Update the init_css_set to contain a subsys
6314 * pointer to this state - since the subsystem is
6315 * newly registered, all tasks and hence the
6316 * init_css_set is in the subsystem's root cgroup. */
6317 init_css_set.subsys[ss->id] = css;
6318
6319 have_fork_callback |= (bool)ss->fork << ss->id;
6320 have_exit_callback |= (bool)ss->exit << ss->id;
6321 have_release_callback |= (bool)ss->release << ss->id;
6322 have_canfork_callback |= (bool)ss->can_fork << ss->id;
6323
6324 /* At system boot, before all subsystems have been
6325 * registered, no tasks have been forked, so we don't
6326 * need to invoke fork callbacks here. */
6327 BUG_ON(!list_empty(&init_task.tasks));
6328
6329 BUG_ON(online_css(css));
6330
6331 cgroup_unlock();
6332 }
6333
6334 /**
6335 * cgroup_init_early - cgroup initialization at system boot
6336 *
6337 * Initialize cgroups at system boot, and initialize any
6338 * subsystems that request early init.
6339 */
6340 int __init cgroup_init_early(void)
6341 {
6342 static struct cgroup_fs_context __initdata ctx;
6343 struct cgroup_subsys *ss;
6344 int i;
6345
6346 ctx.root = &cgrp_dfl_root;
6347 init_cgroup_root(&ctx);
6348 cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
6349
6350 RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
6351
6352 for_each_subsys(ss, i) {
6353 WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
6354 "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
6355 i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
6356 ss->id, ss->name);
6357 WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
6358 "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
6359 WARN(ss->early_init && ss->css_rstat_flush,
6360 "cgroup rstat cannot be used with early init subsystem\n");
6361
6362 ss->id = i;
6363 ss->name = cgroup_subsys_name[i];
6364 if (!ss->legacy_name)
6365 ss->legacy_name = cgroup_subsys_name[i];
6366
6367 if (ss->early_init)
6368 cgroup_init_subsys(ss, true);
6369 }
6370 return 0;
6371 }
6372
6373 /**
6374 * cgroup_init - cgroup initialization
6375 *
6376 * Register cgroup filesystem and /proc file, and initialize
6377 * any subsystems that didn't request early init.
6378 */
6379 int __init cgroup_init(void)
6380 {
6381 struct cgroup_subsys *ss;
6382 int ssid;
6383
6384 BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 32);
6385 BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
6386 BUG_ON(cgroup_init_cftypes(NULL, cgroup_psi_files));
6387 BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
6388
6389 BUG_ON(ss_rstat_init(NULL));
6390
6391 get_user_ns(init_cgroup_ns.user_ns);
6392 cgroup_rt_init();
6393
6394 cgroup_lock();
6395
6396 /*
6397 * Add init_css_set to the hash table so that dfl_root can link to
6398 * it during init.
6399 */
6400 hash_add(css_set_table, &init_css_set.hlist,
6401 css_set_hash(init_css_set.subsys));
6402
6403 cgroup_bpf_lifetime_notifier_init();
6404
6405 BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
6406
6407 cgroup_unlock();
6408
6409 for_each_subsys(ss, ssid) {
6410 if (ss->early_init) {
6411 struct cgroup_subsys_state *css =
6412 init_css_set.subsys[ss->id];
6413
6414 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
6415 GFP_KERNEL);
6416 BUG_ON(css->id < 0);
6417 } else {
6418 cgroup_init_subsys(ss, false);
6419 }
6420
6421 list_add_tail(&init_css_set.e_cset_node[ssid],
6422 &cgrp_dfl_root.cgrp.e_csets[ssid]);
6423
6424 /*
6425 * Setting dfl_root subsys_mask needs to consider the
6426 * disabled flag and cftype registration needs kmalloc,
6427 * both of which aren't available during early_init.
6428 */
6429 if (!cgroup_ssid_enabled(ssid))
6430 continue;
6431
6432 if (cgroup1_ssid_disabled(ssid))
6433 pr_info("Disabling %s control group subsystem in v1 mounts\n",
6434 ss->legacy_name);
6435
6436 cgrp_dfl_root.subsys_mask |= 1 << ss->id;
6437
6438 /* implicit controllers must be threaded too */
6439 WARN_ON(ss->implicit_on_dfl && !ss->threaded);
6440
6441 if (ss->implicit_on_dfl)
6442 cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
6443 else if (!ss->dfl_cftypes)
6444 cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
6445
6446 if (ss->threaded)
6447 cgrp_dfl_threaded_ss_mask |= 1 << ss->id;
6448
6449 if (ss->dfl_cftypes == ss->legacy_cftypes) {
6450 WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
6451 } else {
6452 WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
6453 WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
6454 }
6455
6456 if (ss->bind)
6457 ss->bind(init_css_set.subsys[ssid]);
6458
6459 cgroup_lock();
6460 css_populate_dir(init_css_set.subsys[ssid]);
6461 cgroup_unlock();
6462 }
6463
6464 /* init_css_set.subsys[] has been updated, re-hash */
6465 hash_del(&init_css_set.hlist);
6466 hash_add(css_set_table, &init_css_set.hlist,
6467 css_set_hash(init_css_set.subsys));
6468
6469 WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
6470 WARN_ON(register_filesystem(&cgroup_fs_type));
6471 WARN_ON(register_filesystem(&cgroup2_fs_type));
6472 WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show));
6473 #ifdef CONFIG_CPUSETS_V1
6474 WARN_ON(register_filesystem(&cpuset_fs_type));
6475 #endif
6476
6477 ns_tree_add(&init_cgroup_ns);
6478 return 0;
6479 }
6480
6481 static int __init cgroup_wq_init(void)
6482 {
6483 /*
6484 * There isn't much point in executing the destruction path in
6485 * parallel. A good chunk of it is serialized with cgroup_mutex anyway.
6486 * Use 1 for @max_active.
6487 *
6488 * We would prefer to do this in cgroup_init() above, but that
6489 * is called before init_workqueues(): so leave this until after.
6490 */
6491 cgroup_offline_wq = alloc_workqueue("cgroup_offline", WQ_PERCPU, 1);
6492 BUG_ON(!cgroup_offline_wq);
6493
6494 cgroup_release_wq = alloc_workqueue("cgroup_release", WQ_PERCPU, 1);
6495 BUG_ON(!cgroup_release_wq);
6496
6497 cgroup_free_wq = alloc_workqueue("cgroup_free", WQ_PERCPU, 1);
6498 BUG_ON(!cgroup_free_wq);
6499 return 0;
6500 }
6501 core_initcall(cgroup_wq_init);
6502
6503 void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
6504 {
6505 struct kernfs_node *kn;
6506
6507 kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
6508 if (!kn)
6509 return;
6510 kernfs_path(kn, buf, buflen);
6511 kernfs_put(kn);
6512 }
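
/*
 * Illustrative usage sketch (caller context is hypothetical); the buffer is
 * left untouched when no node with @id exists:
 *
 *	char path[PATH_MAX] = "";
 *
 *	cgroup_path_from_kernfs_id(id, path, sizeof(path));
 *	pr_debug("cgroup path for id %llu: %s\n", (unsigned long long)id, path);
 */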
6513
6514 /*
6515 * __cgroup_get_from_id : get the cgroup associated with cgroup id
6516 * @id: cgroup id
6517 * On success return the cgrp or ERR_PTR on failure
6518 * There are no cgroup NS restrictions.
6519 */
6520 struct cgroup *__cgroup_get_from_id(u64 id)
6521 {
6522 struct kernfs_node *kn;
6523 struct cgroup *cgrp;
6524
6525 kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
6526 if (!kn)
6527 return ERR_PTR(-ENOENT);
6528
6529 if (kernfs_type(kn) != KERNFS_DIR) {
6530 kernfs_put(kn);
6531 return ERR_PTR(-ENOENT);
6532 }
6533
6534 rcu_read_lock();
6535
6536 cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
6537 if (cgrp && !cgroup_tryget(cgrp))
6538 cgrp = NULL;
6539
6540 rcu_read_unlock();
6541 kernfs_put(kn);
6542
6543 if (!cgrp)
6544 return ERR_PTR(-ENOENT);
6545 return cgrp;
6546 }
6547
6548 /*
6549 * cgroup_get_from_id : get the cgroup associated with cgroup id
6550 * @id: cgroup id
6551 * On success return the cgrp or ERR_PTR on failure
6552 * Only cgroups within current task's cgroup NS are valid.
6553 */
6554 struct cgroup *cgroup_get_from_id(u64 id)
6555 {
6556 struct cgroup *cgrp, *root_cgrp;
6557
6558 cgrp = __cgroup_get_from_id(id);
6559 if (IS_ERR(cgrp))
6560 return cgrp;
6561
6562 root_cgrp = current_cgns_cgroup_dfl();
6563 if (!cgroup_is_descendant(cgrp, root_cgrp)) {
6564 cgroup_put(cgrp);
6565 return ERR_PTR(-ENOENT);
6566 }
6567
6568 return cgrp;
6569 }
6570 EXPORT_SYMBOL_GPL(cgroup_get_from_id);
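
/*
 * Illustrative usage sketch (surrounding code is hypothetical); a cgroup
 * returned by cgroup_get_from_id() must be released with cgroup_put():
 *
 *	struct cgroup *cgrp = cgroup_get_from_id(id);
 *
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	...
 *	cgroup_put(cgrp);
 */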
6571
6572 /*
6573 * proc_cgroup_show()
6574 * - Print task's cgroup paths into seq_file, one line for each hierarchy
6575 * - Used for /proc/<pid>/cgroup.
6576 */
6577 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
6578 struct pid *pid, struct task_struct *tsk)
6579 {
6580 char *buf;
6581 int retval;
6582 struct cgroup_root *root;
6583
6584 retval = -ENOMEM;
6585 buf = kmalloc(PATH_MAX, GFP_KERNEL);
6586 if (!buf)
6587 goto out;
6588
6589 rcu_read_lock();
6590 spin_lock_irq(&css_set_lock);
6591
6592 for_each_root(root) {
6593 struct cgroup_subsys *ss;
6594 struct cgroup *cgrp;
6595 int ssid, count = 0;
6596
6597 if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible))
6598 continue;
6599
6600 cgrp = task_cgroup_from_root(tsk, root);
6601 /* The root has already been unmounted. */
6602 if (!cgrp)
6603 continue;
6604
6605 seq_printf(m, "%d:", root->hierarchy_id);
6606 if (root != &cgrp_dfl_root)
6607 for_each_subsys(ss, ssid)
6608 if (root->subsys_mask & (1 << ssid))
6609 seq_printf(m, "%s%s", count++ ? "," : "",
6610 ss->legacy_name);
6611 if (strlen(root->name))
6612 seq_printf(m, "%sname=%s", count ? "," : "",
6613 root->name);
6614 seq_putc(m, ':');
6615 /*
6616 * On traditional hierarchies, all zombie tasks show up as
6617 * belonging to the root cgroup. On the default hierarchy,
6618 * while a zombie doesn't show up in "cgroup.procs" and
6619 * thus can't be migrated, its /proc/PID/cgroup keeps
6620 * reporting the cgroup it belonged to before exiting. If
6621 * the cgroup is removed before the zombie is reaped,
6622 * " (deleted)" is appended to the cgroup path.
6623 */
6624 if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
6625 retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
6626 current->nsproxy->cgroup_ns);
6627 if (retval == -E2BIG)
6628 retval = -ENAMETOOLONG;
6629 if (retval < 0)
6630 goto out_unlock;
6631
6632 seq_puts(m, buf);
6633 } else {
6634 seq_puts(m, "/");
6635 }
6636
6637 if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
6638 seq_puts(m, " (deleted)\n");
6639 else
6640 seq_putc(m, '\n');
6641 }
6642
6643 retval = 0;
6644 out_unlock:
6645 spin_unlock_irq(&css_set_lock);
6646 rcu_read_unlock();
6647 kfree(buf);
6648 out:
6649 return retval;
6650 }
6651
6652 /**
6653 * cgroup_fork - initialize cgroup related fields during copy_process()
6654 * @child: pointer to task_struct of the child process being forked.
6655 *
6656 * A task is associated with the init_css_set until cgroup_post_fork()
6657 * attaches it to the target css_set.
6658 */
6659 void cgroup_fork(struct task_struct *child)
6660 {
6661 RCU_INIT_POINTER(child->cgroups, &init_css_set);
6662 INIT_LIST_HEAD(&child->cg_list);
6663 }
6664
6665 /**
6666 * cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer
6667 * @f: file corresponding to cgroup_dir
6668 *
6669 * Find the cgroup from a file pointer associated with a cgroup directory.
6670 * Returns a pointer to the cgroup on success. ERR_PTR is returned if the
6671 * cgroup cannot be found.
6672 */
6673 static struct cgroup *cgroup_v1v2_get_from_file(struct file *f)
6674 {
6675 struct cgroup_subsys_state *css;
6676
6677 css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
6678 if (IS_ERR(css))
6679 return ERR_CAST(css);
6680
6681 return css->cgroup;
6682 }
6683
6684 /**
6685 * cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports
6686 * cgroup2.
6687 * @f: file corresponding to cgroup2_dir
6688 */
6689 static struct cgroup *cgroup_get_from_file(struct file *f)
6690 {
6691 struct cgroup *cgrp = cgroup_v1v2_get_from_file(f);
6692
6693 if (IS_ERR(cgrp))
6694 return ERR_CAST(cgrp);
6695
6696 if (!cgroup_on_dfl(cgrp)) {
6697 cgroup_put(cgrp);
6698 return ERR_PTR(-EBADF);
6699 }
6700
6701 return cgrp;
6702 }
6703
6704 /**
6705 * cgroup_css_set_fork - find or create a css_set for a child process
6706 * @kargs: the arguments passed to create the child process
6707 *
6708 * This function finds or creates a new css_set which the child
6709 * process will be attached to in cgroup_post_fork(). By default,
6710 * the child process will be given the same css_set as its parent.
6711 *
6712 * If CLONE_INTO_CGROUP is specified this function will try to find an
6713 * existing css_set which includes the requested cgroup and if not create
6714 * a new css_set that the child will be attached to later. If this function
6715 * succeeds it will hold cgroup_threadgroup_rwsem on return. If
6716 * CLONE_INTO_CGROUP is requested this function will grab cgroup mutex
6717 * before grabbing cgroup_threadgroup_rwsem and will hold a reference
6718 * to the target cgroup.
6719 */
6720 static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
6721 __acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem)
6722 {
6723 int ret;
6724 struct cgroup *dst_cgrp = NULL;
6725 struct css_set *cset;
6726 struct super_block *sb;
6727
6728 if (kargs->flags & CLONE_INTO_CGROUP)
6729 cgroup_lock();
6730
6731 cgroup_threadgroup_change_begin(current);
6732
6733 spin_lock_irq(&css_set_lock);
6734 cset = task_css_set(current);
6735 get_css_set(cset);
6736 if (kargs->cgrp)
6737 kargs->kill_seq = kargs->cgrp->kill_seq;
6738 else
6739 kargs->kill_seq = cset->dfl_cgrp->kill_seq;
6740 spin_unlock_irq(&css_set_lock);
6741
6742 if (!(kargs->flags & CLONE_INTO_CGROUP)) {
6743 kargs->cset = cset;
6744 return 0;
6745 }
6746
6747 CLASS(fd_raw, f)(kargs->cgroup);
6748 if (fd_empty(f)) {
6749 ret = -EBADF;
6750 goto err;
6751 }
6752 sb = fd_file(f)->f_path.dentry->d_sb;
6753
6754 dst_cgrp = cgroup_get_from_file(fd_file(f));
6755 if (IS_ERR(dst_cgrp)) {
6756 ret = PTR_ERR(dst_cgrp);
6757 dst_cgrp = NULL;
6758 goto err;
6759 }
6760
6761 if (cgroup_is_dead(dst_cgrp)) {
6762 ret = -ENODEV;
6763 goto err;
6764 }
6765
6766 /*
6767 * Verify that the target cgroup is writable for us. This is
6768 * usually done by the vfs layer but since we're not going through
6769 * the vfs layer here we need to do it "manually".
6770 */
6771 ret = cgroup_may_write(dst_cgrp, sb);
6772 if (ret)
6773 goto err;
6774
6775 /*
6776 * Spawning a task directly into a cgroup works by passing a file
6777 * descriptor to the target cgroup directory. This can even be an O_PATH
6778 * file descriptor. But it can never be a cgroup.procs file descriptor.
6779 * This was done on purpose so spawning into a cgroup could be
6780 * conceptualized as an atomic
6781 *
6782 * fd = openat(dfd_cgroup, "cgroup.procs", ...);
6783 * write(fd, <child-pid>, ...);
6784 *
6785 * sequence, i.e. it's a shorthand for the caller opening and writing
6786 * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us
6787 * to always use the caller's credentials.
6788 */
6789 ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
6790 !(kargs->flags & CLONE_THREAD),
6791 current->nsproxy->cgroup_ns);
6792 if (ret)
6793 goto err;
6794
6795 kargs->cset = find_css_set(cset, dst_cgrp);
6796 if (!kargs->cset) {
6797 ret = -ENOMEM;
6798 goto err;
6799 }
6800
6801 put_css_set(cset);
6802 kargs->cgrp = dst_cgrp;
6803 return ret;
6804
6805 err:
6806 cgroup_threadgroup_change_end(current);
6807 cgroup_unlock();
6808 if (dst_cgrp)
6809 cgroup_put(dst_cgrp);
6810 put_css_set(cset);
6811 if (kargs->cset)
6812 put_css_set(kargs->cset);
6813 return ret;
6814 }
6815
6816 /**
6817 * cgroup_css_set_put_fork - drop references we took during fork
6818 * @kargs: the arguments passed to create the child process
6819 *
6820 * Drop references to the prepared css_set and target cgroup if
6821 * CLONE_INTO_CGROUP was requested.
6822 */
6823 static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
6824 __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
6825 {
6826 struct cgroup *cgrp = kargs->cgrp;
6827 struct css_set *cset = kargs->cset;
6828
6829 cgroup_threadgroup_change_end(current);
6830
6831 if (cset) {
6832 put_css_set(cset);
6833 kargs->cset = NULL;
6834 }
6835
6836 if (kargs->flags & CLONE_INTO_CGROUP) {
6837 cgroup_unlock();
6838 if (cgrp) {
6839 cgroup_put(cgrp);
6840 kargs->cgrp = NULL;
6841 }
6842 }
6843 }
6844
6845 /**
6846 * cgroup_can_fork - called on a new task before the process is exposed
6847 * @child: the child process
6848 * @kargs: the arguments passed to create the child process
6849 *
6850 * This prepares a new css_set for the child process which the child will
6851 * be attached to in cgroup_post_fork().
6852 * This calls the subsystem can_fork() callbacks. If the cgroup_can_fork()
6853 * callback returns an error, the fork aborts with that error code. This
6854 * allows for a cgroup subsystem to conditionally allow or deny new forks.
6855 */
6856 int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs)
6857 {
6858 struct cgroup_subsys *ss;
6859 int i, j, ret;
6860
6861 ret = cgroup_css_set_fork(kargs);
6862 if (ret)
6863 return ret;
6864
6865 do_each_subsys_mask(ss, i, have_canfork_callback) {
6866 ret = ss->can_fork(child, kargs->cset);
6867 if (ret)
6868 goto out_revert;
6869 } while_each_subsys_mask();
6870
6871 return 0;
6872
6873 out_revert:
6874 for_each_subsys(ss, j) {
6875 if (j >= i)
6876 break;
6877 if (ss->cancel_fork)
6878 ss->cancel_fork(child, kargs->cset);
6879 }
6880
6881 cgroup_css_set_put_fork(kargs);
6882
6883 return ret;
6884 }
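
/*
 * Sketch of how the fork hooks pair up from copy_process()'s point of view
 * (simplified pseudocode; see kernel/fork.c for the real sequencing):
 *
 *	ret = cgroup_can_fork(p, args);
 *	if (ret)
 *		goto abort_fork;
 *	...
 *	if (a later step failed)
 *		cgroup_cancel_fork(p, args);
 *	else
 *		cgroup_post_fork(p, args);
 */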
6885
6886 /**
6887 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
6888 * @child: the child process
6889 * @kargs: the arguments passed to create the child process
6890 *
6891 * This calls the cancel_fork() callbacks if a fork failed *after*
6892 * cgroup_can_fork() succeeded and cleans up references we took to
6893 * prepare a new css_set for the child process in cgroup_can_fork().
6894 */
6895 void cgroup_cancel_fork(struct task_struct *child,
6896 struct kernel_clone_args *kargs)
6897 {
6898 struct cgroup_subsys *ss;
6899 int i;
6900
6901 for_each_subsys(ss, i)
6902 if (ss->cancel_fork)
6903 ss->cancel_fork(child, kargs->cset);
6904
6905 cgroup_css_set_put_fork(kargs);
6906 }
6907
6908 /**
6909 * cgroup_post_fork - finalize cgroup setup for the child process
6910 * @child: the child process
6911 * @kargs: the arguments passed to create the child process
6912 *
6913 * Attach the child process to its css_set calling the subsystem fork()
6914 * callbacks.
6915 */
6916 void cgroup_post_fork(struct task_struct *child,
6917 struct kernel_clone_args *kargs)
6918 __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
6919 {
6920 unsigned int cgrp_kill_seq = 0;
6921 unsigned long cgrp_flags = 0;
6922 bool kill = false;
6923 struct cgroup_subsys *ss;
6924 struct css_set *cset;
6925 int i;
6926
6927 cset = kargs->cset;
6928 kargs->cset = NULL;
6929
6930 spin_lock_irq(&css_set_lock);
6931
6932 /* init tasks are special, only link regular threads */
6933 if (likely(child->pid)) {
6934 if (kargs->cgrp) {
6935 cgrp_flags = kargs->cgrp->flags;
6936 cgrp_kill_seq = kargs->cgrp->kill_seq;
6937 } else {
6938 cgrp_flags = cset->dfl_cgrp->flags;
6939 cgrp_kill_seq = cset->dfl_cgrp->kill_seq;
6940 }
6941
6942 WARN_ON_ONCE(!list_empty(&child->cg_list));
6943 cset->nr_tasks++;
6944 css_set_move_task(child, NULL, cset, false);
6945 } else {
6946 put_css_set(cset);
6947 cset = NULL;
6948 }
6949
6950 if (!(child->flags & PF_KTHREAD)) {
6951 if (unlikely(test_bit(CGRP_FREEZE, &cgrp_flags))) {
6952 /*
6953 * If the cgroup has to be frozen, the new task has
6954 * too. Let's set the JOBCTL_TRAP_FREEZE jobctl bit to
6955 * get the task into the frozen state.
6956 */
6957 spin_lock(&child->sighand->siglock);
6958 WARN_ON_ONCE(child->frozen);
6959 child->jobctl |= JOBCTL_TRAP_FREEZE;
6960 spin_unlock(&child->sighand->siglock);
6961
6962 /*
6963 * Calling cgroup_update_frozen() isn't required here,
6964 * because it will be called anyway a bit later from
6965 * do_freezer_trap(). So we avoid cgroup's transient
6966 * switch from the frozen state and back.
6967 */
6968 }
6969
6970 /*
6971 * If the cgroup is to be killed notice it now and take the
6972 * child down right after we finished preparing it for
6973 * userspace.
6974 */
6975 kill = kargs->kill_seq != cgrp_kill_seq;
6976 }
6977
6978 spin_unlock_irq(&css_set_lock);
6979
6980 /*
6981 * Call ss->fork(). This must happen after @child is linked on
6982 * css_set; otherwise, @child might change state between ->fork()
6983 * and addition to css_set.
6984 */
6985 do_each_subsys_mask(ss, i, have_fork_callback) {
6986 ss->fork(child);
6987 } while_each_subsys_mask();
6988
6989 /* Make the new cset the root_cset of the new cgroup namespace. */
6990 if (kargs->flags & CLONE_NEWCGROUP) {
6991 struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset;
6992
6993 get_css_set(cset);
6994 child->nsproxy->cgroup_ns->root_cset = cset;
6995 put_css_set(rcset);
6996 }
6997
6998 /* Cgroup has to be killed so take down child immediately. */
6999 if (unlikely(kill))
7000 do_send_sig_info(SIGKILL, SEND_SIG_NOINFO, child, PIDTYPE_TGID);
7001
7002 cgroup_css_set_put_fork(kargs);
7003 }
7004
7005 /**
7006 * cgroup_task_exit - detach cgroup from exiting task
7007 * @tsk: pointer to task_struct of exiting process
7008 *
7009 * Description: Detach cgroup from @tsk.
7010 *
7011 */
7012 void cgroup_task_exit(struct task_struct *tsk)
7013 {
7014 struct cgroup_subsys *ss;
7015 int i;
7016
7017 /* see cgroup_post_fork() for details */
7018 do_each_subsys_mask(ss, i, have_exit_callback) {
7019 ss->exit(tsk);
7020 } while_each_subsys_mask();
7021 }
7022
7023 static void do_cgroup_task_dead(struct task_struct *tsk)
7024 {
7025 struct css_set *cset;
7026 unsigned long flags;
7027
7028 spin_lock_irqsave(&css_set_lock, flags);
7029
7030 WARN_ON_ONCE(list_empty(&tsk->cg_list));
7031 cset = task_css_set(tsk);
7032 css_set_move_task(tsk, cset, NULL, false);
7033 cset->nr_tasks--;
7034 /* matches the signal->live check in css_task_iter_advance() */
7035 if (thread_group_leader(tsk) && atomic_read(&tsk->signal->live))
7036 list_add_tail(&tsk->cg_list, &cset->dying_tasks);
7037
7038 if (dl_task(tsk))
7039 dec_dl_tasks_cs(tsk);
7040
7041 WARN_ON_ONCE(cgroup_task_frozen(tsk));
7042 if (unlikely(!(tsk->flags & PF_KTHREAD) &&
7043 test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
7044 cgroup_update_frozen(task_dfl_cgroup(tsk));
7045
7046 spin_unlock_irqrestore(&css_set_lock, flags);
7047 }
7048
7049 #ifdef CONFIG_PREEMPT_RT
7050 /*
7051 * cgroup_task_dead() is called from finish_task_switch() which doesn't allow
7052 * scheduling even in RT. As the task_dead path requires grabbing css_set_lock,
7053 * this leads to a sleeping-in-invalid-context warning. css_set_lock is too
7054 * big to become a raw_spinlock. The task_dead path doesn't need to run
7055 * synchronously but can't be delayed indefinitely either as the dead task pins
7056 * the cgroup and task_struct can be pinned indefinitely. Bounce through lazy
7057 * irq_work to allow batching while ensuring timely completion.
7058 */
7059 static DEFINE_PER_CPU(struct llist_head, cgrp_dead_tasks);
7060 static DEFINE_PER_CPU(struct irq_work, cgrp_dead_tasks_iwork);
7061
7062 static void cgrp_dead_tasks_iwork_fn(struct irq_work *iwork)
7063 {
7064 struct llist_node *lnode;
7065 struct task_struct *task, *next;
7066
7067 lnode = llist_del_all(this_cpu_ptr(&cgrp_dead_tasks));
7068 llist_for_each_entry_safe(task, next, lnode, cg_dead_lnode) {
7069 do_cgroup_task_dead(task);
7070 put_task_struct(task);
7071 }
7072 }
7073
7074 static void __init cgroup_rt_init(void)
7075 {
7076 int cpu;
7077
7078 for_each_possible_cpu(cpu) {
7079 init_llist_head(per_cpu_ptr(&cgrp_dead_tasks, cpu));
7080 per_cpu(cgrp_dead_tasks_iwork, cpu) =
7081 IRQ_WORK_INIT_LAZY(cgrp_dead_tasks_iwork_fn);
7082 }
7083 }
7084
7085 void cgroup_task_dead(struct task_struct *task)
7086 {
7087 get_task_struct(task);
7088 llist_add(&task->cg_dead_lnode, this_cpu_ptr(&cgrp_dead_tasks));
7089 irq_work_queue(this_cpu_ptr(&cgrp_dead_tasks_iwork));
7090 }
7091 #else /* CONFIG_PREEMPT_RT */
7092 static void __init cgroup_rt_init(void) {}
7093
7094 void cgroup_task_dead(struct task_struct *task)
7095 {
7096 do_cgroup_task_dead(task);
7097 }
7098 #endif /* CONFIG_PREEMPT_RT */
7099
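/* Invoke each subsystem's ->release() callback for a task being released. */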
7100 void cgroup_task_release(struct task_struct *task)
7101 {
7102 struct cgroup_subsys *ss;
7103 int ssid;
7104
7105 do_each_subsys_mask(ss, ssid, have_release_callback) {
7106 ss->release(task);
7107 } while_each_subsys_mask();
7108 }
7109
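/*
 * Final cgroup cleanup when a task_struct is freed: detach the task from
 * any css_set iterators still pointing at it and drop its css_set
 * reference.
 */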
7110 void cgroup_task_free(struct task_struct *task)
7111 {
7112 struct css_set *cset = task_css_set(task);
7113
7114 if (!list_empty(&task->cg_list)) {
7115 spin_lock_irq(&css_set_lock);
7116 css_set_skip_task_iters(task_css_set(task), task);
7117 list_del_init(&task->cg_list);
7118 spin_unlock_irq(&css_set_lock);
7119 }
7120
7121 put_css_set(cset);
7122 }
7123
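/*
 * Parse the "cgroup_disable=" boot parameter: a comma-separated list of
 * controller names and/or optional feature names, each of which gets
 * force-disabled, e.g. "cgroup_disable=memory,hugetlb".
 */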
7124 static int __init cgroup_disable(char *str)
7125 {
7126 struct cgroup_subsys *ss;
7127 char *token;
7128 int i;
7129
7130 while ((token = strsep(&str, ",")) != NULL) {
7131 if (!*token)
7132 continue;
7133
7134 for_each_subsys(ss, i) {
7135 if (strcmp(token, ss->name) &&
7136 strcmp(token, ss->legacy_name))
7137 continue;
7138
7139 static_branch_disable(cgroup_subsys_enabled_key[i]);
7140 pr_info("Disabling %s control group subsystem\n",
7141 ss->name);
7142 }
7143
7144 for (i = 0; i < OPT_FEATURE_COUNT; i++) {
7145 if (strcmp(token, cgroup_opt_feature_names[i]))
7146 continue;
7147 cgroup_feature_disable_mask |= 1 << i;
7148 pr_info("Disabling %s control group feature\n",
7149 cgroup_opt_feature_names[i]);
7150 break;
7151 }
7152 }
7153 return 1;
7154 }
7155 __setup("cgroup_disable=", cgroup_disable);
7156
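/* Weak stub, overridden by the debug controller when it is built in. */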
7157 void __init __weak enable_debug_cgroup(void) { }
7158
7159 static int __init enable_cgroup_debug(char *str)
7160 {
7161 cgroup_debug = true;
7162 enable_debug_cgroup();
7163 return 1;
7164 }
7165 __setup("cgroup_debug", enable_cgroup_debug);
7166
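/*
 * Parse the "cgroup_favordynmods=" boolean boot parameter, which sets the
 * default for the favordynmods behavior.
 */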
7167 static int __init cgroup_favordynmods_setup(char *str)
7168 {
7169 return (kstrtobool(str, &have_favordynmods) == 0);
7170 }
7171 __setup("cgroup_favordynmods=", cgroup_favordynmods_setup);
7172
7173 /**
7174 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
7175 * @dentry: directory dentry of interest
7176 * @ss: subsystem of interest
7177 *
7178 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
7179 * to get the corresponding css and return it. If such a css doesn't exist
7180 * or can't be pinned, an ERR_PTR value is returned.
7181 */
7182 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
7183 struct cgroup_subsys *ss)
7184 {
7185 struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
7186 struct file_system_type *s_type = dentry->d_sb->s_type;
7187 struct cgroup_subsys_state *css = NULL;
7188 struct cgroup *cgrp;
7189
7190 /* is @dentry a cgroup dir? */
7191 if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
7192 !kn || kernfs_type(kn) != KERNFS_DIR)
7193 return ERR_PTR(-EBADF);
7194
7195 rcu_read_lock();
7196
7197 /*
7198 * This path doesn't originate from kernfs and @kn could already
7199 * have been or be removed at any point. @kn->priv is RCU
7200 * protected for this access. See css_release_work_fn() for details.
7201 */
7202 cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
7203 if (cgrp)
7204 css = cgroup_css(cgrp, ss);
7205
7206 if (!css || !css_tryget_online(css))
7207 css = ERR_PTR(-ENOENT);
7208
7209 rcu_read_unlock();
7210 return css;
7211 }
7212
7213 /**
7214 * css_from_id - lookup css by id
7215 * @id: the cgroup id
7216 * @ss: cgroup subsys to be looked into
7217 *
7218 * Returns the css if there's a valid one with @id, otherwise returns NULL.
7219 * Should be called under rcu_read_lock().
7220 */
7221 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
7222 {
7223 WARN_ON_ONCE(!rcu_read_lock_held());
7224 return idr_find(&ss->css_idr, id);
7225 }
7226
7227 /**
7228 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
7229 * @path: path on the default hierarchy
7230 *
7231 * Find the cgroup at @path on the default hierarchy, increment its
7232 * reference count and return it. Returns pointer to the found cgroup on
7233 * success, ERR_PTR(-ENOENT) if @path doesn't exist or if the cgroup has already
7234 * been released, and ERR_PTR(-ENOTDIR) if @path points to a non-directory.
7235 */
7236 struct cgroup *cgroup_get_from_path(const char *path)
7237 {
7238 struct kernfs_node *kn;
7239 struct cgroup *cgrp = ERR_PTR(-ENOENT);
7240 struct cgroup *root_cgrp;
7241
7242 root_cgrp = current_cgns_cgroup_dfl();
7243 kn = kernfs_walk_and_get(root_cgrp->kn, path);
7244 if (!kn)
7245 goto out;
7246
7247 if (kernfs_type(kn) != KERNFS_DIR) {
7248 cgrp = ERR_PTR(-ENOTDIR);
7249 goto out_kernfs;
7250 }
7251
7252 rcu_read_lock();
7253
7254 cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
7255 if (!cgrp || !cgroup_tryget(cgrp))
7256 cgrp = ERR_PTR(-ENOENT);
7257
7258 rcu_read_unlock();
7259
7260 out_kernfs:
7261 kernfs_put(kn);
7262 out:
7263 return cgrp;
7264 }
7265 EXPORT_SYMBOL_GPL(cgroup_get_from_path);
7266
7267 /**
7268 * cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd
7269 * @fd: fd obtained by open(cgroup_dir)
7270 *
7271 * Find the cgroup from an fd which should be obtained
7272 * by opening a cgroup directory. Returns a pointer to the
7273 * cgroup on success. ERR_PTR is returned if the cgroup
7274 * cannot be found.
7275 */
7276 struct cgroup *cgroup_v1v2_get_from_fd(int fd)
7277 {
7278 CLASS(fd_raw, f)(fd);
7279 if (fd_empty(f))
7280 return ERR_PTR(-EBADF);
7281
7282 return cgroup_v1v2_get_from_file(fd_file(f));
7283 }
7284
7285 /**
7286 * cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports
7287 * cgroup2.
7288 * @fd: fd obtained by open(cgroup2_dir)
7289 */
7290 struct cgroup *cgroup_get_from_fd(int fd)
7291 {
7292 struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd);
7293
7294 if (IS_ERR(cgrp))
7295 return ERR_CAST(cgrp);
7296
7297 if (!cgroup_on_dfl(cgrp)) {
7298 cgroup_put(cgrp);
7299 return ERR_PTR(-EBADF);
7300 }
7301 return cgrp;
7302 }
7303 EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
7304
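/* Integer 10^power helper for cgroup_parse_float() below. */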
7305 static u64 power_of_ten(int power)
7306 {
7307 u64 v = 1;
7308 while (power--)
7309 v *= 10;
7310 return v;
7311 }
7312
7313 /**
7314 * cgroup_parse_float - parse a floating number
7315 * @input: input string
7316 * @dec_shift: number of decimal digits to shift
7317 * @v: output
7318 *
7319 * Parse a decimal floating point number in @input and store the result in
7320 * @v with decimal point right shifted @dec_shift times. For example, if
7321 * @input is "12.345" and @dec_shift is 3, *@v will be set to 12345.
7322 * Returns 0 on success, -errno otherwise.
7323 *
7324 * There's nothing cgroup specific about this function except that
7325 * cgroup is currently its only user.
7326 */
7327 int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
7328 {
7329 s64 whole, frac = 0;
7330 int fstart = 0, fend = 0, flen;
7331
7332 if (!sscanf(input, "%lld.%n%lld%n", &whole, &fstart, &frac, &fend))
7333 return -EINVAL;
7334 if (frac < 0)
7335 return -EINVAL;
7336
7337 flen = fend > fstart ? fend - fstart : 0;
7338 if (flen < dec_shift)
7339 frac *= power_of_ten(dec_shift - flen);
7340 else
7341 frac = DIV_ROUND_CLOSEST_ULL(frac, power_of_ten(flen - dec_shift));
7342
7343 *v = whole * power_of_ten(dec_shift) + frac;
7344 return 0;
7345 }
7346
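/*
 * Worked example (illustrative): with @dec_shift == 2, "12.5" parses to
 * 1250 and "0.333" parses to 33 (extra fractional digits are rounded):
 *
 *	s64 v;
 *
 *	if (!cgroup_parse_float("12.5", 2, &v))
 *		pr_debug("parsed: %lld\n", v);	// v == 1250
 */
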
7347 /*
7348 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
7349 * definition in cgroup-defs.h.
7350 */
7351 #ifdef CONFIG_SOCK_CGROUP_DATA
7352
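/*
 * Associate a newly created socket with the current task's cgroup on the
 * default hierarchy (or with the root cgroup when called from interrupt
 * context), taking both a cgroup reference and a cgroup-bpf reference.
 */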
7353 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
7354 {
7355 struct cgroup *cgroup;
7356
7357 rcu_read_lock();
7358 /* Don't associate the sock with an unrelated interrupted task's cgroup. */
7359 if (in_interrupt()) {
7360 cgroup = &cgrp_dfl_root.cgrp;
7361 cgroup_get(cgroup);
7362 goto out;
7363 }
7364
7365 while (true) {
7366 struct css_set *cset;
7367
7368 cset = task_css_set(current);
7369 if (likely(cgroup_tryget(cset->dfl_cgrp))) {
7370 cgroup = cset->dfl_cgrp;
7371 break;
7372 }
7373 cpu_relax();
7374 }
7375 out:
7376 skcd->cgroup = cgroup;
7377 cgroup_bpf_get(cgroup);
7378 rcu_read_unlock();
7379 }
7380
7381 void cgroup_sk_clone(struct sock_cgroup_data *skcd)
7382 {
7383 struct cgroup *cgrp = sock_cgroup_ptr(skcd);
7384
7385 /*
7386 * We might be cloning a socket which is left in an empty
7387 * cgroup and the cgroup might have already been rmdir'd.
7388 * Don't use cgroup_get_live().
7389 */
7390 cgroup_get(cgrp);
7391 cgroup_bpf_get(cgrp);
7392 }
7393
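/* Drop the references taken by cgroup_sk_alloc() or cgroup_sk_clone(). */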
7394 void cgroup_sk_free(struct sock_cgroup_data *skcd)
7395 {
7396 struct cgroup *cgrp = sock_cgroup_ptr(skcd);
7397
7398 cgroup_bpf_put(cgrp);
7399 cgroup_put(cgrp);
7400 }
7401
7402 #endif /* CONFIG_SOCK_CGROUP_DATA */
7403
7404 #ifdef CONFIG_SYSFS
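/*
 * /sys/kernel/cgroup/ interface: "delegate" lists the cgroup interface
 * files that are safe to delegate to a less privileged user and "features"
 * lists the mount-time features this kernel supports.
 */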
7405 static ssize_t show_delegatable_files(struct cftype *files, char *buf,
7406 ssize_t size, const char *prefix)
7407 {
7408 struct cftype *cft;
7409 ssize_t ret = 0;
7410
7411 for (cft = files; cft && cft->name[0] != '\0'; cft++) {
7412 if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
7413 continue;
7414
7415 if (prefix)
7416 ret += snprintf(buf + ret, size - ret, "%s.", prefix);
7417
7418 ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);
7419
7420 if (WARN_ON(ret >= size))
7421 break;
7422 }
7423
7424 return ret;
7425 }
7426
7427 static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
7428 char *buf)
7429 {
7430 struct cgroup_subsys *ss;
7431 int ssid;
7432 ssize_t ret = 0;
7433
7434 ret = show_delegatable_files(cgroup_base_files, buf + ret,
7435 PAGE_SIZE - ret, NULL);
7436 if (cgroup_psi_enabled())
7437 ret += show_delegatable_files(cgroup_psi_files, buf + ret,
7438 PAGE_SIZE - ret, NULL);
7439
7440 for_each_subsys(ss, ssid)
7441 ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
7442 PAGE_SIZE - ret,
7443 cgroup_subsys_name[ssid]);
7444
7445 return ret;
7446 }
7447 static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
7448
7449 static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
7450 char *buf)
7451 {
7452 return snprintf(buf, PAGE_SIZE,
7453 "nsdelegate\n"
7454 "favordynmods\n"
7455 "memory_localevents\n"
7456 "memory_recursiveprot\n"
7457 "memory_hugetlb_accounting\n"
7458 "pids_localevents\n");
7459 }
7460 static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
7461
7462 static struct attribute *cgroup_sysfs_attrs[] = {
7463 &cgroup_delegate_attr.attr,
7464 &cgroup_features_attr.attr,
7465 NULL,
7466 };
7467
7468 static const struct attribute_group cgroup_sysfs_attr_group = {
7469 .attrs = cgroup_sysfs_attrs,
7470 .name = "cgroup",
7471 };
7472
7473 static int __init cgroup_sysfs_init(void)
7474 {
7475 return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
7476 }
7477 subsys_initcall(cgroup_sysfs_init);
7478
7479 #endif /* CONFIG_SYSFS */
7480