xref: /linux/kernel/cgroup/cgroup.c (revision 031fba65fc202abf1f193e321be7a2c274fd88ba)
1 /*
2  *  Generic process-grouping system.
3  *
4  *  Based originally on the cpuset system, extracted by Paul Menage
5  *  Copyright (C) 2006 Google, Inc
6  *
7  *  Notifications support
8  *  Copyright (C) 2009 Nokia Corporation
9  *  Author: Kirill A. Shutemov
10  *
11  *  Copyright notices from the original cpuset code:
12  *  --------------------------------------------------
13  *  Copyright (C) 2003 BULL SA.
14  *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
15  *
16  *  Portions derived from Patrick Mochel's sysfs code.
17  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
18  *
19  *  2003-10-10 Written by Simon Derr.
20  *  2003-10-22 Updates by Stephen Hemminger.
21  *  2004 May-July Rework by Paul Jackson.
22  *  ---------------------------------------------------
23  *
24  *  This file is subject to the terms and conditions of the GNU General Public
25  *  License.  See the file COPYING in the main directory of the Linux
26  *  distribution for more details.
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include "cgroup-internal.h"
32 
33 #include <linux/bpf-cgroup.h>
34 #include <linux/cred.h>
35 #include <linux/errno.h>
36 #include <linux/init_task.h>
37 #include <linux/kernel.h>
38 #include <linux/magic.h>
39 #include <linux/mutex.h>
40 #include <linux/mount.h>
41 #include <linux/pagemap.h>
42 #include <linux/proc_fs.h>
43 #include <linux/rcupdate.h>
44 #include <linux/sched.h>
45 #include <linux/sched/task.h>
46 #include <linux/slab.h>
47 #include <linux/spinlock.h>
48 #include <linux/percpu-rwsem.h>
49 #include <linux/string.h>
50 #include <linux/hashtable.h>
51 #include <linux/idr.h>
52 #include <linux/kthread.h>
53 #include <linux/atomic.h>
54 #include <linux/cpuset.h>
55 #include <linux/proc_ns.h>
56 #include <linux/nsproxy.h>
57 #include <linux/file.h>
58 #include <linux/fs_parser.h>
59 #include <linux/sched/cputime.h>
60 #include <linux/sched/deadline.h>
61 #include <linux/psi.h>
62 #include <net/sock.h>
63 
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/cgroup.h>
66 
67 #define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
68 					 MAX_CFTYPE_NAME + 2)
69 /* let's not notify more than 100 times per second */
70 #define CGROUP_FILE_NOTIFY_MIN_INTV	DIV_ROUND_UP(HZ, 100)
71 
72 /*
73  * To avoid confusing the compiler (and generating warnings) with code
74  * that attempts to access what would be a 0-element array (i.e. sized
75  * to a potentially empty array when CGROUP_SUBSYS_COUNT == 0), this
76  * constant expression can be added.
77  */
78 #define CGROUP_HAS_SUBSYS_CONFIG	(CGROUP_SUBSYS_COUNT > 0)
79 
80 /*
81  * cgroup_mutex is the master lock.  Any modification to cgroup or its
82  * hierarchy must be performed while holding it.
83  *
84  * css_set_lock protects task->cgroups pointer, the list of css_set
85  * objects, and the chain of tasks off each css_set.
86  *
87  * These locks are exported if CONFIG_PROVE_RCU so that accessors in
88  * cgroup.h can use them for lockdep annotations.
89  */
90 DEFINE_MUTEX(cgroup_mutex);
91 DEFINE_SPINLOCK(css_set_lock);
92 
93 #ifdef CONFIG_PROVE_RCU
94 EXPORT_SYMBOL_GPL(cgroup_mutex);
95 EXPORT_SYMBOL_GPL(css_set_lock);
96 #endif
97 
98 DEFINE_SPINLOCK(trace_cgroup_path_lock);
99 char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
100 static bool cgroup_debug __read_mostly;
101 
102 /*
103  * Protects cgroup_idr and css_idr so that IDs can be released without
104  * grabbing cgroup_mutex.
105  */
106 static DEFINE_SPINLOCK(cgroup_idr_lock);
107 
108 /*
109  * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
110  * against file removal/re-creation across css hiding.
111  */
112 static DEFINE_SPINLOCK(cgroup_file_kn_lock);
113 
114 DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
115 
116 #define cgroup_assert_mutex_or_rcu_locked()				\
117 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
118 			   !lockdep_is_held(&cgroup_mutex),		\
119 			   "cgroup_mutex or RCU read lock required");
120 
121 /*
122  * cgroup destruction makes heavy use of work items and there can be a lot
123  * of concurrent destructions.  Use a separate workqueue so that cgroup
124  * destruction work items don't end up filling up max_active of system_wq
125  * which may lead to deadlock.
126  */
127 static struct workqueue_struct *cgroup_destroy_wq;
128 
129 /* generate an array of cgroup subsystem pointers */
130 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
131 struct cgroup_subsys *cgroup_subsys[] = {
132 #include <linux/cgroup_subsys.h>
133 };
134 #undef SUBSYS
135 
136 /* array of cgroup subsystem names */
137 #define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
138 static const char *cgroup_subsys_name[] = {
139 #include <linux/cgroup_subsys.h>
140 };
141 #undef SUBSYS
142 
143 /* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
144 #define SUBSYS(_x)								\
145 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);			\
146 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);			\
147 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);			\
148 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
149 #include <linux/cgroup_subsys.h>
150 #undef SUBSYS
151 
152 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
153 static struct static_key_true *cgroup_subsys_enabled_key[] = {
154 #include <linux/cgroup_subsys.h>
155 };
156 #undef SUBSYS
157 
158 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
159 static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
160 #include <linux/cgroup_subsys.h>
161 };
162 #undef SUBSYS
163 
164 static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);
165 
166 /* the default hierarchy */
167 struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
168 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
169 
170 /*
171  * The default hierarchy always exists but is hidden until mounted for the
172  * first time.  This is for backward compatibility.
173  */
174 static bool cgrp_dfl_visible;
175 
176 /* some controllers are not supported in the default hierarchy */
177 static u16 cgrp_dfl_inhibit_ss_mask;
178 
179 /* some controllers are implicitly enabled on the default hierarchy */
180 static u16 cgrp_dfl_implicit_ss_mask;
181 
182 /* some controllers can be threaded on the default hierarchy */
183 static u16 cgrp_dfl_threaded_ss_mask;
184 
185 /* The list of hierarchy roots */
186 LIST_HEAD(cgroup_roots);
187 static int cgroup_root_count;
188 
189 /* hierarchy ID allocation and mapping, protected by cgroup_mutex */
190 static DEFINE_IDR(cgroup_hierarchy_idr);
191 
192 /*
193  * Assign a monotonically increasing serial number to csses.  It guarantees
194  * cgroups with bigger numbers are newer than those with smaller numbers.
195  * Also, as csses are always appended to the parent's ->children list, it
196  * guarantees that sibling csses are always sorted in the ascending serial
197  * number order on the list.  Protected by cgroup_mutex.
198  */
199 static u64 css_serial_nr_next = 1;
200 
201 /*
202  * These bitmasks identify subsystems with specific features to avoid
203  * having to do iterative checks repeatedly.
204  */
205 static u16 have_fork_callback __read_mostly;
206 static u16 have_exit_callback __read_mostly;
207 static u16 have_release_callback __read_mostly;
208 static u16 have_canfork_callback __read_mostly;
209 
210 static bool have_favordynmods __ro_after_init = IS_ENABLED(CONFIG_CGROUP_FAVOR_DYNMODS);
211 
212 /* cgroup namespace for init task */
213 struct cgroup_namespace init_cgroup_ns = {
214 	.ns.count	= REFCOUNT_INIT(2),
215 	.user_ns	= &init_user_ns,
216 	.ns.ops		= &cgroupns_operations,
217 	.ns.inum	= PROC_CGROUP_INIT_INO,
218 	.root_cset	= &init_css_set,
219 };
220 
221 static struct file_system_type cgroup2_fs_type;
222 static struct cftype cgroup_base_files[];
223 static struct cftype cgroup_psi_files[];
224 
225 /* cgroup optional features */
226 enum cgroup_opt_features {
227 #ifdef CONFIG_PSI
228 	OPT_FEATURE_PRESSURE,
229 #endif
230 	OPT_FEATURE_COUNT
231 };
232 
233 static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = {
234 #ifdef CONFIG_PSI
235 	"pressure",
236 #endif
237 };
238 
239 static u16 cgroup_feature_disable_mask __read_mostly;
240 
241 static int cgroup_apply_control(struct cgroup *cgrp);
242 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
243 static void css_task_iter_skip(struct css_task_iter *it,
244 			       struct task_struct *task);
245 static int cgroup_destroy_locked(struct cgroup *cgrp);
246 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
247 					      struct cgroup_subsys *ss);
248 static void css_release(struct percpu_ref *ref);
249 static void kill_css(struct cgroup_subsys_state *css);
250 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
251 			      struct cgroup *cgrp, struct cftype cfts[],
252 			      bool is_add);
253 
254 #ifdef CONFIG_DEBUG_CGROUP_REF
255 #define CGROUP_REF_FN_ATTRS	noinline
256 #define CGROUP_REF_EXPORT(fn)	EXPORT_SYMBOL_GPL(fn);
257 #include <linux/cgroup_refcnt.h>
258 #endif
259 
260 /**
261  * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
262  * @ssid: subsys ID of interest
263  *
264  * cgroup_subsys_enabled() can only be used with literal subsys names which
265  * is fine for individual subsystems but unsuitable for cgroup core.  This
266  * is slower static_key_enabled() based test indexed by @ssid.
267  */
268 bool cgroup_ssid_enabled(int ssid)
269 {
270 	if (!CGROUP_HAS_SUBSYS_CONFIG)
271 		return false;
272 
273 	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
274 }
275 
276 /**
277  * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
278  * @cgrp: the cgroup of interest
279  *
280  * The default hierarchy is the v2 interface of cgroup and this function
281  * can be used to test whether a cgroup is on the default hierarchy for
282  * cases where a subsystem should behave differently depending on the
283  * interface version.
284  *
285  * List of changed behaviors:
286  *
287  * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
288  *   and "name" are disallowed.
289  *
290  * - When mounting an existing superblock, mount options should match.
291  *
292  * - rename(2) is disallowed.
293  *
294  * - "tasks" is removed.  Everything should be at process granularity.  Use
295  *   "cgroup.procs" instead.
296  *
297  * - "cgroup.procs" is not sorted.  pids will be unique unless they got
298  *   recycled in-between reads.
299  *
300  * - "release_agent" and "notify_on_release" are removed.  Replacement
301  *   notification mechanism will be implemented.
302  *
303  * - "cgroup.clone_children" is removed.
304  *
305  * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
306  *   and its descendants contain no task; otherwise, 1.  The file also
307  *   generates kernfs notification which can be monitored through poll and
308  *   [di]notify when the value of the file changes.
309  *
310  * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
311  *   take masks of ancestors with non-empty cpus/mems, instead of being
312  *   moved to an ancestor.
313  *
314  * - cpuset: a task can be moved into an empty cpuset, and again it takes
315  *   masks of ancestors.
316  *
317  * - blkcg: blk-throttle becomes properly hierarchical.
318  */
319 bool cgroup_on_dfl(const struct cgroup *cgrp)
320 {
321 	return cgrp->root == &cgrp_dfl_root;
322 }
323 
324 /* IDR wrappers which synchronize using cgroup_idr_lock */
325 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
326 			    gfp_t gfp_mask)
327 {
328 	int ret;
329 
330 	idr_preload(gfp_mask);
331 	spin_lock_bh(&cgroup_idr_lock);
332 	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
333 	spin_unlock_bh(&cgroup_idr_lock);
334 	idr_preload_end();
335 	return ret;
336 }
337 
338 static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
339 {
340 	void *ret;
341 
342 	spin_lock_bh(&cgroup_idr_lock);
343 	ret = idr_replace(idr, ptr, id);
344 	spin_unlock_bh(&cgroup_idr_lock);
345 	return ret;
346 }
347 
348 static void cgroup_idr_remove(struct idr *idr, int id)
349 {
350 	spin_lock_bh(&cgroup_idr_lock);
351 	idr_remove(idr, id);
352 	spin_unlock_bh(&cgroup_idr_lock);
353 }
354 
355 static bool cgroup_has_tasks(struct cgroup *cgrp)
356 {
357 	return cgrp->nr_populated_csets;
358 }
359 
360 static bool cgroup_is_threaded(struct cgroup *cgrp)
361 {
362 	return cgrp->dom_cgrp != cgrp;
363 }
364 
365 /* can @cgrp host both domain and threaded children? */
366 static bool cgroup_is_mixable(struct cgroup *cgrp)
367 {
368 	/*
369 	 * Root isn't under domain level resource control exempting it from
370 	 * the no-internal-process constraint, so it can serve as a thread
371 	 * root and a parent of resource domains at the same time.
372 	 */
373 	return !cgroup_parent(cgrp);
374 }
375 
376 /* can @cgrp become a thread root? Should always be true for a thread root */
377 static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
378 {
379 	/* mixables don't care */
380 	if (cgroup_is_mixable(cgrp))
381 		return true;
382 
383 	/* domain roots can't be nested under threaded */
384 	if (cgroup_is_threaded(cgrp))
385 		return false;
386 
387 	/* can only have either domain or threaded children */
388 	if (cgrp->nr_populated_domain_children)
389 		return false;
390 
391 	/* and no domain controllers can be enabled */
392 	if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
393 		return false;
394 
395 	return true;
396 }
397 
398 /* is @cgrp root of a threaded subtree? */
399 static bool cgroup_is_thread_root(struct cgroup *cgrp)
400 {
401 	/* thread root should be a domain */
402 	if (cgroup_is_threaded(cgrp))
403 		return false;
404 
405 	/* a domain w/ threaded children is a thread root */
406 	if (cgrp->nr_threaded_children)
407 		return true;
408 
409 	/*
410 	 * A domain which has tasks and explicit threaded controllers
411 	 * enabled is a thread root.
412 	 */
413 	if (cgroup_has_tasks(cgrp) &&
414 	    (cgrp->subtree_control & cgrp_dfl_threaded_ss_mask))
415 		return true;
416 
417 	return false;
418 }
419 
420 /* a domain which isn't connected to the root w/o brekage can't be used */
421 static bool cgroup_is_valid_domain(struct cgroup *cgrp)
422 {
423 	/* the cgroup itself can be a thread root */
424 	if (cgroup_is_threaded(cgrp))
425 		return false;
426 
427 	/* but the ancestors can't be unless mixable */
428 	while ((cgrp = cgroup_parent(cgrp))) {
429 		if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp))
430 			return false;
431 		if (cgroup_is_threaded(cgrp))
432 			return false;
433 	}
434 
435 	return true;
436 }
437 
438 /* subsystems visibly enabled on a cgroup */
439 static u16 cgroup_control(struct cgroup *cgrp)
440 {
441 	struct cgroup *parent = cgroup_parent(cgrp);
442 	u16 root_ss_mask = cgrp->root->subsys_mask;
443 
444 	if (parent) {
445 		u16 ss_mask = parent->subtree_control;
446 
447 		/* threaded cgroups can only have threaded controllers */
448 		if (cgroup_is_threaded(cgrp))
449 			ss_mask &= cgrp_dfl_threaded_ss_mask;
450 		return ss_mask;
451 	}
452 
453 	if (cgroup_on_dfl(cgrp))
454 		root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
455 				  cgrp_dfl_implicit_ss_mask);
456 	return root_ss_mask;
457 }
458 
459 /* subsystems enabled on a cgroup */
460 static u16 cgroup_ss_mask(struct cgroup *cgrp)
461 {
462 	struct cgroup *parent = cgroup_parent(cgrp);
463 
464 	if (parent) {
465 		u16 ss_mask = parent->subtree_ss_mask;
466 
467 		/* threaded cgroups can only have threaded controllers */
468 		if (cgroup_is_threaded(cgrp))
469 			ss_mask &= cgrp_dfl_threaded_ss_mask;
470 		return ss_mask;
471 	}
472 
473 	return cgrp->root->subsys_mask;
474 }
475 
476 /**
477  * cgroup_css - obtain a cgroup's css for the specified subsystem
478  * @cgrp: the cgroup of interest
479  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
480  *
481  * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
482  * function must be called either under cgroup_mutex or rcu_read_lock() and
483  * the caller is responsible for pinning the returned css if it wants to
484  * keep accessing it outside the said locks.  This function may return
485  * %NULL if @cgrp doesn't have @subsys_id enabled.
486  */
487 static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
488 					      struct cgroup_subsys *ss)
489 {
490 	if (CGROUP_HAS_SUBSYS_CONFIG && ss)
491 		return rcu_dereference_check(cgrp->subsys[ss->id],
492 					lockdep_is_held(&cgroup_mutex));
493 	else
494 		return &cgrp->self;
495 }
496 
497 /**
498  * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
499  * @cgrp: the cgroup of interest
500  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
501  *
502  * Similar to cgroup_css() but returns the effective css, which is defined
503  * as the matching css of the nearest ancestor including self which has @ss
504  * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
505  * function is guaranteed to return non-NULL css.
506  */
507 static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
508 							struct cgroup_subsys *ss)
509 {
510 	lockdep_assert_held(&cgroup_mutex);
511 
512 	if (!ss)
513 		return &cgrp->self;
514 
515 	/*
516 	 * This function is used while updating css associations and thus
517 	 * can't test the csses directly.  Test ss_mask.
518 	 */
519 	while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
520 		cgrp = cgroup_parent(cgrp);
521 		if (!cgrp)
522 			return NULL;
523 	}
524 
525 	return cgroup_css(cgrp, ss);
526 }
527 
528 /**
529  * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
530  * @cgrp: the cgroup of interest
531  * @ss: the subsystem of interest
532  *
533  * Find and get the effective css of @cgrp for @ss.  The effective css is
534  * defined as the matching css of the nearest ancestor including self which
535  * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
536  * the root css is returned, so this function always returns a valid css.
537  *
538  * The returned css is not guaranteed to be online, and therefore it is the
539  * callers responsibility to try get a reference for it.
540  */
541 struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
542 					 struct cgroup_subsys *ss)
543 {
544 	struct cgroup_subsys_state *css;
545 
546 	if (!CGROUP_HAS_SUBSYS_CONFIG)
547 		return NULL;
548 
549 	do {
550 		css = cgroup_css(cgrp, ss);
551 
552 		if (css)
553 			return css;
554 		cgrp = cgroup_parent(cgrp);
555 	} while (cgrp);
556 
557 	return init_css_set.subsys[ss->id];
558 }
559 
560 /**
561  * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
562  * @cgrp: the cgroup of interest
563  * @ss: the subsystem of interest
564  *
565  * Find and get the effective css of @cgrp for @ss.  The effective css is
566  * defined as the matching css of the nearest ancestor including self which
567  * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
568  * the root css is returned, so this function always returns a valid css.
569  * The returned css must be put using css_put().
570  */
571 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
572 					     struct cgroup_subsys *ss)
573 {
574 	struct cgroup_subsys_state *css;
575 
576 	if (!CGROUP_HAS_SUBSYS_CONFIG)
577 		return NULL;
578 
579 	rcu_read_lock();
580 
581 	do {
582 		css = cgroup_css(cgrp, ss);
583 
584 		if (css && css_tryget_online(css))
585 			goto out_unlock;
586 		cgrp = cgroup_parent(cgrp);
587 	} while (cgrp);
588 
589 	css = init_css_set.subsys[ss->id];
590 	css_get(css);
591 out_unlock:
592 	rcu_read_unlock();
593 	return css;
594 }
595 EXPORT_SYMBOL_GPL(cgroup_get_e_css);
596 
597 static void cgroup_get_live(struct cgroup *cgrp)
598 {
599 	WARN_ON_ONCE(cgroup_is_dead(cgrp));
600 	cgroup_get(cgrp);
601 }
602 
603 /**
604  * __cgroup_task_count - count the number of tasks in a cgroup. The caller
605  * is responsible for taking the css_set_lock.
606  * @cgrp: the cgroup in question
607  */
608 int __cgroup_task_count(const struct cgroup *cgrp)
609 {
610 	int count = 0;
611 	struct cgrp_cset_link *link;
612 
613 	lockdep_assert_held(&css_set_lock);
614 
615 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
616 		count += link->cset->nr_tasks;
617 
618 	return count;
619 }
620 
621 /**
622  * cgroup_task_count - count the number of tasks in a cgroup.
623  * @cgrp: the cgroup in question
624  */
625 int cgroup_task_count(const struct cgroup *cgrp)
626 {
627 	int count;
628 
629 	spin_lock_irq(&css_set_lock);
630 	count = __cgroup_task_count(cgrp);
631 	spin_unlock_irq(&css_set_lock);
632 
633 	return count;
634 }
635 
636 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
637 {
638 	struct cgroup *cgrp = of->kn->parent->priv;
639 	struct cftype *cft = of_cft(of);
640 
641 	/*
642 	 * This is open and unprotected implementation of cgroup_css().
643 	 * seq_css() is only called from a kernfs file operation which has
644 	 * an active reference on the file.  Because all the subsystem
645 	 * files are drained before a css is disassociated with a cgroup,
646 	 * the matching css from the cgroup's subsys table is guaranteed to
647 	 * be and stay valid until the enclosing operation is complete.
648 	 */
649 	if (CGROUP_HAS_SUBSYS_CONFIG && cft->ss)
650 		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
651 	else
652 		return &cgrp->self;
653 }
654 EXPORT_SYMBOL_GPL(of_css);
655 
656 /**
657  * for_each_css - iterate all css's of a cgroup
658  * @css: the iteration cursor
659  * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
660  * @cgrp: the target cgroup to iterate css's of
661  *
662  * Should be called under cgroup_mutex.
663  */
664 #define for_each_css(css, ssid, cgrp)					\
665 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
666 		if (!((css) = rcu_dereference_check(			\
667 				(cgrp)->subsys[(ssid)],			\
668 				lockdep_is_held(&cgroup_mutex)))) { }	\
669 		else
670 
671 /**
672  * do_each_subsys_mask - filter for_each_subsys with a bitmask
673  * @ss: the iteration cursor
674  * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
675  * @ss_mask: the bitmask
676  *
677  * The block will only run for cases where the ssid-th bit (1 << ssid) of
678  * @ss_mask is set.
679  */
680 #define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
681 	unsigned long __ss_mask = (ss_mask);				\
682 	if (!CGROUP_HAS_SUBSYS_CONFIG) {				\
683 		(ssid) = 0;						\
684 		break;							\
685 	}								\
686 	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
687 		(ss) = cgroup_subsys[ssid];				\
688 		{
689 
690 #define while_each_subsys_mask()					\
691 		}							\
692 	}								\
693 } while (false)
694 
695 /* iterate over child cgrps, lock should be held throughout iteration */
696 #define cgroup_for_each_live_child(child, cgrp)				\
697 	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
698 		if (({ lockdep_assert_held(&cgroup_mutex);		\
699 		       cgroup_is_dead(child); }))			\
700 			;						\
701 		else
702 
703 /* walk live descendants in pre order */
704 #define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
705 	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
706 		if (({ lockdep_assert_held(&cgroup_mutex);		\
707 		       (dsct) = (d_css)->cgroup;			\
708 		       cgroup_is_dead(dsct); }))			\
709 			;						\
710 		else
711 
712 /* walk live descendants in postorder */
713 #define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)		\
714 	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL))	\
715 		if (({ lockdep_assert_held(&cgroup_mutex);		\
716 		       (dsct) = (d_css)->cgroup;			\
717 		       cgroup_is_dead(dsct); }))			\
718 			;						\
719 		else
720 
721 /*
722  * The default css_set - used by init and its children prior to any
723  * hierarchies being mounted. It contains a pointer to the root state
724  * for each subsystem. Also used to anchor the list of css_sets. Not
725  * reference-counted, to improve performance when child cgroups
726  * haven't been created.
727  */
728 struct css_set init_css_set = {
729 	.refcount		= REFCOUNT_INIT(1),
730 	.dom_cset		= &init_css_set,
731 	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
732 	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
733 	.dying_tasks		= LIST_HEAD_INIT(init_css_set.dying_tasks),
734 	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
735 	.threaded_csets		= LIST_HEAD_INIT(init_css_set.threaded_csets),
736 	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
737 	.mg_src_preload_node	= LIST_HEAD_INIT(init_css_set.mg_src_preload_node),
738 	.mg_dst_preload_node	= LIST_HEAD_INIT(init_css_set.mg_dst_preload_node),
739 	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
740 
741 	/*
742 	 * The following field is re-initialized when this cset gets linked
743 	 * in cgroup_init().  However, let's initialize the field
744 	 * statically too so that the default cgroup can be accessed safely
745 	 * early during boot.
746 	 */
747 	.dfl_cgrp		= &cgrp_dfl_root.cgrp,
748 };
749 
750 static int css_set_count	= 1;	/* 1 for init_css_set */
751 
752 static bool css_set_threaded(struct css_set *cset)
753 {
754 	return cset->dom_cset != cset;
755 }
756 
757 /**
758  * css_set_populated - does a css_set contain any tasks?
759  * @cset: target css_set
760  *
761  * css_set_populated() should be the same as !!cset->nr_tasks at steady
762  * state. However, css_set_populated() can be called while a task is being
763  * added to or removed from the linked list before the nr_tasks is
764  * properly updated. Hence, we can't just look at ->nr_tasks here.
765  */
766 static bool css_set_populated(struct css_set *cset)
767 {
768 	lockdep_assert_held(&css_set_lock);
769 
770 	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
771 }
772 
773 /**
774  * cgroup_update_populated - update the populated count of a cgroup
775  * @cgrp: the target cgroup
776  * @populated: inc or dec populated count
777  *
778  * One of the css_sets associated with @cgrp is either getting its first
779  * task or losing the last.  Update @cgrp->nr_populated_* accordingly.  The
780  * count is propagated towards root so that a given cgroup's
781  * nr_populated_children is zero iff none of its descendants contain any
782  * tasks.
783  *
784  * @cgrp's interface file "cgroup.populated" is zero if both
785  * @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and
786  * 1 otherwise.  When the sum changes from or to zero, userland is notified
787  * that the content of the interface file has changed.  This can be used to
788  * detect when @cgrp and its descendants become populated or empty.
789  */
790 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
791 {
792 	struct cgroup *child = NULL;
793 	int adj = populated ? 1 : -1;
794 
795 	lockdep_assert_held(&css_set_lock);
796 
797 	do {
798 		bool was_populated = cgroup_is_populated(cgrp);
799 
800 		if (!child) {
801 			cgrp->nr_populated_csets += adj;
802 		} else {
803 			if (cgroup_is_threaded(child))
804 				cgrp->nr_populated_threaded_children += adj;
805 			else
806 				cgrp->nr_populated_domain_children += adj;
807 		}
808 
809 		if (was_populated == cgroup_is_populated(cgrp))
810 			break;
811 
812 		cgroup1_check_for_release(cgrp);
813 		TRACE_CGROUP_PATH(notify_populated, cgrp,
814 				  cgroup_is_populated(cgrp));
815 		cgroup_file_notify(&cgrp->events_file);
816 
817 		child = cgrp;
818 		cgrp = cgroup_parent(cgrp);
819 	} while (cgrp);
820 }
821 
822 /**
823  * css_set_update_populated - update populated state of a css_set
824  * @cset: target css_set
825  * @populated: whether @cset is populated or depopulated
826  *
827  * @cset is either getting the first task or losing the last.  Update the
828  * populated counters of all associated cgroups accordingly.
829  */
830 static void css_set_update_populated(struct css_set *cset, bool populated)
831 {
832 	struct cgrp_cset_link *link;
833 
834 	lockdep_assert_held(&css_set_lock);
835 
836 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
837 		cgroup_update_populated(link->cgrp, populated);
838 }
839 
840 /*
841  * @task is leaving, advance task iterators which are pointing to it so
842  * that they can resume at the next position.  Advancing an iterator might
843  * remove it from the list, use safe walk.  See css_task_iter_skip() for
844  * details.
845  */
846 static void css_set_skip_task_iters(struct css_set *cset,
847 				    struct task_struct *task)
848 {
849 	struct css_task_iter *it, *pos;
850 
851 	list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
852 		css_task_iter_skip(it, task);
853 }
854 
855 /**
856  * css_set_move_task - move a task from one css_set to another
857  * @task: task being moved
858  * @from_cset: css_set @task currently belongs to (may be NULL)
859  * @to_cset: new css_set @task is being moved to (may be NULL)
860  * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
861  *
862  * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
863  * css_set, @from_cset can be NULL.  If @task is being disassociated
864  * instead of moved, @to_cset can be NULL.
865  *
866  * This function automatically handles populated counter updates and
867  * css_task_iter adjustments but the caller is responsible for managing
868  * @from_cset and @to_cset's reference counts.
869  */
870 static void css_set_move_task(struct task_struct *task,
871 			      struct css_set *from_cset, struct css_set *to_cset,
872 			      bool use_mg_tasks)
873 {
874 	lockdep_assert_held(&css_set_lock);
875 
876 	if (to_cset && !css_set_populated(to_cset))
877 		css_set_update_populated(to_cset, true);
878 
879 	if (from_cset) {
880 		WARN_ON_ONCE(list_empty(&task->cg_list));
881 
882 		css_set_skip_task_iters(from_cset, task);
883 		list_del_init(&task->cg_list);
884 		if (!css_set_populated(from_cset))
885 			css_set_update_populated(from_cset, false);
886 	} else {
887 		WARN_ON_ONCE(!list_empty(&task->cg_list));
888 	}
889 
890 	if (to_cset) {
891 		/*
892 		 * We are synchronized through cgroup_threadgroup_rwsem
893 		 * against PF_EXITING setting such that we can't race
894 		 * against cgroup_exit()/cgroup_free() dropping the css_set.
895 		 */
896 		WARN_ON_ONCE(task->flags & PF_EXITING);
897 
898 		cgroup_move_task(task, to_cset);
899 		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
900 							     &to_cset->tasks);
901 	}
902 }
903 
904 /*
905  * hash table for cgroup groups. This improves the performance to find
906  * an existing css_set. This hash doesn't (currently) take into
907  * account cgroups in empty hierarchies.
908  */
909 #define CSS_SET_HASH_BITS	7
910 static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
911 
912 static unsigned long css_set_hash(struct cgroup_subsys_state **css)
913 {
914 	unsigned long key = 0UL;
915 	struct cgroup_subsys *ss;
916 	int i;
917 
918 	for_each_subsys(ss, i)
919 		key += (unsigned long)css[i];
920 	key = (key >> 16) ^ key;
921 
922 	return key;
923 }
924 
925 void put_css_set_locked(struct css_set *cset)
926 {
927 	struct cgrp_cset_link *link, *tmp_link;
928 	struct cgroup_subsys *ss;
929 	int ssid;
930 
931 	lockdep_assert_held(&css_set_lock);
932 
933 	if (!refcount_dec_and_test(&cset->refcount))
934 		return;
935 
936 	WARN_ON_ONCE(!list_empty(&cset->threaded_csets));
937 
938 	/* This css_set is dead. Unlink it and release cgroup and css refs */
939 	for_each_subsys(ss, ssid) {
940 		list_del(&cset->e_cset_node[ssid]);
941 		css_put(cset->subsys[ssid]);
942 	}
943 	hash_del(&cset->hlist);
944 	css_set_count--;
945 
946 	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
947 		list_del(&link->cset_link);
948 		list_del(&link->cgrp_link);
949 		if (cgroup_parent(link->cgrp))
950 			cgroup_put(link->cgrp);
951 		kfree(link);
952 	}
953 
954 	if (css_set_threaded(cset)) {
955 		list_del(&cset->threaded_csets_node);
956 		put_css_set_locked(cset->dom_cset);
957 	}
958 
959 	kfree_rcu(cset, rcu_head);
960 }
961 
962 /**
963  * compare_css_sets - helper function for find_existing_css_set().
964  * @cset: candidate css_set being tested
965  * @old_cset: existing css_set for a task
966  * @new_cgrp: cgroup that's being entered by the task
967  * @template: desired set of css pointers in css_set (pre-calculated)
968  *
969  * Returns true if "cset" matches "old_cset" except for the hierarchy
970  * which "new_cgrp" belongs to, for which it should match "new_cgrp".
971  */
972 static bool compare_css_sets(struct css_set *cset,
973 			     struct css_set *old_cset,
974 			     struct cgroup *new_cgrp,
975 			     struct cgroup_subsys_state *template[])
976 {
977 	struct cgroup *new_dfl_cgrp;
978 	struct list_head *l1, *l2;
979 
980 	/*
981 	 * On the default hierarchy, there can be csets which are
982 	 * associated with the same set of cgroups but different csses.
983 	 * Let's first ensure that csses match.
984 	 */
985 	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
986 		return false;
987 
988 
989 	/* @cset's domain should match the default cgroup's */
990 	if (cgroup_on_dfl(new_cgrp))
991 		new_dfl_cgrp = new_cgrp;
992 	else
993 		new_dfl_cgrp = old_cset->dfl_cgrp;
994 
995 	if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp)
996 		return false;
997 
998 	/*
999 	 * Compare cgroup pointers in order to distinguish between
1000 	 * different cgroups in hierarchies.  As different cgroups may
1001 	 * share the same effective css, this comparison is always
1002 	 * necessary.
1003 	 */
1004 	l1 = &cset->cgrp_links;
1005 	l2 = &old_cset->cgrp_links;
1006 	while (1) {
1007 		struct cgrp_cset_link *link1, *link2;
1008 		struct cgroup *cgrp1, *cgrp2;
1009 
1010 		l1 = l1->next;
1011 		l2 = l2->next;
1012 		/* See if we reached the end - both lists are equal length. */
1013 		if (l1 == &cset->cgrp_links) {
1014 			BUG_ON(l2 != &old_cset->cgrp_links);
1015 			break;
1016 		} else {
1017 			BUG_ON(l2 == &old_cset->cgrp_links);
1018 		}
1019 		/* Locate the cgroups associated with these links. */
1020 		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
1021 		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
1022 		cgrp1 = link1->cgrp;
1023 		cgrp2 = link2->cgrp;
1024 		/* Hierarchies should be linked in the same order. */
1025 		BUG_ON(cgrp1->root != cgrp2->root);
1026 
1027 		/*
1028 		 * If this hierarchy is the hierarchy of the cgroup
1029 		 * that's changing, then we need to check that this
1030 		 * css_set points to the new cgroup; if it's any other
1031 		 * hierarchy, then this css_set should point to the
1032 		 * same cgroup as the old css_set.
1033 		 */
1034 		if (cgrp1->root == new_cgrp->root) {
1035 			if (cgrp1 != new_cgrp)
1036 				return false;
1037 		} else {
1038 			if (cgrp1 != cgrp2)
1039 				return false;
1040 		}
1041 	}
1042 	return true;
1043 }
1044 
1045 /**
1046  * find_existing_css_set - init css array and find the matching css_set
1047  * @old_cset: the css_set that we're using before the cgroup transition
1048  * @cgrp: the cgroup that we're moving into
1049  * @template: out param for the new set of csses, should be clear on entry
1050  */
1051 static struct css_set *find_existing_css_set(struct css_set *old_cset,
1052 					struct cgroup *cgrp,
1053 					struct cgroup_subsys_state **template)
1054 {
1055 	struct cgroup_root *root = cgrp->root;
1056 	struct cgroup_subsys *ss;
1057 	struct css_set *cset;
1058 	unsigned long key;
1059 	int i;
1060 
1061 	/*
1062 	 * Build the set of subsystem state objects that we want to see in the
1063 	 * new css_set. While subsystems can change globally, the entries here
1064 	 * won't change, so no need for locking.
1065 	 */
1066 	for_each_subsys(ss, i) {
1067 		if (root->subsys_mask & (1UL << i)) {
1068 			/*
1069 			 * @ss is in this hierarchy, so we want the
1070 			 * effective css from @cgrp.
1071 			 */
1072 			template[i] = cgroup_e_css_by_mask(cgrp, ss);
1073 		} else {
1074 			/*
1075 			 * @ss is not in this hierarchy, so we don't want
1076 			 * to change the css.
1077 			 */
1078 			template[i] = old_cset->subsys[i];
1079 		}
1080 	}
1081 
1082 	key = css_set_hash(template);
1083 	hash_for_each_possible(css_set_table, cset, hlist, key) {
1084 		if (!compare_css_sets(cset, old_cset, cgrp, template))
1085 			continue;
1086 
1087 		/* This css_set matches what we need */
1088 		return cset;
1089 	}
1090 
1091 	/* No existing cgroup group matched */
1092 	return NULL;
1093 }
1094 
1095 static void free_cgrp_cset_links(struct list_head *links_to_free)
1096 {
1097 	struct cgrp_cset_link *link, *tmp_link;
1098 
1099 	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
1100 		list_del(&link->cset_link);
1101 		kfree(link);
1102 	}
1103 }
1104 
1105 /**
1106  * allocate_cgrp_cset_links - allocate cgrp_cset_links
1107  * @count: the number of links to allocate
1108  * @tmp_links: list_head the allocated links are put on
1109  *
1110  * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
1111  * through ->cset_link.  Returns 0 on success or -errno.
1112  */
1113 static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
1114 {
1115 	struct cgrp_cset_link *link;
1116 	int i;
1117 
1118 	INIT_LIST_HEAD(tmp_links);
1119 
1120 	for (i = 0; i < count; i++) {
1121 		link = kzalloc(sizeof(*link), GFP_KERNEL);
1122 		if (!link) {
1123 			free_cgrp_cset_links(tmp_links);
1124 			return -ENOMEM;
1125 		}
1126 		list_add(&link->cset_link, tmp_links);
1127 	}
1128 	return 0;
1129 }
1130 
1131 /**
1132  * link_css_set - a helper function to link a css_set to a cgroup
1133  * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
1134  * @cset: the css_set to be linked
1135  * @cgrp: the destination cgroup
1136  */
1137 static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
1138 			 struct cgroup *cgrp)
1139 {
1140 	struct cgrp_cset_link *link;
1141 
1142 	BUG_ON(list_empty(tmp_links));
1143 
1144 	if (cgroup_on_dfl(cgrp))
1145 		cset->dfl_cgrp = cgrp;
1146 
1147 	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
1148 	link->cset = cset;
1149 	link->cgrp = cgrp;
1150 
1151 	/*
1152 	 * Always add links to the tail of the lists so that the lists are
1153 	 * in chronological order.
1154 	 */
1155 	list_move_tail(&link->cset_link, &cgrp->cset_links);
1156 	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
1157 
1158 	if (cgroup_parent(cgrp))
1159 		cgroup_get_live(cgrp);
1160 }
1161 
1162 /**
1163  * find_css_set - return a new css_set with one cgroup updated
1164  * @old_cset: the baseline css_set
1165  * @cgrp: the cgroup to be updated
1166  *
1167  * Return a new css_set that's equivalent to @old_cset, but with @cgrp
1168  * substituted into the appropriate hierarchy.
1169  */
1170 static struct css_set *find_css_set(struct css_set *old_cset,
1171 				    struct cgroup *cgrp)
1172 {
1173 	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
1174 	struct css_set *cset;
1175 	struct list_head tmp_links;
1176 	struct cgrp_cset_link *link;
1177 	struct cgroup_subsys *ss;
1178 	unsigned long key;
1179 	int ssid;
1180 
1181 	lockdep_assert_held(&cgroup_mutex);
1182 
1183 	/* First see if we already have a cgroup group that matches
1184 	 * the desired set */
1185 	spin_lock_irq(&css_set_lock);
1186 	cset = find_existing_css_set(old_cset, cgrp, template);
1187 	if (cset)
1188 		get_css_set(cset);
1189 	spin_unlock_irq(&css_set_lock);
1190 
1191 	if (cset)
1192 		return cset;
1193 
1194 	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
1195 	if (!cset)
1196 		return NULL;
1197 
1198 	/* Allocate all the cgrp_cset_link objects that we'll need */
1199 	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
1200 		kfree(cset);
1201 		return NULL;
1202 	}
1203 
1204 	refcount_set(&cset->refcount, 1);
1205 	cset->dom_cset = cset;
1206 	INIT_LIST_HEAD(&cset->tasks);
1207 	INIT_LIST_HEAD(&cset->mg_tasks);
1208 	INIT_LIST_HEAD(&cset->dying_tasks);
1209 	INIT_LIST_HEAD(&cset->task_iters);
1210 	INIT_LIST_HEAD(&cset->threaded_csets);
1211 	INIT_HLIST_NODE(&cset->hlist);
1212 	INIT_LIST_HEAD(&cset->cgrp_links);
1213 	INIT_LIST_HEAD(&cset->mg_src_preload_node);
1214 	INIT_LIST_HEAD(&cset->mg_dst_preload_node);
1215 	INIT_LIST_HEAD(&cset->mg_node);
1216 
1217 	/* Copy the set of subsystem state objects generated in
1218 	 * find_existing_css_set() */
1219 	memcpy(cset->subsys, template, sizeof(cset->subsys));
1220 
1221 	spin_lock_irq(&css_set_lock);
1222 	/* Add reference counts and links from the new css_set. */
1223 	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
1224 		struct cgroup *c = link->cgrp;
1225 
1226 		if (c->root == cgrp->root)
1227 			c = cgrp;
1228 		link_css_set(&tmp_links, cset, c);
1229 	}
1230 
1231 	BUG_ON(!list_empty(&tmp_links));
1232 
1233 	css_set_count++;
1234 
1235 	/* Add @cset to the hash table */
1236 	key = css_set_hash(cset->subsys);
1237 	hash_add(css_set_table, &cset->hlist, key);
1238 
1239 	for_each_subsys(ss, ssid) {
1240 		struct cgroup_subsys_state *css = cset->subsys[ssid];
1241 
1242 		list_add_tail(&cset->e_cset_node[ssid],
1243 			      &css->cgroup->e_csets[ssid]);
1244 		css_get(css);
1245 	}
1246 
1247 	spin_unlock_irq(&css_set_lock);
1248 
1249 	/*
1250 	 * If @cset should be threaded, look up the matching dom_cset and
1251 	 * link them up.  We first fully initialize @cset then look for the
1252 	 * dom_cset.  It's simpler this way and safe as @cset is guaranteed
1253 	 * to stay empty until we return.
1254 	 */
1255 	if (cgroup_is_threaded(cset->dfl_cgrp)) {
1256 		struct css_set *dcset;
1257 
1258 		dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
1259 		if (!dcset) {
1260 			put_css_set(cset);
1261 			return NULL;
1262 		}
1263 
1264 		spin_lock_irq(&css_set_lock);
1265 		cset->dom_cset = dcset;
1266 		list_add_tail(&cset->threaded_csets_node,
1267 			      &dcset->threaded_csets);
1268 		spin_unlock_irq(&css_set_lock);
1269 	}
1270 
1271 	return cset;
1272 }
1273 
1274 struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
1275 {
1276 	struct cgroup *root_cgrp = kernfs_root_to_node(kf_root)->priv;
1277 
1278 	return root_cgrp->root;
1279 }
1280 
1281 void cgroup_favor_dynmods(struct cgroup_root *root, bool favor)
1282 {
1283 	bool favoring = root->flags & CGRP_ROOT_FAVOR_DYNMODS;
1284 
1285 	/* see the comment above CGRP_ROOT_FAVOR_DYNMODS definition */
1286 	if (favor && !favoring) {
1287 		rcu_sync_enter(&cgroup_threadgroup_rwsem.rss);
1288 		root->flags |= CGRP_ROOT_FAVOR_DYNMODS;
1289 	} else if (!favor && favoring) {
1290 		rcu_sync_exit(&cgroup_threadgroup_rwsem.rss);
1291 		root->flags &= ~CGRP_ROOT_FAVOR_DYNMODS;
1292 	}
1293 }
1294 
1295 static int cgroup_init_root_id(struct cgroup_root *root)
1296 {
1297 	int id;
1298 
1299 	lockdep_assert_held(&cgroup_mutex);
1300 
1301 	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
1302 	if (id < 0)
1303 		return id;
1304 
1305 	root->hierarchy_id = id;
1306 	return 0;
1307 }
1308 
1309 static void cgroup_exit_root_id(struct cgroup_root *root)
1310 {
1311 	lockdep_assert_held(&cgroup_mutex);
1312 
1313 	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
1314 }
1315 
1316 void cgroup_free_root(struct cgroup_root *root)
1317 {
1318 	kfree(root);
1319 }
1320 
1321 static void cgroup_destroy_root(struct cgroup_root *root)
1322 {
1323 	struct cgroup *cgrp = &root->cgrp;
1324 	struct cgrp_cset_link *link, *tmp_link;
1325 
1326 	trace_cgroup_destroy_root(root);
1327 
1328 	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1329 
1330 	BUG_ON(atomic_read(&root->nr_cgrps));
1331 	BUG_ON(!list_empty(&cgrp->self.children));
1332 
1333 	/* Rebind all subsystems back to the default hierarchy */
1334 	WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));
1335 
1336 	/*
1337 	 * Release all the links from cset_links to this hierarchy's
1338 	 * root cgroup
1339 	 */
1340 	spin_lock_irq(&css_set_lock);
1341 
1342 	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
1343 		list_del(&link->cset_link);
1344 		list_del(&link->cgrp_link);
1345 		kfree(link);
1346 	}
1347 
1348 	spin_unlock_irq(&css_set_lock);
1349 
1350 	if (!list_empty(&root->root_list)) {
1351 		list_del(&root->root_list);
1352 		cgroup_root_count--;
1353 	}
1354 
1355 	if (!have_favordynmods)
1356 		cgroup_favor_dynmods(root, false);
1357 
1358 	cgroup_exit_root_id(root);
1359 
1360 	cgroup_unlock();
1361 
1362 	cgroup_rstat_exit(cgrp);
1363 	kernfs_destroy_root(root->kf_root);
1364 	cgroup_free_root(root);
1365 }
1366 
1367 /*
1368  * Returned cgroup is without refcount but it's valid as long as cset pins it.
1369  */
1370 static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
1371 					    struct cgroup_root *root)
1372 {
1373 	struct cgroup *res_cgroup = NULL;
1374 
1375 	if (cset == &init_css_set) {
1376 		res_cgroup = &root->cgrp;
1377 	} else if (root == &cgrp_dfl_root) {
1378 		res_cgroup = cset->dfl_cgrp;
1379 	} else {
1380 		struct cgrp_cset_link *link;
1381 		lockdep_assert_held(&css_set_lock);
1382 
1383 		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1384 			struct cgroup *c = link->cgrp;
1385 
1386 			if (c->root == root) {
1387 				res_cgroup = c;
1388 				break;
1389 			}
1390 		}
1391 	}
1392 
1393 	BUG_ON(!res_cgroup);
1394 	return res_cgroup;
1395 }
1396 
1397 /*
1398  * look up cgroup associated with current task's cgroup namespace on the
1399  * specified hierarchy
1400  */
1401 static struct cgroup *
1402 current_cgns_cgroup_from_root(struct cgroup_root *root)
1403 {
1404 	struct cgroup *res = NULL;
1405 	struct css_set *cset;
1406 
1407 	lockdep_assert_held(&css_set_lock);
1408 
1409 	rcu_read_lock();
1410 
1411 	cset = current->nsproxy->cgroup_ns->root_cset;
1412 	res = __cset_cgroup_from_root(cset, root);
1413 
1414 	rcu_read_unlock();
1415 
1416 	return res;
1417 }
1418 
1419 /*
1420  * Look up cgroup associated with current task's cgroup namespace on the default
1421  * hierarchy.
1422  *
1423  * Unlike current_cgns_cgroup_from_root(), this doesn't need locks:
1424  * - Internal rcu_read_lock is unnecessary because we don't dereference any rcu
1425  *   pointers.
1426  * - css_set_lock is not needed because we just read cset->dfl_cgrp.
1427  * - As a bonus returned cgrp is pinned with the current because it cannot
1428  *   switch cgroup_ns asynchronously.
1429  */
1430 static struct cgroup *current_cgns_cgroup_dfl(void)
1431 {
1432 	struct css_set *cset;
1433 
1434 	if (current->nsproxy) {
1435 		cset = current->nsproxy->cgroup_ns->root_cset;
1436 		return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
1437 	} else {
1438 		/*
1439 		 * NOTE: This function may be called from bpf_cgroup_from_id()
1440 		 * on a task which has already passed exit_task_namespaces() and
1441 		 * nsproxy == NULL. Fall back to cgrp_dfl_root which will make all
1442 		 * cgroups visible for lookups.
1443 		 */
1444 		return &cgrp_dfl_root.cgrp;
1445 	}
1446 }
1447 
1448 /* look up cgroup associated with given css_set on the specified hierarchy */
1449 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
1450 					    struct cgroup_root *root)
1451 {
1452 	lockdep_assert_held(&cgroup_mutex);
1453 	lockdep_assert_held(&css_set_lock);
1454 
1455 	return __cset_cgroup_from_root(cset, root);
1456 }
1457 
1458 /*
1459  * Return the cgroup for "task" from the given hierarchy. Must be
1460  * called with cgroup_mutex and css_set_lock held.
1461  */
1462 struct cgroup *task_cgroup_from_root(struct task_struct *task,
1463 				     struct cgroup_root *root)
1464 {
1465 	/*
1466 	 * No need to lock the task - since we hold css_set_lock the
1467 	 * task can't change groups.
1468 	 */
1469 	return cset_cgroup_from_root(task_css_set(task), root);
1470 }
1471 
1472 /*
1473  * A task must hold cgroup_mutex to modify cgroups.
1474  *
1475  * Any task can increment and decrement the count field without lock.
1476  * So in general, code holding cgroup_mutex can't rely on the count
1477  * field not changing.  However, if the count goes to zero, then only
1478  * cgroup_attach_task() can increment it again.  Because a count of zero
1479  * means that no tasks are currently attached, therefore there is no
1480  * way a task attached to that cgroup can fork (the other way to
1481  * increment the count).  So code holding cgroup_mutex can safely
1482  * assume that if the count is zero, it will stay zero. Similarly, if
1483  * a task holds cgroup_mutex on a cgroup with zero count, it
1484  * knows that the cgroup won't be removed, as cgroup_rmdir()
1485  * needs that mutex.
1486  *
1487  * A cgroup can only be deleted if both its 'count' of using tasks
1488  * is zero, and its list of 'children' cgroups is empty.  Since all
1489  * tasks in the system use _some_ cgroup, and since there is always at
1490  * least one task in the system (init, pid == 1), therefore, root cgroup
1491  * always has either children cgroups and/or using tasks.  So we don't
1492  * need a special hack to ensure that root cgroup cannot be deleted.
1493  *
1494  * P.S.  One more locking exception.  RCU is used to guard the
1495  * update of a tasks cgroup pointer by cgroup_attach_task()
1496  */
1497 
1498 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
1499 
1500 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1501 			      char *buf)
1502 {
1503 	struct cgroup_subsys *ss = cft->ss;
1504 
1505 	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
1506 	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
1507 		const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";
1508 
1509 		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
1510 			 dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1511 			 cft->name);
1512 	} else {
1513 		strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1514 	}
1515 	return buf;
1516 }
1517 
1518 /**
1519  * cgroup_file_mode - deduce file mode of a control file
1520  * @cft: the control file in question
1521  *
1522  * S_IRUGO for read, S_IWUSR for write.
1523  */
1524 static umode_t cgroup_file_mode(const struct cftype *cft)
1525 {
1526 	umode_t mode = 0;
1527 
1528 	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
1529 		mode |= S_IRUGO;
1530 
1531 	if (cft->write_u64 || cft->write_s64 || cft->write) {
1532 		if (cft->flags & CFTYPE_WORLD_WRITABLE)
1533 			mode |= S_IWUGO;
1534 		else
1535 			mode |= S_IWUSR;
1536 	}
1537 
1538 	return mode;
1539 }
1540 
1541 /**
1542  * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
1543  * @subtree_control: the new subtree_control mask to consider
1544  * @this_ss_mask: available subsystems
1545  *
1546  * On the default hierarchy, a subsystem may request other subsystems to be
1547  * enabled together through its ->depends_on mask.  In such cases, more
1548  * subsystems than specified in "cgroup.subtree_control" may be enabled.
1549  *
1550  * This function calculates which subsystems need to be enabled if
1551  * @subtree_control is to be applied while restricted to @this_ss_mask.
1552  */
1553 static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
1554 {
1555 	u16 cur_ss_mask = subtree_control;
1556 	struct cgroup_subsys *ss;
1557 	int ssid;
1558 
1559 	lockdep_assert_held(&cgroup_mutex);
1560 
1561 	cur_ss_mask |= cgrp_dfl_implicit_ss_mask;
1562 
1563 	while (true) {
1564 		u16 new_ss_mask = cur_ss_mask;
1565 
1566 		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
1567 			new_ss_mask |= ss->depends_on;
1568 		} while_each_subsys_mask();
1569 
1570 		/*
1571 		 * Mask out subsystems which aren't available.  This can
1572 		 * happen only if some depended-upon subsystems were bound
1573 		 * to non-default hierarchies.
1574 		 */
1575 		new_ss_mask &= this_ss_mask;
1576 
1577 		if (new_ss_mask == cur_ss_mask)
1578 			break;
1579 		cur_ss_mask = new_ss_mask;
1580 	}
1581 
1582 	return cur_ss_mask;
1583 }
1584 
1585 /**
1586  * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
1587  * @kn: the kernfs_node being serviced
1588  *
1589  * This helper undoes cgroup_kn_lock_live() and should be invoked before
1590  * the method finishes if locking succeeded.  Note that once this function
1591  * returns the cgroup returned by cgroup_kn_lock_live() may become
1592  * inaccessible any time.  If the caller intends to continue to access the
1593  * cgroup, it should pin it before invoking this function.
1594  */
1595 void cgroup_kn_unlock(struct kernfs_node *kn)
1596 {
1597 	struct cgroup *cgrp;
1598 
1599 	if (kernfs_type(kn) == KERNFS_DIR)
1600 		cgrp = kn->priv;
1601 	else
1602 		cgrp = kn->parent->priv;
1603 
1604 	cgroup_unlock();
1605 
1606 	kernfs_unbreak_active_protection(kn);
1607 	cgroup_put(cgrp);
1608 }
1609 
1610 /**
1611  * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
1612  * @kn: the kernfs_node being serviced
1613  * @drain_offline: perform offline draining on the cgroup
1614  *
1615  * This helper is to be used by a cgroup kernfs method currently servicing
1616  * @kn.  It breaks the active protection, performs cgroup locking and
1617  * verifies that the associated cgroup is alive.  Returns the cgroup if
1618  * alive; otherwise, %NULL.  A successful return should be undone by a
1619  * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
1620  * cgroup is drained of offlining csses before return.
1621  *
1622  * Any cgroup kernfs method implementation which requires locking the
1623  * associated cgroup should use this helper.  It avoids nesting cgroup
1624  * locking under kernfs active protection and allows all kernfs operations
1625  * including self-removal.
1626  */
1627 struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
1628 {
1629 	struct cgroup *cgrp;
1630 
1631 	if (kernfs_type(kn) == KERNFS_DIR)
1632 		cgrp = kn->priv;
1633 	else
1634 		cgrp = kn->parent->priv;
1635 
1636 	/*
1637 	 * We're gonna grab cgroup_mutex which nests outside kernfs
1638 	 * active_ref.  cgroup liveliness check alone provides enough
1639 	 * protection against removal.  Ensure @cgrp stays accessible and
1640 	 * break the active_ref protection.
1641 	 */
1642 	if (!cgroup_tryget(cgrp))
1643 		return NULL;
1644 	kernfs_break_active_protection(kn);
1645 
1646 	if (drain_offline)
1647 		cgroup_lock_and_drain_offline(cgrp);
1648 	else
1649 		cgroup_lock();
1650 
1651 	if (!cgroup_is_dead(cgrp))
1652 		return cgrp;
1653 
1654 	cgroup_kn_unlock(kn);
1655 	return NULL;
1656 }
1657 
1658 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1659 {
1660 	char name[CGROUP_FILE_NAME_MAX];
1661 
1662 	lockdep_assert_held(&cgroup_mutex);
1663 
1664 	if (cft->file_offset) {
1665 		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1666 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
1667 
1668 		spin_lock_irq(&cgroup_file_kn_lock);
1669 		cfile->kn = NULL;
1670 		spin_unlock_irq(&cgroup_file_kn_lock);
1671 
1672 		del_timer_sync(&cfile->notify_timer);
1673 	}
1674 
1675 	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1676 }
1677 
1678 /**
1679  * css_clear_dir - remove subsys files in a cgroup directory
1680  * @css: target css
1681  */
1682 static void css_clear_dir(struct cgroup_subsys_state *css)
1683 {
1684 	struct cgroup *cgrp = css->cgroup;
1685 	struct cftype *cfts;
1686 
1687 	if (!(css->flags & CSS_VISIBLE))
1688 		return;
1689 
1690 	css->flags &= ~CSS_VISIBLE;
1691 
1692 	if (!css->ss) {
1693 		if (cgroup_on_dfl(cgrp)) {
1694 			cgroup_addrm_files(css, cgrp,
1695 					   cgroup_base_files, false);
1696 			if (cgroup_psi_enabled())
1697 				cgroup_addrm_files(css, cgrp,
1698 						   cgroup_psi_files, false);
1699 		} else {
1700 			cgroup_addrm_files(css, cgrp,
1701 					   cgroup1_base_files, false);
1702 		}
1703 	} else {
1704 		list_for_each_entry(cfts, &css->ss->cfts, node)
1705 			cgroup_addrm_files(css, cgrp, cfts, false);
1706 	}
1707 }
1708 
1709 /**
1710  * css_populate_dir - create subsys files in a cgroup directory
1711  * @css: target css
1712  *
1713  * On failure, no file is added.
1714  */
1715 static int css_populate_dir(struct cgroup_subsys_state *css)
1716 {
1717 	struct cgroup *cgrp = css->cgroup;
1718 	struct cftype *cfts, *failed_cfts;
1719 	int ret;
1720 
1721 	if (css->flags & CSS_VISIBLE)
1722 		return 0;
1723 
1724 	if (!css->ss) {
1725 		if (cgroup_on_dfl(cgrp)) {
1726 			ret = cgroup_addrm_files(css, cgrp,
1727 						 cgroup_base_files, true);
1728 			if (ret < 0)
1729 				return ret;
1730 
1731 			if (cgroup_psi_enabled()) {
1732 				ret = cgroup_addrm_files(css, cgrp,
1733 							 cgroup_psi_files, true);
1734 				if (ret < 0)
1735 					return ret;
1736 			}
1737 		} else {
1738 			ret = cgroup_addrm_files(css, cgrp,
1739 						 cgroup1_base_files, true);
1740 			if (ret < 0)
1741 				return ret;
1742 		}
1743 	} else {
1744 		list_for_each_entry(cfts, &css->ss->cfts, node) {
1745 			ret = cgroup_addrm_files(css, cgrp, cfts, true);
1746 			if (ret < 0) {
1747 				failed_cfts = cfts;
1748 				goto err;
1749 			}
1750 		}
1751 	}
1752 
1753 	css->flags |= CSS_VISIBLE;
1754 
1755 	return 0;
1756 err:
1757 	list_for_each_entry(cfts, &css->ss->cfts, node) {
1758 		if (cfts == failed_cfts)
1759 			break;
1760 		cgroup_addrm_files(css, cgrp, cfts, false);
1761 	}
1762 	return ret;
1763 }
1764 
1765 int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
1766 {
1767 	struct cgroup *dcgrp = &dst_root->cgrp;
1768 	struct cgroup_subsys *ss;
1769 	int ssid, ret;
1770 	u16 dfl_disable_ss_mask = 0;
1771 
1772 	lockdep_assert_held(&cgroup_mutex);
1773 
1774 	do_each_subsys_mask(ss, ssid, ss_mask) {
1775 		/*
1776 		 * If @ss has non-root csses attached to it, can't move.
1777 		 * If @ss is an implicit controller, it is exempt from this
1778 		 * rule and can be stolen.
1779 		 */
1780 		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
1781 		    !ss->implicit_on_dfl)
1782 			return -EBUSY;
1783 
1784 		/* can't move between two non-dummy roots either */
1785 		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1786 			return -EBUSY;
1787 
1788 		/*
1789 		 * Collect ssid's that need to be disabled from default
1790 		 * hierarchy.
1791 		 */
1792 		if (ss->root == &cgrp_dfl_root)
1793 			dfl_disable_ss_mask |= 1 << ssid;
1794 
1795 	} while_each_subsys_mask();
1796 
1797 	if (dfl_disable_ss_mask) {
1798 		struct cgroup *scgrp = &cgrp_dfl_root.cgrp;
1799 
1800 		/*
1801 		 * Controllers from default hierarchy that need to be rebound
1802 		 * are all disabled together in one go.
1803 		 */
1804 		cgrp_dfl_root.subsys_mask &= ~dfl_disable_ss_mask;
1805 		WARN_ON(cgroup_apply_control(scgrp));
1806 		cgroup_finalize_control(scgrp, 0);
1807 	}
1808 
1809 	do_each_subsys_mask(ss, ssid, ss_mask) {
1810 		struct cgroup_root *src_root = ss->root;
1811 		struct cgroup *scgrp = &src_root->cgrp;
1812 		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
1813 		struct css_set *cset, *cset_pos;
1814 		struct css_task_iter *it;
1815 
1816 		WARN_ON(!css || cgroup_css(dcgrp, ss));
1817 
1818 		if (src_root != &cgrp_dfl_root) {
1819 			/* disable from the source */
1820 			src_root->subsys_mask &= ~(1 << ssid);
1821 			WARN_ON(cgroup_apply_control(scgrp));
1822 			cgroup_finalize_control(scgrp, 0);
1823 		}
1824 
1825 		/* rebind */
1826 		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
1827 		rcu_assign_pointer(dcgrp->subsys[ssid], css);
1828 		ss->root = dst_root;
1829 		css->cgroup = dcgrp;
1830 
1831 		spin_lock_irq(&css_set_lock);
1832 		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
1833 		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
1834 					 e_cset_node[ss->id]) {
1835 			list_move_tail(&cset->e_cset_node[ss->id],
1836 				       &dcgrp->e_csets[ss->id]);
1837 			/*
1838 			 * all css_sets of scgrp together in same order to dcgrp,
1839 			 * patch in-flight iterators to preserve correct iteration.
1840 			 * since the iterator is always advanced right away and
1841 			 * finished when it->cset_pos meets it->cset_head, so only
1842 			 * update it->cset_head is enough here.
1843 			 */
1844 			list_for_each_entry(it, &cset->task_iters, iters_node)
1845 				if (it->cset_head == &scgrp->e_csets[ss->id])
1846 					it->cset_head = &dcgrp->e_csets[ss->id];
1847 		}
1848 		spin_unlock_irq(&css_set_lock);
1849 
1850 		if (ss->css_rstat_flush) {
1851 			list_del_rcu(&css->rstat_css_node);
1852 			synchronize_rcu();
1853 			list_add_rcu(&css->rstat_css_node,
1854 				     &dcgrp->rstat_css_list);
1855 		}
1856 
1857 		/* default hierarchy doesn't enable controllers by default */
1858 		dst_root->subsys_mask |= 1 << ssid;
1859 		if (dst_root == &cgrp_dfl_root) {
1860 			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
1861 		} else {
1862 			dcgrp->subtree_control |= 1 << ssid;
1863 			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
1864 		}
1865 
1866 		ret = cgroup_apply_control(dcgrp);
1867 		if (ret)
1868 			pr_warn("partial failure to rebind %s controller (err=%d)\n",
1869 				ss->name, ret);
1870 
1871 		if (ss->bind)
1872 			ss->bind(css);
1873 	} while_each_subsys_mask();
1874 
1875 	kernfs_activate(dcgrp->kn);
1876 	return 0;
1877 }
1878 
1879 int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
1880 		     struct kernfs_root *kf_root)
1881 {
1882 	int len = 0;
1883 	char *buf = NULL;
1884 	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
1885 	struct cgroup *ns_cgroup;
1886 
1887 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
1888 	if (!buf)
1889 		return -ENOMEM;
1890 
1891 	spin_lock_irq(&css_set_lock);
1892 	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
1893 	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
1894 	spin_unlock_irq(&css_set_lock);
1895 
1896 	if (len >= PATH_MAX)
1897 		len = -ERANGE;
1898 	else if (len > 0) {
1899 		seq_escape(sf, buf, " \t\n\\");
1900 		len = 0;
1901 	}
1902 	kfree(buf);
1903 	return len;
1904 }
1905 
1906 enum cgroup2_param {
1907 	Opt_nsdelegate,
1908 	Opt_favordynmods,
1909 	Opt_memory_localevents,
1910 	Opt_memory_recursiveprot,
1911 	nr__cgroup2_params
1912 };
1913 
1914 static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
1915 	fsparam_flag("nsdelegate",		Opt_nsdelegate),
1916 	fsparam_flag("favordynmods",		Opt_favordynmods),
1917 	fsparam_flag("memory_localevents",	Opt_memory_localevents),
1918 	fsparam_flag("memory_recursiveprot",	Opt_memory_recursiveprot),
1919 	{}
1920 };
1921 
1922 static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param)
1923 {
1924 	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
1925 	struct fs_parse_result result;
1926 	int opt;
1927 
1928 	opt = fs_parse(fc, cgroup2_fs_parameters, param, &result);
1929 	if (opt < 0)
1930 		return opt;
1931 
1932 	switch (opt) {
1933 	case Opt_nsdelegate:
1934 		ctx->flags |= CGRP_ROOT_NS_DELEGATE;
1935 		return 0;
1936 	case Opt_favordynmods:
1937 		ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
1938 		return 0;
1939 	case Opt_memory_localevents:
1940 		ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
1941 		return 0;
1942 	case Opt_memory_recursiveprot:
1943 		ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
1944 		return 0;
1945 	}
1946 	return -EINVAL;
1947 }
1948 
1949 static void apply_cgroup_root_flags(unsigned int root_flags)
1950 {
1951 	if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
1952 		if (root_flags & CGRP_ROOT_NS_DELEGATE)
1953 			cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
1954 		else
1955 			cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
1956 
1957 		cgroup_favor_dynmods(&cgrp_dfl_root,
1958 				     root_flags & CGRP_ROOT_FAVOR_DYNMODS);
1959 
1960 		if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1961 			cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
1962 		else
1963 			cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
1964 
1965 		if (root_flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
1966 			cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
1967 		else
1968 			cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
1969 	}
1970 }
1971 
1972 static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
1973 {
1974 	if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
1975 		seq_puts(seq, ",nsdelegate");
1976 	if (cgrp_dfl_root.flags & CGRP_ROOT_FAVOR_DYNMODS)
1977 		seq_puts(seq, ",favordynmods");
1978 	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1979 		seq_puts(seq, ",memory_localevents");
1980 	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
1981 		seq_puts(seq, ",memory_recursiveprot");
1982 	return 0;
1983 }
1984 
1985 static int cgroup_reconfigure(struct fs_context *fc)
1986 {
1987 	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
1988 
1989 	apply_cgroup_root_flags(ctx->flags);
1990 	return 0;
1991 }
1992 
1993 static void init_cgroup_housekeeping(struct cgroup *cgrp)
1994 {
1995 	struct cgroup_subsys *ss;
1996 	int ssid;
1997 
1998 	INIT_LIST_HEAD(&cgrp->self.sibling);
1999 	INIT_LIST_HEAD(&cgrp->self.children);
2000 	INIT_LIST_HEAD(&cgrp->cset_links);
2001 	INIT_LIST_HEAD(&cgrp->pidlists);
2002 	mutex_init(&cgrp->pidlist_mutex);
2003 	cgrp->self.cgroup = cgrp;
2004 	cgrp->self.flags |= CSS_ONLINE;
2005 	cgrp->dom_cgrp = cgrp;
2006 	cgrp->max_descendants = INT_MAX;
2007 	cgrp->max_depth = INT_MAX;
2008 	INIT_LIST_HEAD(&cgrp->rstat_css_list);
2009 	prev_cputime_init(&cgrp->prev_cputime);
2010 
2011 	for_each_subsys(ss, ssid)
2012 		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
2013 
2014 	init_waitqueue_head(&cgrp->offline_waitq);
2015 	INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent);
2016 }
2017 
2018 void init_cgroup_root(struct cgroup_fs_context *ctx)
2019 {
2020 	struct cgroup_root *root = ctx->root;
2021 	struct cgroup *cgrp = &root->cgrp;
2022 
2023 	INIT_LIST_HEAD(&root->root_list);
2024 	atomic_set(&root->nr_cgrps, 1);
2025 	cgrp->root = root;
2026 	init_cgroup_housekeeping(cgrp);
2027 
2028 	/* DYNMODS must be modified through cgroup_favor_dynmods() */
2029 	root->flags = ctx->flags & ~CGRP_ROOT_FAVOR_DYNMODS;
2030 	if (ctx->release_agent)
2031 		strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX);
2032 	if (ctx->name)
2033 		strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN);
2034 	if (ctx->cpuset_clone_children)
2035 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
2036 }
2037 
2038 int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
2039 {
2040 	LIST_HEAD(tmp_links);
2041 	struct cgroup *root_cgrp = &root->cgrp;
2042 	struct kernfs_syscall_ops *kf_sops;
2043 	struct css_set *cset;
2044 	int i, ret;
2045 
2046 	lockdep_assert_held(&cgroup_mutex);
2047 
2048 	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
2049 			      0, GFP_KERNEL);
2050 	if (ret)
2051 		goto out;
2052 
2053 	/*
2054 	 * We're accessing css_set_count without locking css_set_lock here,
2055 	 * but that's OK - it can only be increased by someone holding
2056 	 * cgroup_lock, and that's us.  Later rebinding may disable
2057 	 * controllers on the default hierarchy and thus create new csets,
2058 	 * which can't be more than the existing ones.  Allocate 2x.
2059 	 */
2060 	ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
2061 	if (ret)
2062 		goto cancel_ref;
2063 
2064 	ret = cgroup_init_root_id(root);
2065 	if (ret)
2066 		goto cancel_ref;
2067 
2068 	kf_sops = root == &cgrp_dfl_root ?
2069 		&cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops;
2070 
2071 	root->kf_root = kernfs_create_root(kf_sops,
2072 					   KERNFS_ROOT_CREATE_DEACTIVATED |
2073 					   KERNFS_ROOT_SUPPORT_EXPORTOP |
2074 					   KERNFS_ROOT_SUPPORT_USER_XATTR,
2075 					   root_cgrp);
2076 	if (IS_ERR(root->kf_root)) {
2077 		ret = PTR_ERR(root->kf_root);
2078 		goto exit_root_id;
2079 	}
2080 	root_cgrp->kn = kernfs_root_to_node(root->kf_root);
2081 	WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1);
2082 	root_cgrp->ancestors[0] = root_cgrp;
2083 
2084 	ret = css_populate_dir(&root_cgrp->self);
2085 	if (ret)
2086 		goto destroy_root;
2087 
2088 	ret = cgroup_rstat_init(root_cgrp);
2089 	if (ret)
2090 		goto destroy_root;
2091 
2092 	ret = rebind_subsystems(root, ss_mask);
2093 	if (ret)
2094 		goto exit_stats;
2095 
2096 	ret = cgroup_bpf_inherit(root_cgrp);
2097 	WARN_ON_ONCE(ret);
2098 
2099 	trace_cgroup_setup_root(root);
2100 
2101 	/*
2102 	 * There must be no failure case after here, since rebinding takes
2103 	 * care of subsystems' refcounts, which are explicitly dropped in
2104 	 * the failure exit path.
2105 	 */
2106 	list_add(&root->root_list, &cgroup_roots);
2107 	cgroup_root_count++;
2108 
2109 	/*
2110 	 * Link the root cgroup in this hierarchy into all the css_set
2111 	 * objects.
2112 	 */
2113 	spin_lock_irq(&css_set_lock);
2114 	hash_for_each(css_set_table, i, cset, hlist) {
2115 		link_css_set(&tmp_links, cset, root_cgrp);
2116 		if (css_set_populated(cset))
2117 			cgroup_update_populated(root_cgrp, true);
2118 	}
2119 	spin_unlock_irq(&css_set_lock);
2120 
2121 	BUG_ON(!list_empty(&root_cgrp->self.children));
2122 	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
2123 
2124 	ret = 0;
2125 	goto out;
2126 
2127 exit_stats:
2128 	cgroup_rstat_exit(root_cgrp);
2129 destroy_root:
2130 	kernfs_destroy_root(root->kf_root);
2131 	root->kf_root = NULL;
2132 exit_root_id:
2133 	cgroup_exit_root_id(root);
2134 cancel_ref:
2135 	percpu_ref_exit(&root_cgrp->self.refcnt);
2136 out:
2137 	free_cgrp_cset_links(&tmp_links);
2138 	return ret;
2139 }
2140 
2141 int cgroup_do_get_tree(struct fs_context *fc)
2142 {
2143 	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2144 	int ret;
2145 
2146 	ctx->kfc.root = ctx->root->kf_root;
2147 	if (fc->fs_type == &cgroup2_fs_type)
2148 		ctx->kfc.magic = CGROUP2_SUPER_MAGIC;
2149 	else
2150 		ctx->kfc.magic = CGROUP_SUPER_MAGIC;
2151 	ret = kernfs_get_tree(fc);
2152 
2153 	/*
2154 	 * In non-init cgroup namespace, instead of root cgroup's dentry,
2155 	 * we return the dentry corresponding to the cgroupns->root_cgrp.
2156 	 */
2157 	if (!ret && ctx->ns != &init_cgroup_ns) {
2158 		struct dentry *nsdentry;
2159 		struct super_block *sb = fc->root->d_sb;
2160 		struct cgroup *cgrp;
2161 
2162 		cgroup_lock();
2163 		spin_lock_irq(&css_set_lock);
2164 
2165 		cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root);
2166 
2167 		spin_unlock_irq(&css_set_lock);
2168 		cgroup_unlock();
2169 
2170 		nsdentry = kernfs_node_dentry(cgrp->kn, sb);
2171 		dput(fc->root);
2172 		if (IS_ERR(nsdentry)) {
2173 			deactivate_locked_super(sb);
2174 			ret = PTR_ERR(nsdentry);
2175 			nsdentry = NULL;
2176 		}
2177 		fc->root = nsdentry;
2178 	}
2179 
2180 	if (!ctx->kfc.new_sb_created)
2181 		cgroup_put(&ctx->root->cgrp);
2182 
2183 	return ret;
2184 }
2185 
2186 /*
2187  * Destroy a cgroup filesystem context.
2188  */
2189 static void cgroup_fs_context_free(struct fs_context *fc)
2190 {
2191 	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2192 
2193 	kfree(ctx->name);
2194 	kfree(ctx->release_agent);
2195 	put_cgroup_ns(ctx->ns);
2196 	kernfs_free_fs_context(fc);
2197 	kfree(ctx);
2198 }
2199 
2200 static int cgroup_get_tree(struct fs_context *fc)
2201 {
2202 	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
2203 	int ret;
2204 
2205 	WRITE_ONCE(cgrp_dfl_visible, true);
2206 	cgroup_get_live(&cgrp_dfl_root.cgrp);
2207 	ctx->root = &cgrp_dfl_root;
2208 
2209 	ret = cgroup_do_get_tree(fc);
2210 	if (!ret)
2211 		apply_cgroup_root_flags(ctx->flags);
2212 	return ret;
2213 }
2214 
2215 static const struct fs_context_operations cgroup_fs_context_ops = {
2216 	.free		= cgroup_fs_context_free,
2217 	.parse_param	= cgroup2_parse_param,
2218 	.get_tree	= cgroup_get_tree,
2219 	.reconfigure	= cgroup_reconfigure,
2220 };
2221 
2222 static const struct fs_context_operations cgroup1_fs_context_ops = {
2223 	.free		= cgroup_fs_context_free,
2224 	.parse_param	= cgroup1_parse_param,
2225 	.get_tree	= cgroup1_get_tree,
2226 	.reconfigure	= cgroup1_reconfigure,
2227 };
2228 
2229 /*
2230  * Initialise the cgroup filesystem creation/reconfiguration context.  Notably,
2231  * we select the namespace we're going to use.
2232  */
2233 static int cgroup_init_fs_context(struct fs_context *fc)
2234 {
2235 	struct cgroup_fs_context *ctx;
2236 
2237 	ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL);
2238 	if (!ctx)
2239 		return -ENOMEM;
2240 
2241 	ctx->ns = current->nsproxy->cgroup_ns;
2242 	get_cgroup_ns(ctx->ns);
2243 	fc->fs_private = &ctx->kfc;
2244 	if (fc->fs_type == &cgroup2_fs_type)
2245 		fc->ops = &cgroup_fs_context_ops;
2246 	else
2247 		fc->ops = &cgroup1_fs_context_ops;
2248 	put_user_ns(fc->user_ns);
2249 	fc->user_ns = get_user_ns(ctx->ns->user_ns);
2250 	fc->global = true;
2251 
2252 	if (have_favordynmods)
2253 		ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
2254 
2255 	return 0;
2256 }
2257 
2258 static void cgroup_kill_sb(struct super_block *sb)
2259 {
2260 	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
2261 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
2262 
2263 	/*
2264 	 * If @root doesn't have any children, start killing it.
2265 	 * This prevents new mounts by disabling percpu_ref_tryget_live().
2266 	 *
2267 	 * And don't kill the default root.
2268 	 */
2269 	if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
2270 	    !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
2271 		cgroup_bpf_offline(&root->cgrp);
2272 		percpu_ref_kill(&root->cgrp.self.refcnt);
2273 	}
2274 	cgroup_put(&root->cgrp);
2275 	kernfs_kill_sb(sb);
2276 }
2277 
2278 struct file_system_type cgroup_fs_type = {
2279 	.name			= "cgroup",
2280 	.init_fs_context	= cgroup_init_fs_context,
2281 	.parameters		= cgroup1_fs_parameters,
2282 	.kill_sb		= cgroup_kill_sb,
2283 	.fs_flags		= FS_USERNS_MOUNT,
2284 };
2285 
2286 static struct file_system_type cgroup2_fs_type = {
2287 	.name			= "cgroup2",
2288 	.init_fs_context	= cgroup_init_fs_context,
2289 	.parameters		= cgroup2_fs_parameters,
2290 	.kill_sb		= cgroup_kill_sb,
2291 	.fs_flags		= FS_USERNS_MOUNT,
2292 };
2293 
2294 #ifdef CONFIG_CPUSETS
2295 static const struct fs_context_operations cpuset_fs_context_ops = {
2296 	.get_tree	= cgroup1_get_tree,
2297 	.free		= cgroup_fs_context_free,
2298 };
2299 
2300 /*
2301  * This is ugly, but preserves the userspace API for existing cpuset
2302  * users. If someone tries to mount the "cpuset" filesystem, we
2303  * silently switch it to mount "cgroup" instead
2304  */
2305 static int cpuset_init_fs_context(struct fs_context *fc)
2306 {
2307 	char *agent = kstrdup("/sbin/cpuset_release_agent", GFP_USER);
2308 	struct cgroup_fs_context *ctx;
2309 	int err;
2310 
2311 	err = cgroup_init_fs_context(fc);
2312 	if (err) {
2313 		kfree(agent);
2314 		return err;
2315 	}
2316 
2317 	fc->ops = &cpuset_fs_context_ops;
2318 
2319 	ctx = cgroup_fc2context(fc);
2320 	ctx->subsys_mask = 1 << cpuset_cgrp_id;
2321 	ctx->flags |= CGRP_ROOT_NOPREFIX;
2322 	ctx->release_agent = agent;
2323 
2324 	get_filesystem(&cgroup_fs_type);
2325 	put_filesystem(fc->fs_type);
2326 	fc->fs_type = &cgroup_fs_type;
2327 
2328 	return 0;
2329 }
2330 
2331 static struct file_system_type cpuset_fs_type = {
2332 	.name			= "cpuset",
2333 	.init_fs_context	= cpuset_init_fs_context,
2334 	.fs_flags		= FS_USERNS_MOUNT,
2335 };
2336 #endif
2337 
2338 int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
2339 			  struct cgroup_namespace *ns)
2340 {
2341 	struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
2342 
2343 	return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
2344 }
2345 
2346 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
2347 		   struct cgroup_namespace *ns)
2348 {
2349 	int ret;
2350 
2351 	cgroup_lock();
2352 	spin_lock_irq(&css_set_lock);
2353 
2354 	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
2355 
2356 	spin_unlock_irq(&css_set_lock);
2357 	cgroup_unlock();
2358 
2359 	return ret;
2360 }
2361 EXPORT_SYMBOL_GPL(cgroup_path_ns);
2362 
2363 /**
2364  * cgroup_attach_lock - Lock for ->attach()
2365  * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
2366  *
2367  * cgroup migration sometimes needs to stabilize threadgroups against forks and
2368  * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
2369  * implementations (e.g. cpuset), also need to disable CPU hotplug.
2370  * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
2371  * lead to deadlocks.
2372  *
2373  * Bringing up a CPU may involve creating and destroying tasks which requires
2374  * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
2375  * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
2376  * write-locking threadgroup_rwsem, the locking order is reversed and we end up
2377  * waiting for an on-going CPU hotplug operation which in turn is waiting for
2378  * the threadgroup_rwsem to be released to create new tasks. For more details:
2379  *
2380  *   http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
2381  *
2382  * Resolve the situation by always acquiring cpus_read_lock() before optionally
2383  * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
2384  * CPU hotplug is disabled on entry.
2385  */
2386 void cgroup_attach_lock(bool lock_threadgroup)
2387 {
2388 	cpus_read_lock();
2389 	if (lock_threadgroup)
2390 		percpu_down_write(&cgroup_threadgroup_rwsem);
2391 }
2392 
2393 /**
2394  * cgroup_attach_unlock - Undo cgroup_attach_lock()
2395  * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
2396  */
2397 void cgroup_attach_unlock(bool lock_threadgroup)
2398 {
2399 	if (lock_threadgroup)
2400 		percpu_up_write(&cgroup_threadgroup_rwsem);
2401 	cpus_read_unlock();
2402 }
2403 
2404 /**
2405  * cgroup_migrate_add_task - add a migration target task to a migration context
2406  * @task: target task
2407  * @mgctx: target migration context
2408  *
2409  * Add @task, which is a migration target, to @mgctx->tset.  This function
2410  * becomes noop if @task doesn't need to be migrated.  @task's css_set
2411  * should have been added as a migration source and @task->cg_list will be
2412  * moved from the css_set's tasks list to mg_tasks one.
2413  */
2414 static void cgroup_migrate_add_task(struct task_struct *task,
2415 				    struct cgroup_mgctx *mgctx)
2416 {
2417 	struct css_set *cset;
2418 
2419 	lockdep_assert_held(&css_set_lock);
2420 
2421 	/* @task either already exited or can't exit until the end */
2422 	if (task->flags & PF_EXITING)
2423 		return;
2424 
2425 	/* cgroup_threadgroup_rwsem protects racing against forks */
2426 	WARN_ON_ONCE(list_empty(&task->cg_list));
2427 
2428 	cset = task_css_set(task);
2429 	if (!cset->mg_src_cgrp)
2430 		return;
2431 
2432 	mgctx->tset.nr_tasks++;
2433 
2434 	list_move_tail(&task->cg_list, &cset->mg_tasks);
2435 	if (list_empty(&cset->mg_node))
2436 		list_add_tail(&cset->mg_node,
2437 			      &mgctx->tset.src_csets);
2438 	if (list_empty(&cset->mg_dst_cset->mg_node))
2439 		list_add_tail(&cset->mg_dst_cset->mg_node,
2440 			      &mgctx->tset.dst_csets);
2441 }
2442 
2443 /**
2444  * cgroup_taskset_first - reset taskset and return the first task
2445  * @tset: taskset of interest
2446  * @dst_cssp: output variable for the destination css
2447  *
2448  * @tset iteration is initialized and the first task is returned.
2449  */
2450 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2451 					 struct cgroup_subsys_state **dst_cssp)
2452 {
2453 	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2454 	tset->cur_task = NULL;
2455 
2456 	return cgroup_taskset_next(tset, dst_cssp);
2457 }
2458 
2459 /**
2460  * cgroup_taskset_next - iterate to the next task in taskset
2461  * @tset: taskset of interest
2462  * @dst_cssp: output variable for the destination css
2463  *
2464  * Return the next task in @tset.  Iteration must have been initialized
2465  * with cgroup_taskset_first().
2466  */
2467 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2468 					struct cgroup_subsys_state **dst_cssp)
2469 {
2470 	struct css_set *cset = tset->cur_cset;
2471 	struct task_struct *task = tset->cur_task;
2472 
2473 	while (CGROUP_HAS_SUBSYS_CONFIG && &cset->mg_node != tset->csets) {
2474 		if (!task)
2475 			task = list_first_entry(&cset->mg_tasks,
2476 						struct task_struct, cg_list);
2477 		else
2478 			task = list_next_entry(task, cg_list);
2479 
2480 		if (&task->cg_list != &cset->mg_tasks) {
2481 			tset->cur_cset = cset;
2482 			tset->cur_task = task;
2483 
2484 			/*
2485 			 * This function may be called both before and
2486 			 * after cgroup_migrate_execute().  The two cases
2487 			 * can be distinguished by looking at whether @cset
2488 			 * has its ->mg_dst_cset set.
2489 			 */
2490 			if (cset->mg_dst_cset)
2491 				*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2492 			else
2493 				*dst_cssp = cset->subsys[tset->ssid];
2494 
2495 			return task;
2496 		}
2497 
2498 		cset = list_next_entry(cset, mg_node);
2499 		task = NULL;
2500 	}
2501 
2502 	return NULL;
2503 }
2504 
2505 /**
2506  * cgroup_migrate_execute - migrate a taskset
2507  * @mgctx: migration context
2508  *
2509  * Migrate tasks in @mgctx as setup by migration preparation functions.
2510  * This function fails iff one of the ->can_attach callbacks fails and
2511  * guarantees that either all or none of the tasks in @mgctx are migrated.
2512  * @mgctx is consumed regardless of success.
2513  */
2514 static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
2515 {
2516 	struct cgroup_taskset *tset = &mgctx->tset;
2517 	struct cgroup_subsys *ss;
2518 	struct task_struct *task, *tmp_task;
2519 	struct css_set *cset, *tmp_cset;
2520 	int ssid, failed_ssid, ret;
2521 
2522 	/* check that we can legitimately attach to the cgroup */
2523 	if (tset->nr_tasks) {
2524 		do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2525 			if (ss->can_attach) {
2526 				tset->ssid = ssid;
2527 				ret = ss->can_attach(tset);
2528 				if (ret) {
2529 					failed_ssid = ssid;
2530 					goto out_cancel_attach;
2531 				}
2532 			}
2533 		} while_each_subsys_mask();
2534 	}
2535 
2536 	/*
2537 	 * Now that we're guaranteed success, proceed to move all tasks to
2538 	 * the new cgroup.  There are no failure cases after here, so this
2539 	 * is the commit point.
2540 	 */
2541 	spin_lock_irq(&css_set_lock);
2542 	list_for_each_entry(cset, &tset->src_csets, mg_node) {
2543 		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2544 			struct css_set *from_cset = task_css_set(task);
2545 			struct css_set *to_cset = cset->mg_dst_cset;
2546 
2547 			get_css_set(to_cset);
2548 			to_cset->nr_tasks++;
2549 			css_set_move_task(task, from_cset, to_cset, true);
2550 			from_cset->nr_tasks--;
2551 			/*
2552 			 * If the source or destination cgroup is frozen,
2553 			 * the task might require to change its state.
2554 			 */
2555 			cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp,
2556 						    to_cset->dfl_cgrp);
2557 			put_css_set_locked(from_cset);
2558 
2559 		}
2560 	}
2561 	spin_unlock_irq(&css_set_lock);
2562 
2563 	/*
2564 	 * Migration is committed, all target tasks are now on dst_csets.
2565 	 * Nothing is sensitive to fork() after this point.  Notify
2566 	 * controllers that migration is complete.
2567 	 */
2568 	tset->csets = &tset->dst_csets;
2569 
2570 	if (tset->nr_tasks) {
2571 		do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2572 			if (ss->attach) {
2573 				tset->ssid = ssid;
2574 				ss->attach(tset);
2575 			}
2576 		} while_each_subsys_mask();
2577 	}
2578 
2579 	ret = 0;
2580 	goto out_release_tset;
2581 
2582 out_cancel_attach:
2583 	if (tset->nr_tasks) {
2584 		do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2585 			if (ssid == failed_ssid)
2586 				break;
2587 			if (ss->cancel_attach) {
2588 				tset->ssid = ssid;
2589 				ss->cancel_attach(tset);
2590 			}
2591 		} while_each_subsys_mask();
2592 	}
2593 out_release_tset:
2594 	spin_lock_irq(&css_set_lock);
2595 	list_splice_init(&tset->dst_csets, &tset->src_csets);
2596 	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2597 		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2598 		list_del_init(&cset->mg_node);
2599 	}
2600 	spin_unlock_irq(&css_set_lock);
2601 
2602 	/*
2603 	 * Re-initialize the cgroup_taskset structure in case it is reused
2604 	 * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
2605 	 * iteration.
2606 	 */
2607 	tset->nr_tasks = 0;
2608 	tset->csets    = &tset->src_csets;
2609 	return ret;
2610 }
2611 
2612 /**
2613  * cgroup_migrate_vet_dst - verify whether a cgroup can be migration destination
2614  * @dst_cgrp: destination cgroup to test
2615  *
2616  * On the default hierarchy, except for the mixable, (possible) thread root
2617  * and threaded cgroups, subtree_control must be zero for migration
2618  * destination cgroups with tasks so that child cgroups don't compete
2619  * against tasks.
2620  */
2621 int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp)
2622 {
2623 	/* v1 doesn't have any restriction */
2624 	if (!cgroup_on_dfl(dst_cgrp))
2625 		return 0;
2626 
2627 	/* verify @dst_cgrp can host resources */
2628 	if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp))
2629 		return -EOPNOTSUPP;
2630 
2631 	/*
2632 	 * If @dst_cgrp is already or can become a thread root or is
2633 	 * threaded, it doesn't matter.
2634 	 */
2635 	if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp))
2636 		return 0;
2637 
2638 	/* apply no-internal-process constraint */
2639 	if (dst_cgrp->subtree_control)
2640 		return -EBUSY;
2641 
2642 	return 0;
2643 }
2644 
2645 /**
2646  * cgroup_migrate_finish - cleanup after attach
2647  * @mgctx: migration context
2648  *
2649  * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
2650  * those functions for details.
2651  */
2652 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
2653 {
2654 	struct css_set *cset, *tmp_cset;
2655 
2656 	lockdep_assert_held(&cgroup_mutex);
2657 
2658 	spin_lock_irq(&css_set_lock);
2659 
2660 	list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets,
2661 				 mg_src_preload_node) {
2662 		cset->mg_src_cgrp = NULL;
2663 		cset->mg_dst_cgrp = NULL;
2664 		cset->mg_dst_cset = NULL;
2665 		list_del_init(&cset->mg_src_preload_node);
2666 		put_css_set_locked(cset);
2667 	}
2668 
2669 	list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets,
2670 				 mg_dst_preload_node) {
2671 		cset->mg_src_cgrp = NULL;
2672 		cset->mg_dst_cgrp = NULL;
2673 		cset->mg_dst_cset = NULL;
2674 		list_del_init(&cset->mg_dst_preload_node);
2675 		put_css_set_locked(cset);
2676 	}
2677 
2678 	spin_unlock_irq(&css_set_lock);
2679 }
2680 
2681 /**
2682  * cgroup_migrate_add_src - add a migration source css_set
2683  * @src_cset: the source css_set to add
2684  * @dst_cgrp: the destination cgroup
2685  * @mgctx: migration context
2686  *
2687  * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
2688  * @src_cset and add it to @mgctx->src_csets, which should later be cleaned
2689  * up by cgroup_migrate_finish().
2690  *
2691  * This function may be called without holding cgroup_threadgroup_rwsem
2692  * even if the target is a process.  Threads may be created and destroyed
2693  * but as long as cgroup_mutex is not dropped, no new css_set can be put
2694  * into play and the preloaded css_sets are guaranteed to cover all
2695  * migrations.
2696  */
2697 void cgroup_migrate_add_src(struct css_set *src_cset,
2698 			    struct cgroup *dst_cgrp,
2699 			    struct cgroup_mgctx *mgctx)
2700 {
2701 	struct cgroup *src_cgrp;
2702 
2703 	lockdep_assert_held(&cgroup_mutex);
2704 	lockdep_assert_held(&css_set_lock);
2705 
2706 	/*
2707 	 * If ->dead, @src_set is associated with one or more dead cgroups
2708 	 * and doesn't contain any migratable tasks.  Ignore it early so
2709 	 * that the rest of migration path doesn't get confused by it.
2710 	 */
2711 	if (src_cset->dead)
2712 		return;
2713 
2714 	if (!list_empty(&src_cset->mg_src_preload_node))
2715 		return;
2716 
2717 	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2718 
2719 	WARN_ON(src_cset->mg_src_cgrp);
2720 	WARN_ON(src_cset->mg_dst_cgrp);
2721 	WARN_ON(!list_empty(&src_cset->mg_tasks));
2722 	WARN_ON(!list_empty(&src_cset->mg_node));
2723 
2724 	src_cset->mg_src_cgrp = src_cgrp;
2725 	src_cset->mg_dst_cgrp = dst_cgrp;
2726 	get_css_set(src_cset);
2727 	list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets);
2728 }
2729 
2730 /**
2731  * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2732  * @mgctx: migration context
2733  *
2734  * Tasks are about to be moved and all the source css_sets have been
2735  * preloaded to @mgctx->preloaded_src_csets.  This function looks up and
2736  * pins all destination css_sets, links each to its source, and append them
2737  * to @mgctx->preloaded_dst_csets.
2738  *
2739  * This function must be called after cgroup_migrate_add_src() has been
2740  * called on each migration source css_set.  After migration is performed
2741  * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2742  * @mgctx.
2743  */
2744 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
2745 {
2746 	struct css_set *src_cset, *tmp_cset;
2747 
2748 	lockdep_assert_held(&cgroup_mutex);
2749 
2750 	/* look up the dst cset for each src cset and link it to src */
2751 	list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
2752 				 mg_src_preload_node) {
2753 		struct css_set *dst_cset;
2754 		struct cgroup_subsys *ss;
2755 		int ssid;
2756 
2757 		dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
2758 		if (!dst_cset)
2759 			return -ENOMEM;
2760 
2761 		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2762 
2763 		/*
2764 		 * If src cset equals dst, it's noop.  Drop the src.
2765 		 * cgroup_migrate() will skip the cset too.  Note that we
2766 		 * can't handle src == dst as some nodes are used by both.
2767 		 */
2768 		if (src_cset == dst_cset) {
2769 			src_cset->mg_src_cgrp = NULL;
2770 			src_cset->mg_dst_cgrp = NULL;
2771 			list_del_init(&src_cset->mg_src_preload_node);
2772 			put_css_set(src_cset);
2773 			put_css_set(dst_cset);
2774 			continue;
2775 		}
2776 
2777 		src_cset->mg_dst_cset = dst_cset;
2778 
2779 		if (list_empty(&dst_cset->mg_dst_preload_node))
2780 			list_add_tail(&dst_cset->mg_dst_preload_node,
2781 				      &mgctx->preloaded_dst_csets);
2782 		else
2783 			put_css_set(dst_cset);
2784 
2785 		for_each_subsys(ss, ssid)
2786 			if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
2787 				mgctx->ss_mask |= 1 << ssid;
2788 	}
2789 
2790 	return 0;
2791 }
2792 
2793 /**
2794  * cgroup_migrate - migrate a process or task to a cgroup
2795  * @leader: the leader of the process or the task to migrate
2796  * @threadgroup: whether @leader points to the whole process or a single task
2797  * @mgctx: migration context
2798  *
2799  * Migrate a process or task denoted by @leader.  If migrating a process,
2800  * the caller must be holding cgroup_threadgroup_rwsem.  The caller is also
2801  * responsible for invoking cgroup_migrate_add_src() and
2802  * cgroup_migrate_prepare_dst() on the targets before invoking this
2803  * function and following up with cgroup_migrate_finish().
2804  *
2805  * As long as a controller's ->can_attach() doesn't fail, this function is
2806  * guaranteed to succeed.  This means that, excluding ->can_attach()
2807  * failure, when migrating multiple targets, the success or failure can be
2808  * decided for all targets by invoking group_migrate_prepare_dst() before
2809  * actually starting migrating.
2810  */
2811 int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2812 		   struct cgroup_mgctx *mgctx)
2813 {
2814 	struct task_struct *task;
2815 
2816 	/*
2817 	 * The following thread iteration should be inside an RCU critical
2818 	 * section to prevent tasks from being freed while taking the snapshot.
2819 	 * spin_lock_irq() implies RCU critical section here.
2820 	 */
2821 	spin_lock_irq(&css_set_lock);
2822 	task = leader;
2823 	do {
2824 		cgroup_migrate_add_task(task, mgctx);
2825 		if (!threadgroup)
2826 			break;
2827 	} while_each_thread(leader, task);
2828 	spin_unlock_irq(&css_set_lock);
2829 
2830 	return cgroup_migrate_execute(mgctx);
2831 }
2832 
2833 /**
2834  * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2835  * @dst_cgrp: the cgroup to attach to
2836  * @leader: the task or the leader of the threadgroup to be attached
2837  * @threadgroup: attach the whole threadgroup?
2838  *
2839  * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2840  */
2841 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
2842 		       bool threadgroup)
2843 {
2844 	DEFINE_CGROUP_MGCTX(mgctx);
2845 	struct task_struct *task;
2846 	int ret = 0;
2847 
2848 	/* look up all src csets */
2849 	spin_lock_irq(&css_set_lock);
2850 	rcu_read_lock();
2851 	task = leader;
2852 	do {
2853 		cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
2854 		if (!threadgroup)
2855 			break;
2856 	} while_each_thread(leader, task);
2857 	rcu_read_unlock();
2858 	spin_unlock_irq(&css_set_lock);
2859 
2860 	/* prepare dst csets and commit */
2861 	ret = cgroup_migrate_prepare_dst(&mgctx);
2862 	if (!ret)
2863 		ret = cgroup_migrate(leader, threadgroup, &mgctx);
2864 
2865 	cgroup_migrate_finish(&mgctx);
2866 
2867 	if (!ret)
2868 		TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup);
2869 
2870 	return ret;
2871 }
2872 
2873 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
2874 					     bool *threadgroup_locked)
2875 {
2876 	struct task_struct *tsk;
2877 	pid_t pid;
2878 
2879 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2880 		return ERR_PTR(-EINVAL);
2881 
2882 	/*
2883 	 * If we migrate a single thread, we don't care about threadgroup
2884 	 * stability. If the thread is `current`, it won't exit(2) under our
2885 	 * hands or change PID through exec(2). We exclude
2886 	 * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write
2887 	 * callers by cgroup_mutex.
2888 	 * Therefore, we can skip the global lock.
2889 	 */
2890 	lockdep_assert_held(&cgroup_mutex);
2891 	*threadgroup_locked = pid || threadgroup;
2892 	cgroup_attach_lock(*threadgroup_locked);
2893 
2894 	rcu_read_lock();
2895 	if (pid) {
2896 		tsk = find_task_by_vpid(pid);
2897 		if (!tsk) {
2898 			tsk = ERR_PTR(-ESRCH);
2899 			goto out_unlock_threadgroup;
2900 		}
2901 	} else {
2902 		tsk = current;
2903 	}
2904 
2905 	if (threadgroup)
2906 		tsk = tsk->group_leader;
2907 
2908 	/*
2909 	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2910 	 * If userland migrates such a kthread to a non-root cgroup, it can
2911 	 * become trapped in a cpuset, or RT kthread may be born in a
2912 	 * cgroup with no rt_runtime allocated.  Just say no.
2913 	 */
2914 	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2915 		tsk = ERR_PTR(-EINVAL);
2916 		goto out_unlock_threadgroup;
2917 	}
2918 
2919 	get_task_struct(tsk);
2920 	goto out_unlock_rcu;
2921 
2922 out_unlock_threadgroup:
2923 	cgroup_attach_unlock(*threadgroup_locked);
2924 	*threadgroup_locked = false;
2925 out_unlock_rcu:
2926 	rcu_read_unlock();
2927 	return tsk;
2928 }
2929 
2930 void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
2931 {
2932 	struct cgroup_subsys *ss;
2933 	int ssid;
2934 
2935 	/* release reference from cgroup_procs_write_start() */
2936 	put_task_struct(task);
2937 
2938 	cgroup_attach_unlock(threadgroup_locked);
2939 
2940 	for_each_subsys(ss, ssid)
2941 		if (ss->post_attach)
2942 			ss->post_attach();
2943 }
2944 
2945 static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
2946 {
2947 	struct cgroup_subsys *ss;
2948 	bool printed = false;
2949 	int ssid;
2950 
2951 	do_each_subsys_mask(ss, ssid, ss_mask) {
2952 		if (printed)
2953 			seq_putc(seq, ' ');
2954 		seq_puts(seq, ss->name);
2955 		printed = true;
2956 	} while_each_subsys_mask();
2957 	if (printed)
2958 		seq_putc(seq, '\n');
2959 }
2960 
2961 /* show controllers which are enabled from the parent */
2962 static int cgroup_controllers_show(struct seq_file *seq, void *v)
2963 {
2964 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2965 
2966 	cgroup_print_ss_mask(seq, cgroup_control(cgrp));
2967 	return 0;
2968 }
2969 
2970 /* show controllers which are enabled for a given cgroup's children */
2971 static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
2972 {
2973 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2974 
2975 	cgroup_print_ss_mask(seq, cgrp->subtree_control);
2976 	return 0;
2977 }
2978 
2979 /**
2980  * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
2981  * @cgrp: root of the subtree to update csses for
2982  *
2983  * @cgrp's control masks have changed and its subtree's css associations
2984  * need to be updated accordingly.  This function looks up all css_sets
2985  * which are attached to the subtree, creates the matching updated css_sets
2986  * and migrates the tasks to the new ones.
2987  */
2988 static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2989 {
2990 	DEFINE_CGROUP_MGCTX(mgctx);
2991 	struct cgroup_subsys_state *d_css;
2992 	struct cgroup *dsct;
2993 	struct css_set *src_cset;
2994 	bool has_tasks;
2995 	int ret;
2996 
2997 	lockdep_assert_held(&cgroup_mutex);
2998 
2999 	/* look up all csses currently attached to @cgrp's subtree */
3000 	spin_lock_irq(&css_set_lock);
3001 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3002 		struct cgrp_cset_link *link;
3003 
3004 		/*
3005 		 * As cgroup_update_dfl_csses() is only called by
3006 		 * cgroup_apply_control(). The csses associated with the
3007 		 * given cgrp will not be affected by changes made to
3008 		 * its subtree_control file. We can skip them.
3009 		 */
3010 		if (dsct == cgrp)
3011 			continue;
3012 
3013 		list_for_each_entry(link, &dsct->cset_links, cset_link)
3014 			cgroup_migrate_add_src(link->cset, dsct, &mgctx);
3015 	}
3016 	spin_unlock_irq(&css_set_lock);
3017 
3018 	/*
3019 	 * We need to write-lock threadgroup_rwsem while migrating tasks.
3020 	 * However, if there are no source csets for @cgrp, changing its
3021 	 * controllers isn't gonna produce any task migrations and the
3022 	 * write-locking can be skipped safely.
3023 	 */
3024 	has_tasks = !list_empty(&mgctx.preloaded_src_csets);
3025 	cgroup_attach_lock(has_tasks);
3026 
3027 	/* NULL dst indicates self on default hierarchy */
3028 	ret = cgroup_migrate_prepare_dst(&mgctx);
3029 	if (ret)
3030 		goto out_finish;
3031 
3032 	spin_lock_irq(&css_set_lock);
3033 	list_for_each_entry(src_cset, &mgctx.preloaded_src_csets,
3034 			    mg_src_preload_node) {
3035 		struct task_struct *task, *ntask;
3036 
3037 		/* all tasks in src_csets need to be migrated */
3038 		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
3039 			cgroup_migrate_add_task(task, &mgctx);
3040 	}
3041 	spin_unlock_irq(&css_set_lock);
3042 
3043 	ret = cgroup_migrate_execute(&mgctx);
3044 out_finish:
3045 	cgroup_migrate_finish(&mgctx);
3046 	cgroup_attach_unlock(has_tasks);
3047 	return ret;
3048 }
3049 
3050 /**
3051  * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
3052  * @cgrp: root of the target subtree
3053  *
3054  * Because css offlining is asynchronous, userland may try to re-enable a
3055  * controller while the previous css is still around.  This function grabs
3056  * cgroup_mutex and drains the previous css instances of @cgrp's subtree.
3057  */
3058 void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
3059 	__acquires(&cgroup_mutex)
3060 {
3061 	struct cgroup *dsct;
3062 	struct cgroup_subsys_state *d_css;
3063 	struct cgroup_subsys *ss;
3064 	int ssid;
3065 
3066 restart:
3067 	cgroup_lock();
3068 
3069 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3070 		for_each_subsys(ss, ssid) {
3071 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3072 			DEFINE_WAIT(wait);
3073 
3074 			if (!css || !percpu_ref_is_dying(&css->refcnt))
3075 				continue;
3076 
3077 			cgroup_get_live(dsct);
3078 			prepare_to_wait(&dsct->offline_waitq, &wait,
3079 					TASK_UNINTERRUPTIBLE);
3080 
3081 			cgroup_unlock();
3082 			schedule();
3083 			finish_wait(&dsct->offline_waitq, &wait);
3084 
3085 			cgroup_put(dsct);
3086 			goto restart;
3087 		}
3088 	}
3089 }
3090 
3091 /**
3092  * cgroup_save_control - save control masks and dom_cgrp of a subtree
3093  * @cgrp: root of the target subtree
3094  *
3095  * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
3096  * respective old_ prefixed fields for @cgrp's subtree including @cgrp
3097  * itself.
3098  */
3099 static void cgroup_save_control(struct cgroup *cgrp)
3100 {
3101 	struct cgroup *dsct;
3102 	struct cgroup_subsys_state *d_css;
3103 
3104 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3105 		dsct->old_subtree_control = dsct->subtree_control;
3106 		dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
3107 		dsct->old_dom_cgrp = dsct->dom_cgrp;
3108 	}
3109 }
3110 
3111 /**
3112  * cgroup_propagate_control - refresh control masks of a subtree
3113  * @cgrp: root of the target subtree
3114  *
3115  * For @cgrp and its subtree, ensure ->subtree_ss_mask matches
3116  * ->subtree_control and propagate controller availability through the
3117  * subtree so that descendants don't have unavailable controllers enabled.
3118  */
3119 static void cgroup_propagate_control(struct cgroup *cgrp)
3120 {
3121 	struct cgroup *dsct;
3122 	struct cgroup_subsys_state *d_css;
3123 
3124 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3125 		dsct->subtree_control &= cgroup_control(dsct);
3126 		dsct->subtree_ss_mask =
3127 			cgroup_calc_subtree_ss_mask(dsct->subtree_control,
3128 						    cgroup_ss_mask(dsct));
3129 	}
3130 }
3131 
3132 /**
3133  * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
3134  * @cgrp: root of the target subtree
3135  *
3136  * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
3137  * respective old_ prefixed fields for @cgrp's subtree including @cgrp
3138  * itself.
3139  */
3140 static void cgroup_restore_control(struct cgroup *cgrp)
3141 {
3142 	struct cgroup *dsct;
3143 	struct cgroup_subsys_state *d_css;
3144 
3145 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3146 		dsct->subtree_control = dsct->old_subtree_control;
3147 		dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
3148 		dsct->dom_cgrp = dsct->old_dom_cgrp;
3149 	}
3150 }
3151 
3152 static bool css_visible(struct cgroup_subsys_state *css)
3153 {
3154 	struct cgroup_subsys *ss = css->ss;
3155 	struct cgroup *cgrp = css->cgroup;
3156 
3157 	if (cgroup_control(cgrp) & (1 << ss->id))
3158 		return true;
3159 	if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
3160 		return false;
3161 	return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
3162 }
3163 
3164 /**
3165  * cgroup_apply_control_enable - enable or show csses according to control
3166  * @cgrp: root of the target subtree
3167  *
3168  * Walk @cgrp's subtree and create new csses or make the existing ones
3169  * visible.  A css is created invisible if it's being implicitly enabled
3170  * through dependency.  An invisible css is made visible when the userland
3171  * explicitly enables it.
3172  *
3173  * Returns 0 on success, -errno on failure.  On failure, csses which have
3174  * been processed already aren't cleaned up.  The caller is responsible for
3175  * cleaning up with cgroup_apply_control_disable().
3176  */
3177 static int cgroup_apply_control_enable(struct cgroup *cgrp)
3178 {
3179 	struct cgroup *dsct;
3180 	struct cgroup_subsys_state *d_css;
3181 	struct cgroup_subsys *ss;
3182 	int ssid, ret;
3183 
3184 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3185 		for_each_subsys(ss, ssid) {
3186 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3187 
3188 			if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
3189 				continue;
3190 
3191 			if (!css) {
3192 				css = css_create(dsct, ss);
3193 				if (IS_ERR(css))
3194 					return PTR_ERR(css);
3195 			}
3196 
3197 			WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
3198 
3199 			if (css_visible(css)) {
3200 				ret = css_populate_dir(css);
3201 				if (ret)
3202 					return ret;
3203 			}
3204 		}
3205 	}
3206 
3207 	return 0;
3208 }
3209 
3210 /**
3211  * cgroup_apply_control_disable - kill or hide csses according to control
3212  * @cgrp: root of the target subtree
3213  *
3214  * Walk @cgrp's subtree and kill and hide csses so that they match
3215  * cgroup_ss_mask() and cgroup_visible_mask().
3216  *
3217  * A css is hidden when the userland requests it to be disabled while other
3218  * subsystems are still depending on it.  The css must not actively control
3219  * resources and be in the vanilla state if it's made visible again later.
3220  * Controllers which may be depended upon should provide ->css_reset() for
3221  * this purpose.
3222  */
3223 static void cgroup_apply_control_disable(struct cgroup *cgrp)
3224 {
3225 	struct cgroup *dsct;
3226 	struct cgroup_subsys_state *d_css;
3227 	struct cgroup_subsys *ss;
3228 	int ssid;
3229 
3230 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3231 		for_each_subsys(ss, ssid) {
3232 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3233 
3234 			if (!css)
3235 				continue;
3236 
3237 			WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));
3238 
3239 			if (css->parent &&
3240 			    !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
3241 				kill_css(css);
3242 			} else if (!css_visible(css)) {
3243 				css_clear_dir(css);
3244 				if (ss->css_reset)
3245 					ss->css_reset(css);
3246 			}
3247 		}
3248 	}
3249 }
3250 
3251 /**
3252  * cgroup_apply_control - apply control mask updates to the subtree
3253  * @cgrp: root of the target subtree
3254  *
3255  * subsystems can be enabled and disabled in a subtree using the following
3256  * steps.
3257  *
3258  * 1. Call cgroup_save_control() to stash the current state.
3259  * 2. Update ->subtree_control masks in the subtree as desired.
3260  * 3. Call cgroup_apply_control() to apply the changes.
3261  * 4. Optionally perform other related operations.
3262  * 5. Call cgroup_finalize_control() to finish up.
3263  *
3264  * This function implements step 3 and propagates the mask changes
3265  * throughout @cgrp's subtree, updates csses accordingly and perform
3266  * process migrations.
3267  */
3268 static int cgroup_apply_control(struct cgroup *cgrp)
3269 {
3270 	int ret;
3271 
3272 	cgroup_propagate_control(cgrp);
3273 
3274 	ret = cgroup_apply_control_enable(cgrp);
3275 	if (ret)
3276 		return ret;
3277 
3278 	/*
3279 	 * At this point, cgroup_e_css_by_mask() results reflect the new csses
3280 	 * making the following cgroup_update_dfl_csses() properly update
3281 	 * css associations of all tasks in the subtree.
3282 	 */
3283 	return cgroup_update_dfl_csses(cgrp);
3284 }
3285 
3286 /**
3287  * cgroup_finalize_control - finalize control mask update
3288  * @cgrp: root of the target subtree
3289  * @ret: the result of the update
3290  *
3291  * Finalize control mask update.  See cgroup_apply_control() for more info.
3292  */
3293 static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
3294 {
3295 	if (ret) {
3296 		cgroup_restore_control(cgrp);
3297 		cgroup_propagate_control(cgrp);
3298 	}
3299 
3300 	cgroup_apply_control_disable(cgrp);
3301 }
3302 
3303 static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u16 enable)
3304 {
3305 	u16 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask;
3306 
3307 	/* if nothing is getting enabled, nothing to worry about */
3308 	if (!enable)
3309 		return 0;
3310 
3311 	/* can @cgrp host any resources? */
3312 	if (!cgroup_is_valid_domain(cgrp->dom_cgrp))
3313 		return -EOPNOTSUPP;
3314 
3315 	/* mixables don't care */
3316 	if (cgroup_is_mixable(cgrp))
3317 		return 0;
3318 
3319 	if (domain_enable) {
3320 		/* can't enable domain controllers inside a thread subtree */
3321 		if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp))
3322 			return -EOPNOTSUPP;
3323 	} else {
3324 		/*
3325 		 * Threaded controllers can handle internal competitions
3326 		 * and are always allowed inside a (prospective) thread
3327 		 * subtree.
3328 		 */
3329 		if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp))
3330 			return 0;
3331 	}
3332 
3333 	/*
3334 	 * Controllers can't be enabled for a cgroup with tasks to avoid
3335 	 * child cgroups competing against tasks.
3336 	 */
3337 	if (cgroup_has_tasks(cgrp))
3338 		return -EBUSY;
3339 
3340 	return 0;
3341 }
3342 
3343 /* change the enabled child controllers for a cgroup in the default hierarchy */
3344 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
3345 					    char *buf, size_t nbytes,
3346 					    loff_t off)
3347 {
3348 	u16 enable = 0, disable = 0;
3349 	struct cgroup *cgrp, *child;
3350 	struct cgroup_subsys *ss;
3351 	char *tok;
3352 	int ssid, ret;
3353 
3354 	/*
3355 	 * Parse input - space separated list of subsystem names prefixed
3356 	 * with either + or -.
3357 	 */
3358 	buf = strstrip(buf);
3359 	while ((tok = strsep(&buf, " "))) {
3360 		if (tok[0] == '\0')
3361 			continue;
3362 		do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
3363 			if (!cgroup_ssid_enabled(ssid) ||
3364 			    strcmp(tok + 1, ss->name))
3365 				continue;
3366 
3367 			if (*tok == '+') {
3368 				enable |= 1 << ssid;
3369 				disable &= ~(1 << ssid);
3370 			} else if (*tok == '-') {
3371 				disable |= 1 << ssid;
3372 				enable &= ~(1 << ssid);
3373 			} else {
3374 				return -EINVAL;
3375 			}
3376 			break;
3377 		} while_each_subsys_mask();
3378 		if (ssid == CGROUP_SUBSYS_COUNT)
3379 			return -EINVAL;
3380 	}
3381 
3382 	cgrp = cgroup_kn_lock_live(of->kn, true);
3383 	if (!cgrp)
3384 		return -ENODEV;
3385 
3386 	for_each_subsys(ss, ssid) {
3387 		if (enable & (1 << ssid)) {
3388 			if (cgrp->subtree_control & (1 << ssid)) {
3389 				enable &= ~(1 << ssid);
3390 				continue;
3391 			}
3392 
3393 			if (!(cgroup_control(cgrp) & (1 << ssid))) {
3394 				ret = -ENOENT;
3395 				goto out_unlock;
3396 			}
3397 		} else if (disable & (1 << ssid)) {
3398 			if (!(cgrp->subtree_control & (1 << ssid))) {
3399 				disable &= ~(1 << ssid);
3400 				continue;
3401 			}
3402 
3403 			/* a child has it enabled? */
3404 			cgroup_for_each_live_child(child, cgrp) {
3405 				if (child->subtree_control & (1 << ssid)) {
3406 					ret = -EBUSY;
3407 					goto out_unlock;
3408 				}
3409 			}
3410 		}
3411 	}
3412 
3413 	if (!enable && !disable) {
3414 		ret = 0;
3415 		goto out_unlock;
3416 	}
3417 
3418 	ret = cgroup_vet_subtree_control_enable(cgrp, enable);
3419 	if (ret)
3420 		goto out_unlock;
3421 
3422 	/* save and update control masks and prepare csses */
3423 	cgroup_save_control(cgrp);
3424 
3425 	cgrp->subtree_control |= enable;
3426 	cgrp->subtree_control &= ~disable;
3427 
3428 	ret = cgroup_apply_control(cgrp);
3429 	cgroup_finalize_control(cgrp, ret);
3430 	if (ret)
3431 		goto out_unlock;
3432 
3433 	kernfs_activate(cgrp->kn);
3434 out_unlock:
3435 	cgroup_kn_unlock(of->kn);
3436 	return ret ?: nbytes;
3437 }
3438 
3439 /**
3440  * cgroup_enable_threaded - make @cgrp threaded
3441  * @cgrp: the target cgroup
3442  *
3443  * Called when "threaded" is written to the cgroup.type interface file and
3444  * tries to make @cgrp threaded and join the parent's resource domain.
3445  * This function is never called on the root cgroup as cgroup.type doesn't
3446  * exist on it.
3447  */
3448 static int cgroup_enable_threaded(struct cgroup *cgrp)
3449 {
3450 	struct cgroup *parent = cgroup_parent(cgrp);
3451 	struct cgroup *dom_cgrp = parent->dom_cgrp;
3452 	struct cgroup *dsct;
3453 	struct cgroup_subsys_state *d_css;
3454 	int ret;
3455 
3456 	lockdep_assert_held(&cgroup_mutex);
3457 
3458 	/* noop if already threaded */
3459 	if (cgroup_is_threaded(cgrp))
3460 		return 0;
3461 
3462 	/*
3463 	 * If @cgroup is populated or has domain controllers enabled, it
3464 	 * can't be switched.  While the below cgroup_can_be_thread_root()
3465 	 * test can catch the same conditions, that's only when @parent is
3466 	 * not mixable, so let's check it explicitly.
3467 	 */
3468 	if (cgroup_is_populated(cgrp) ||
3469 	    cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
3470 		return -EOPNOTSUPP;
3471 
3472 	/* we're joining the parent's domain, ensure its validity */
3473 	if (!cgroup_is_valid_domain(dom_cgrp) ||
3474 	    !cgroup_can_be_thread_root(dom_cgrp))
3475 		return -EOPNOTSUPP;
3476 
3477 	/*
3478 	 * The following shouldn't cause actual migrations and should
3479 	 * always succeed.
3480 	 */
3481 	cgroup_save_control(cgrp);
3482 
3483 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
3484 		if (dsct == cgrp || cgroup_is_threaded(dsct))
3485 			dsct->dom_cgrp = dom_cgrp;
3486 
3487 	ret = cgroup_apply_control(cgrp);
3488 	if (!ret)
3489 		parent->nr_threaded_children++;
3490 
3491 	cgroup_finalize_control(cgrp, ret);
3492 	return ret;
3493 }
3494 
3495 static int cgroup_type_show(struct seq_file *seq, void *v)
3496 {
3497 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3498 
3499 	if (cgroup_is_threaded(cgrp))
3500 		seq_puts(seq, "threaded\n");
3501 	else if (!cgroup_is_valid_domain(cgrp))
3502 		seq_puts(seq, "domain invalid\n");
3503 	else if (cgroup_is_thread_root(cgrp))
3504 		seq_puts(seq, "domain threaded\n");
3505 	else
3506 		seq_puts(seq, "domain\n");
3507 
3508 	return 0;
3509 }
3510 
3511 static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf,
3512 				 size_t nbytes, loff_t off)
3513 {
3514 	struct cgroup *cgrp;
3515 	int ret;
3516 
3517 	/* only switching to threaded mode is supported */
3518 	if (strcmp(strstrip(buf), "threaded"))
3519 		return -EINVAL;
3520 
3521 	/* drain dying csses before we re-apply (threaded) subtree control */
3522 	cgrp = cgroup_kn_lock_live(of->kn, true);
3523 	if (!cgrp)
3524 		return -ENOENT;
3525 
3526 	/* threaded can only be enabled */
3527 	ret = cgroup_enable_threaded(cgrp);
3528 
3529 	cgroup_kn_unlock(of->kn);
3530 	return ret ?: nbytes;
3531 }
3532 
3533 static int cgroup_max_descendants_show(struct seq_file *seq, void *v)
3534 {
3535 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3536 	int descendants = READ_ONCE(cgrp->max_descendants);
3537 
3538 	if (descendants == INT_MAX)
3539 		seq_puts(seq, "max\n");
3540 	else
3541 		seq_printf(seq, "%d\n", descendants);
3542 
3543 	return 0;
3544 }
3545 
3546 static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of,
3547 					   char *buf, size_t nbytes, loff_t off)
3548 {
3549 	struct cgroup *cgrp;
3550 	int descendants;
3551 	ssize_t ret;
3552 
3553 	buf = strstrip(buf);
3554 	if (!strcmp(buf, "max")) {
3555 		descendants = INT_MAX;
3556 	} else {
3557 		ret = kstrtoint(buf, 0, &descendants);
3558 		if (ret)
3559 			return ret;
3560 	}
3561 
3562 	if (descendants < 0)
3563 		return -ERANGE;
3564 
3565 	cgrp = cgroup_kn_lock_live(of->kn, false);
3566 	if (!cgrp)
3567 		return -ENOENT;
3568 
3569 	cgrp->max_descendants = descendants;
3570 
3571 	cgroup_kn_unlock(of->kn);
3572 
3573 	return nbytes;
3574 }
3575 
3576 static int cgroup_max_depth_show(struct seq_file *seq, void *v)
3577 {
3578 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3579 	int depth = READ_ONCE(cgrp->max_depth);
3580 
3581 	if (depth == INT_MAX)
3582 		seq_puts(seq, "max\n");
3583 	else
3584 		seq_printf(seq, "%d\n", depth);
3585 
3586 	return 0;
3587 }
3588 
3589 static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of,
3590 				      char *buf, size_t nbytes, loff_t off)
3591 {
3592 	struct cgroup *cgrp;
3593 	ssize_t ret;
3594 	int depth;
3595 
3596 	buf = strstrip(buf);
3597 	if (!strcmp(buf, "max")) {
3598 		depth = INT_MAX;
3599 	} else {
3600 		ret = kstrtoint(buf, 0, &depth);
3601 		if (ret)
3602 			return ret;
3603 	}
3604 
3605 	if (depth < 0)
3606 		return -ERANGE;
3607 
3608 	cgrp = cgroup_kn_lock_live(of->kn, false);
3609 	if (!cgrp)
3610 		return -ENOENT;
3611 
3612 	cgrp->max_depth = depth;
3613 
3614 	cgroup_kn_unlock(of->kn);
3615 
3616 	return nbytes;
3617 }
3618 
3619 static int cgroup_events_show(struct seq_file *seq, void *v)
3620 {
3621 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3622 
3623 	seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp));
3624 	seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags));
3625 
3626 	return 0;
3627 }
3628 
3629 static int cgroup_stat_show(struct seq_file *seq, void *v)
3630 {
3631 	struct cgroup *cgroup = seq_css(seq)->cgroup;
3632 
3633 	seq_printf(seq, "nr_descendants %d\n",
3634 		   cgroup->nr_descendants);
3635 	seq_printf(seq, "nr_dying_descendants %d\n",
3636 		   cgroup->nr_dying_descendants);
3637 
3638 	return 0;
3639 }
3640 
3641 #ifdef CONFIG_CGROUP_SCHED
3642 /**
3643  * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
3644  * @cgrp: the cgroup of interest
3645  * @ss: the subsystem of interest
3646  *
3647  * Find and get @cgrp's css associated with @ss.  If the css doesn't exist
3648  * or is offline, %NULL is returned.
3649  */
3650 static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
3651 						     struct cgroup_subsys *ss)
3652 {
3653 	struct cgroup_subsys_state *css;
3654 
3655 	rcu_read_lock();
3656 	css = cgroup_css(cgrp, ss);
3657 	if (css && !css_tryget_online(css))
3658 		css = NULL;
3659 	rcu_read_unlock();
3660 
3661 	return css;
3662 }
3663 
3664 static int cgroup_extra_stat_show(struct seq_file *seq, int ssid)
3665 {
3666 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3667 	struct cgroup_subsys *ss = cgroup_subsys[ssid];
3668 	struct cgroup_subsys_state *css;
3669 	int ret;
3670 
3671 	if (!ss->css_extra_stat_show)
3672 		return 0;
3673 
3674 	css = cgroup_tryget_css(cgrp, ss);
3675 	if (!css)
3676 		return 0;
3677 
3678 	ret = ss->css_extra_stat_show(seq, css);
3679 	css_put(css);
3680 	return ret;
3681 }
3682 
3683 static int cgroup_local_stat_show(struct seq_file *seq,
3684 				  struct cgroup *cgrp, int ssid)
3685 {
3686 	struct cgroup_subsys *ss = cgroup_subsys[ssid];
3687 	struct cgroup_subsys_state *css;
3688 	int ret;
3689 
3690 	if (!ss->css_local_stat_show)
3691 		return 0;
3692 
3693 	css = cgroup_tryget_css(cgrp, ss);
3694 	if (!css)
3695 		return 0;
3696 
3697 	ret = ss->css_local_stat_show(seq, css);
3698 	css_put(css);
3699 	return ret;
3700 }
3701 #endif
3702 
3703 static int cpu_stat_show(struct seq_file *seq, void *v)
3704 {
3705 	int ret = 0;
3706 
3707 	cgroup_base_stat_cputime_show(seq);
3708 #ifdef CONFIG_CGROUP_SCHED
3709 	ret = cgroup_extra_stat_show(seq, cpu_cgrp_id);
3710 #endif
3711 	return ret;
3712 }
3713 
3714 static int cpu_local_stat_show(struct seq_file *seq, void *v)
3715 {
3716 	struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
3717 	int ret = 0;
3718 
3719 #ifdef CONFIG_CGROUP_SCHED
3720 	ret = cgroup_local_stat_show(seq, cgrp, cpu_cgrp_id);
3721 #endif
3722 	return ret;
3723 }
3724 
3725 #ifdef CONFIG_PSI
3726 static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
3727 {
3728 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3729 	struct psi_group *psi = cgroup_psi(cgrp);
3730 
3731 	return psi_show(seq, psi, PSI_IO);
3732 }
3733 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
3734 {
3735 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3736 	struct psi_group *psi = cgroup_psi(cgrp);
3737 
3738 	return psi_show(seq, psi, PSI_MEM);
3739 }
3740 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
3741 {
3742 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3743 	struct psi_group *psi = cgroup_psi(cgrp);
3744 
3745 	return psi_show(seq, psi, PSI_CPU);
3746 }
3747 
3748 static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
3749 			      size_t nbytes, enum psi_res res)
3750 {
3751 	struct cgroup_file_ctx *ctx = of->priv;
3752 	struct psi_trigger *new;
3753 	struct cgroup *cgrp;
3754 	struct psi_group *psi;
3755 
3756 	cgrp = cgroup_kn_lock_live(of->kn, false);
3757 	if (!cgrp)
3758 		return -ENODEV;
3759 
3760 	cgroup_get(cgrp);
3761 	cgroup_kn_unlock(of->kn);
3762 
3763 	/* Allow only one trigger per file descriptor */
3764 	if (ctx->psi.trigger) {
3765 		cgroup_put(cgrp);
3766 		return -EBUSY;
3767 	}
3768 
3769 	psi = cgroup_psi(cgrp);
3770 	new = psi_trigger_create(psi, buf, res, of->file, of);
3771 	if (IS_ERR(new)) {
3772 		cgroup_put(cgrp);
3773 		return PTR_ERR(new);
3774 	}
3775 
3776 	smp_store_release(&ctx->psi.trigger, new);
3777 	cgroup_put(cgrp);
3778 
3779 	return nbytes;
3780 }
3781 
3782 static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
3783 					  char *buf, size_t nbytes,
3784 					  loff_t off)
3785 {
3786 	return pressure_write(of, buf, nbytes, PSI_IO);
3787 }
3788 
3789 static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
3790 					  char *buf, size_t nbytes,
3791 					  loff_t off)
3792 {
3793 	return pressure_write(of, buf, nbytes, PSI_MEM);
3794 }
3795 
3796 static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
3797 					  char *buf, size_t nbytes,
3798 					  loff_t off)
3799 {
3800 	return pressure_write(of, buf, nbytes, PSI_CPU);
3801 }
3802 
3803 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
3804 static int cgroup_irq_pressure_show(struct seq_file *seq, void *v)
3805 {
3806 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3807 	struct psi_group *psi = cgroup_psi(cgrp);
3808 
3809 	return psi_show(seq, psi, PSI_IRQ);
3810 }
3811 
3812 static ssize_t cgroup_irq_pressure_write(struct kernfs_open_file *of,
3813 					 char *buf, size_t nbytes,
3814 					 loff_t off)
3815 {
3816 	return pressure_write(of, buf, nbytes, PSI_IRQ);
3817 }
3818 #endif
3819 
3820 static int cgroup_pressure_show(struct seq_file *seq, void *v)
3821 {
3822 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3823 	struct psi_group *psi = cgroup_psi(cgrp);
3824 
3825 	seq_printf(seq, "%d\n", psi->enabled);
3826 
3827 	return 0;
3828 }
3829 
3830 static ssize_t cgroup_pressure_write(struct kernfs_open_file *of,
3831 				     char *buf, size_t nbytes,
3832 				     loff_t off)
3833 {
3834 	ssize_t ret;
3835 	int enable;
3836 	struct cgroup *cgrp;
3837 	struct psi_group *psi;
3838 
3839 	ret = kstrtoint(strstrip(buf), 0, &enable);
3840 	if (ret)
3841 		return ret;
3842 
3843 	if (enable < 0 || enable > 1)
3844 		return -ERANGE;
3845 
3846 	cgrp = cgroup_kn_lock_live(of->kn, false);
3847 	if (!cgrp)
3848 		return -ENOENT;
3849 
3850 	psi = cgroup_psi(cgrp);
3851 	if (psi->enabled != enable) {
3852 		int i;
3853 
3854 		/* show or hide {cpu,memory,io,irq}.pressure files */
3855 		for (i = 0; i < NR_PSI_RESOURCES; i++)
3856 			cgroup_file_show(&cgrp->psi_files[i], enable);
3857 
3858 		psi->enabled = enable;
3859 		if (enable)
3860 			psi_cgroup_restart(psi);
3861 	}
3862 
3863 	cgroup_kn_unlock(of->kn);
3864 
3865 	return nbytes;
3866 }
3867 
3868 static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
3869 					  poll_table *pt)
3870 {
3871 	struct cgroup_file_ctx *ctx = of->priv;
3872 
3873 	return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
3874 }
3875 
3876 static int cgroup_pressure_open(struct kernfs_open_file *of)
3877 {
3878 	if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
3879 		return -EPERM;
3880 
3881 	return 0;
3882 }
3883 
3884 static void cgroup_pressure_release(struct kernfs_open_file *of)
3885 {
3886 	struct cgroup_file_ctx *ctx = of->priv;
3887 
3888 	psi_trigger_destroy(ctx->psi.trigger);
3889 }
3890 
3891 bool cgroup_psi_enabled(void)
3892 {
3893 	if (static_branch_likely(&psi_disabled))
3894 		return false;
3895 
3896 	return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0;
3897 }
3898 
3899 #else /* CONFIG_PSI */
3900 bool cgroup_psi_enabled(void)
3901 {
3902 	return false;
3903 }
3904 
3905 #endif /* CONFIG_PSI */
3906 
3907 static int cgroup_freeze_show(struct seq_file *seq, void *v)
3908 {
3909 	struct cgroup *cgrp = seq_css(seq)->cgroup;
3910 
3911 	seq_printf(seq, "%d\n", cgrp->freezer.freeze);
3912 
3913 	return 0;
3914 }
3915 
3916 static ssize_t cgroup_freeze_write(struct kernfs_open_file *of,
3917 				   char *buf, size_t nbytes, loff_t off)
3918 {
3919 	struct cgroup *cgrp;
3920 	ssize_t ret;
3921 	int freeze;
3922 
3923 	ret = kstrtoint(strstrip(buf), 0, &freeze);
3924 	if (ret)
3925 		return ret;
3926 
3927 	if (freeze < 0 || freeze > 1)
3928 		return -ERANGE;
3929 
3930 	cgrp = cgroup_kn_lock_live(of->kn, false);
3931 	if (!cgrp)
3932 		return -ENOENT;
3933 
3934 	cgroup_freeze(cgrp, freeze);
3935 
3936 	cgroup_kn_unlock(of->kn);
3937 
3938 	return nbytes;
3939 }
3940 
3941 static void __cgroup_kill(struct cgroup *cgrp)
3942 {
3943 	struct css_task_iter it;
3944 	struct task_struct *task;
3945 
3946 	lockdep_assert_held(&cgroup_mutex);
3947 
3948 	spin_lock_irq(&css_set_lock);
3949 	set_bit(CGRP_KILL, &cgrp->flags);
3950 	spin_unlock_irq(&css_set_lock);
3951 
3952 	css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
3953 	while ((task = css_task_iter_next(&it))) {
3954 		/* Ignore kernel threads here. */
3955 		if (task->flags & PF_KTHREAD)
3956 			continue;
3957 
3958 		/* Skip tasks that are already dying. */
3959 		if (__fatal_signal_pending(task))
3960 			continue;
3961 
3962 		send_sig(SIGKILL, task, 0);
3963 	}
3964 	css_task_iter_end(&it);
3965 
3966 	spin_lock_irq(&css_set_lock);
3967 	clear_bit(CGRP_KILL, &cgrp->flags);
3968 	spin_unlock_irq(&css_set_lock);
3969 }
3970 
3971 static void cgroup_kill(struct cgroup *cgrp)
3972 {
3973 	struct cgroup_subsys_state *css;
3974 	struct cgroup *dsct;
3975 
3976 	lockdep_assert_held(&cgroup_mutex);
3977 
3978 	cgroup_for_each_live_descendant_pre(dsct, css, cgrp)
3979 		__cgroup_kill(dsct);
3980 }
3981 
3982 static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf,
3983 				 size_t nbytes, loff_t off)
3984 {
3985 	ssize_t ret = 0;
3986 	int kill;
3987 	struct cgroup *cgrp;
3988 
3989 	ret = kstrtoint(strstrip(buf), 0, &kill);
3990 	if (ret)
3991 		return ret;
3992 
3993 	if (kill != 1)
3994 		return -ERANGE;
3995 
3996 	cgrp = cgroup_kn_lock_live(of->kn, false);
3997 	if (!cgrp)
3998 		return -ENOENT;
3999 
4000 	/*
4001 	 * Killing is a process directed operation, i.e. the whole thread-group
4002 	 * is taken down so act like we do for cgroup.procs and only make this
4003 	 * writable in non-threaded cgroups.
4004 	 */
4005 	if (cgroup_is_threaded(cgrp))
4006 		ret = -EOPNOTSUPP;
4007 	else
4008 		cgroup_kill(cgrp);
4009 
4010 	cgroup_kn_unlock(of->kn);
4011 
4012 	return ret ?: nbytes;
4013 }
4014 
4015 static int cgroup_file_open(struct kernfs_open_file *of)
4016 {
4017 	struct cftype *cft = of_cft(of);
4018 	struct cgroup_file_ctx *ctx;
4019 	int ret;
4020 
4021 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
4022 	if (!ctx)
4023 		return -ENOMEM;
4024 
4025 	ctx->ns = current->nsproxy->cgroup_ns;
4026 	get_cgroup_ns(ctx->ns);
4027 	of->priv = ctx;
4028 
4029 	if (!cft->open)
4030 		return 0;
4031 
4032 	ret = cft->open(of);
4033 	if (ret) {
4034 		put_cgroup_ns(ctx->ns);
4035 		kfree(ctx);
4036 	}
4037 	return ret;
4038 }
4039 
4040 static void cgroup_file_release(struct kernfs_open_file *of)
4041 {
4042 	struct cftype *cft = of_cft(of);
4043 	struct cgroup_file_ctx *ctx = of->priv;
4044 
4045 	if (cft->release)
4046 		cft->release(of);
4047 	put_cgroup_ns(ctx->ns);
4048 	kfree(ctx);
4049 }
4050 
4051 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
4052 				 size_t nbytes, loff_t off)
4053 {
4054 	struct cgroup_file_ctx *ctx = of->priv;
4055 	struct cgroup *cgrp = of->kn->parent->priv;
4056 	struct cftype *cft = of_cft(of);
4057 	struct cgroup_subsys_state *css;
4058 	int ret;
4059 
4060 	if (!nbytes)
4061 		return 0;
4062 
4063 	/*
4064 	 * If namespaces are delegation boundaries, disallow writes to
4065 	 * files in an non-init namespace root from inside the namespace
4066 	 * except for the files explicitly marked delegatable -
4067 	 * cgroup.procs and cgroup.subtree_control.
4068 	 */
4069 	if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
4070 	    !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
4071 	    ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp)
4072 		return -EPERM;
4073 
4074 	if (cft->write)
4075 		return cft->write(of, buf, nbytes, off);
4076 
4077 	/*
4078 	 * kernfs guarantees that a file isn't deleted with operations in
4079 	 * flight, which means that the matching css is and stays alive and
4080 	 * doesn't need to be pinned.  The RCU locking is not necessary
4081 	 * either.  It's just for the convenience of using cgroup_css().
4082 	 */
4083 	rcu_read_lock();
4084 	css = cgroup_css(cgrp, cft->ss);
4085 	rcu_read_unlock();
4086 
4087 	if (cft->write_u64) {
4088 		unsigned long long v;
4089 		ret = kstrtoull(buf, 0, &v);
4090 		if (!ret)
4091 			ret = cft->write_u64(css, cft, v);
4092 	} else if (cft->write_s64) {
4093 		long long v;
4094 		ret = kstrtoll(buf, 0, &v);
4095 		if (!ret)
4096 			ret = cft->write_s64(css, cft, v);
4097 	} else {
4098 		ret = -EINVAL;
4099 	}
4100 
4101 	return ret ?: nbytes;
4102 }
4103 
4104 static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
4105 {
4106 	struct cftype *cft = of_cft(of);
4107 
4108 	if (cft->poll)
4109 		return cft->poll(of, pt);
4110 
4111 	return kernfs_generic_poll(of, pt);
4112 }
4113 
4114 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
4115 {
4116 	return seq_cft(seq)->seq_start(seq, ppos);
4117 }
4118 
4119 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
4120 {
4121 	return seq_cft(seq)->seq_next(seq, v, ppos);
4122 }
4123 
4124 static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
4125 {
4126 	if (seq_cft(seq)->seq_stop)
4127 		seq_cft(seq)->seq_stop(seq, v);
4128 }
4129 
4130 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
4131 {
4132 	struct cftype *cft = seq_cft(m);
4133 	struct cgroup_subsys_state *css = seq_css(m);
4134 
4135 	if (cft->seq_show)
4136 		return cft->seq_show(m, arg);
4137 
4138 	if (cft->read_u64)
4139 		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
4140 	else if (cft->read_s64)
4141 		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
4142 	else
4143 		return -EINVAL;
4144 	return 0;
4145 }
4146 
4147 static struct kernfs_ops cgroup_kf_single_ops = {
4148 	.atomic_write_len	= PAGE_SIZE,
4149 	.open			= cgroup_file_open,
4150 	.release		= cgroup_file_release,
4151 	.write			= cgroup_file_write,
4152 	.poll			= cgroup_file_poll,
4153 	.seq_show		= cgroup_seqfile_show,
4154 };
4155 
4156 static struct kernfs_ops cgroup_kf_ops = {
4157 	.atomic_write_len	= PAGE_SIZE,
4158 	.open			= cgroup_file_open,
4159 	.release		= cgroup_file_release,
4160 	.write			= cgroup_file_write,
4161 	.poll			= cgroup_file_poll,
4162 	.seq_start		= cgroup_seqfile_start,
4163 	.seq_next		= cgroup_seqfile_next,
4164 	.seq_stop		= cgroup_seqfile_stop,
4165 	.seq_show		= cgroup_seqfile_show,
4166 };
4167 
4168 /* set uid and gid of cgroup dirs and files to that of the creator */
4169 static int cgroup_kn_set_ugid(struct kernfs_node *kn)
4170 {
4171 	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
4172 			       .ia_uid = current_fsuid(),
4173 			       .ia_gid = current_fsgid(), };
4174 
4175 	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
4176 	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
4177 		return 0;
4178 
4179 	return kernfs_setattr(kn, &iattr);
4180 }
4181 
4182 static void cgroup_file_notify_timer(struct timer_list *timer)
4183 {
4184 	cgroup_file_notify(container_of(timer, struct cgroup_file,
4185 					notify_timer));
4186 }
4187 
4188 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
4189 			   struct cftype *cft)
4190 {
4191 	char name[CGROUP_FILE_NAME_MAX];
4192 	struct kernfs_node *kn;
4193 	struct lock_class_key *key = NULL;
4194 	int ret;
4195 
4196 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4197 	key = &cft->lockdep_key;
4198 #endif
4199 	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
4200 				  cgroup_file_mode(cft),
4201 				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
4202 				  0, cft->kf_ops, cft,
4203 				  NULL, key);
4204 	if (IS_ERR(kn))
4205 		return PTR_ERR(kn);
4206 
4207 	ret = cgroup_kn_set_ugid(kn);
4208 	if (ret) {
4209 		kernfs_remove(kn);
4210 		return ret;
4211 	}
4212 
4213 	if (cft->file_offset) {
4214 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
4215 
4216 		timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0);
4217 
4218 		spin_lock_irq(&cgroup_file_kn_lock);
4219 		cfile->kn = kn;
4220 		spin_unlock_irq(&cgroup_file_kn_lock);
4221 	}
4222 
4223 	return 0;
4224 }
4225 
4226 /**
4227  * cgroup_addrm_files - add or remove files to a cgroup directory
4228  * @css: the target css
4229  * @cgrp: the target cgroup (usually css->cgroup)
4230  * @cfts: array of cftypes to be added
4231  * @is_add: whether to add or remove
4232  *
4233  * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
4234  * For removals, this function never fails.
4235  */
4236 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
4237 			      struct cgroup *cgrp, struct cftype cfts[],
4238 			      bool is_add)
4239 {
4240 	struct cftype *cft, *cft_end = NULL;
4241 	int ret = 0;
4242 
4243 	lockdep_assert_held(&cgroup_mutex);
4244 
4245 restart:
4246 	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
4247 		/* does cft->flags tell us to skip this file on @cgrp? */
4248 		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
4249 			continue;
4250 		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
4251 			continue;
4252 		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
4253 			continue;
4254 		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
4255 			continue;
4256 		if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug)
4257 			continue;
4258 		if (is_add) {
4259 			ret = cgroup_add_file(css, cgrp, cft);
4260 			if (ret) {
4261 				pr_warn("%s: failed to add %s, err=%d\n",
4262 					__func__, cft->name, ret);
4263 				cft_end = cft;
4264 				is_add = false;
4265 				goto restart;
4266 			}
4267 		} else {
4268 			cgroup_rm_file(cgrp, cft);
4269 		}
4270 	}
4271 	return ret;
4272 }
4273 
4274 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
4275 {
4276 	struct cgroup_subsys *ss = cfts[0].ss;
4277 	struct cgroup *root = &ss->root->cgrp;
4278 	struct cgroup_subsys_state *css;
4279 	int ret = 0;
4280 
4281 	lockdep_assert_held(&cgroup_mutex);
4282 
4283 	/* add/rm files for all cgroups created before */
4284 	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
4285 		struct cgroup *cgrp = css->cgroup;
4286 
4287 		if (!(css->flags & CSS_VISIBLE))
4288 			continue;
4289 
4290 		ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
4291 		if (ret)
4292 			break;
4293 	}
4294 
4295 	if (is_add && !ret)
4296 		kernfs_activate(root->kn);
4297 	return ret;
4298 }
4299 
4300 static void cgroup_exit_cftypes(struct cftype *cfts)
4301 {
4302 	struct cftype *cft;
4303 
4304 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
4305 		/* free copy for custom atomic_write_len, see init_cftypes() */
4306 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
4307 			kfree(cft->kf_ops);
4308 		cft->kf_ops = NULL;
4309 		cft->ss = NULL;
4310 
4311 		/* revert flags set by cgroup core while adding @cfts */
4312 		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL |
4313 				__CFTYPE_ADDED);
4314 	}
4315 }
4316 
4317 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4318 {
4319 	struct cftype *cft;
4320 	int ret = 0;
4321 
4322 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
4323 		struct kernfs_ops *kf_ops;
4324 
4325 		WARN_ON(cft->ss || cft->kf_ops);
4326 
4327 		if (cft->flags & __CFTYPE_ADDED) {
4328 			ret = -EBUSY;
4329 			break;
4330 		}
4331 
4332 		if (cft->seq_start)
4333 			kf_ops = &cgroup_kf_ops;
4334 		else
4335 			kf_ops = &cgroup_kf_single_ops;
4336 
4337 		/*
4338 		 * Ugh... if @cft wants a custom max_write_len, we need to
4339 		 * make a copy of kf_ops to set its atomic_write_len.
4340 		 */
4341 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
4342 			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
4343 			if (!kf_ops) {
4344 				ret = -ENOMEM;
4345 				break;
4346 			}
4347 			kf_ops->atomic_write_len = cft->max_write_len;
4348 		}
4349 
4350 		cft->kf_ops = kf_ops;
4351 		cft->ss = ss;
4352 		cft->flags |= __CFTYPE_ADDED;
4353 	}
4354 
4355 	if (ret)
4356 		cgroup_exit_cftypes(cfts);
4357 	return ret;
4358 }
4359 
4360 static void cgroup_rm_cftypes_locked(struct cftype *cfts)
4361 {
4362 	lockdep_assert_held(&cgroup_mutex);
4363 
4364 	list_del(&cfts->node);
4365 	cgroup_apply_cftypes(cfts, false);
4366 	cgroup_exit_cftypes(cfts);
4367 }
4368 
4369 /**
4370  * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
4371  * @cfts: zero-length name terminated array of cftypes
4372  *
4373  * Unregister @cfts.  Files described by @cfts are removed from all
4374  * existing cgroups and all future cgroups won't have them either.  This
4375  * function can be called anytime whether @cfts' subsys is attached or not.
4376  *
4377  * Returns 0 on successful unregistration, -ENOENT if @cfts is not
4378  * registered.
4379  */
4380 int cgroup_rm_cftypes(struct cftype *cfts)
4381 {
4382 	if (!cfts || cfts[0].name[0] == '\0')
4383 		return 0;
4384 
4385 	if (!(cfts[0].flags & __CFTYPE_ADDED))
4386 		return -ENOENT;
4387 
4388 	cgroup_lock();
4389 	cgroup_rm_cftypes_locked(cfts);
4390 	cgroup_unlock();
4391 	return 0;
4392 }
4393 
4394 /**
4395  * cgroup_add_cftypes - add an array of cftypes to a subsystem
4396  * @ss: target cgroup subsystem
4397  * @cfts: zero-length name terminated array of cftypes
4398  *
4399  * Register @cfts to @ss.  Files described by @cfts are created for all
4400  * existing cgroups to which @ss is attached and all future cgroups will
4401  * have them too.  This function can be called anytime whether @ss is
4402  * attached or not.
4403  *
4404  * Returns 0 on successful registration, -errno on failure.  Note that this
4405  * function currently returns 0 as long as @cfts registration is successful
4406  * even if some file creation attempts on existing cgroups fail.
4407  */
4408 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4409 {
4410 	int ret;
4411 
4412 	if (!cgroup_ssid_enabled(ss->id))
4413 		return 0;
4414 
4415 	if (!cfts || cfts[0].name[0] == '\0')
4416 		return 0;
4417 
4418 	ret = cgroup_init_cftypes(ss, cfts);
4419 	if (ret)
4420 		return ret;
4421 
4422 	cgroup_lock();
4423 
4424 	list_add_tail(&cfts->node, &ss->cfts);
4425 	ret = cgroup_apply_cftypes(cfts, true);
4426 	if (ret)
4427 		cgroup_rm_cftypes_locked(cfts);
4428 
4429 	cgroup_unlock();
4430 	return ret;
4431 }
4432 
4433 /**
4434  * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
4435  * @ss: target cgroup subsystem
4436  * @cfts: zero-length name terminated array of cftypes
4437  *
4438  * Similar to cgroup_add_cftypes() but the added files are only used for
4439  * the default hierarchy.
4440  */
4441 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4442 {
4443 	struct cftype *cft;
4444 
4445 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
4446 		cft->flags |= __CFTYPE_ONLY_ON_DFL;
4447 	return cgroup_add_cftypes(ss, cfts);
4448 }
4449 
4450 /**
4451  * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
4452  * @ss: target cgroup subsystem
4453  * @cfts: zero-length name terminated array of cftypes
4454  *
4455  * Similar to cgroup_add_cftypes() but the added files are only used for
4456  * the legacy hierarchies.
4457  */
4458 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
4459 {
4460 	struct cftype *cft;
4461 
4462 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
4463 		cft->flags |= __CFTYPE_NOT_ON_DFL;
4464 	return cgroup_add_cftypes(ss, cfts);
4465 }
4466 
4467 /**
4468  * cgroup_file_notify - generate a file modified event for a cgroup_file
4469  * @cfile: target cgroup_file
4470  *
4471  * @cfile must have been obtained by setting cftype->file_offset.
4472  */
4473 void cgroup_file_notify(struct cgroup_file *cfile)
4474 {
4475 	unsigned long flags;
4476 
4477 	spin_lock_irqsave(&cgroup_file_kn_lock, flags);
4478 	if (cfile->kn) {
4479 		unsigned long last = cfile->notified_at;
4480 		unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV;
4481 
4482 		if (time_in_range(jiffies, last, next)) {
4483 			timer_reduce(&cfile->notify_timer, next);
4484 		} else {
4485 			kernfs_notify(cfile->kn);
4486 			cfile->notified_at = jiffies;
4487 		}
4488 	}
4489 	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
4490 }
4491 
4492 /**
4493  * cgroup_file_show - show or hide a hidden cgroup file
4494  * @cfile: target cgroup_file obtained by setting cftype->file_offset
4495  * @show: whether to show or hide
4496  */
4497 void cgroup_file_show(struct cgroup_file *cfile, bool show)
4498 {
4499 	struct kernfs_node *kn;
4500 
4501 	spin_lock_irq(&cgroup_file_kn_lock);
4502 	kn = cfile->kn;
4503 	kernfs_get(kn);
4504 	spin_unlock_irq(&cgroup_file_kn_lock);
4505 
4506 	if (kn)
4507 		kernfs_show(kn, show);
4508 
4509 	kernfs_put(kn);
4510 }
4511 
4512 /**
4513  * css_next_child - find the next child of a given css
4514  * @pos: the current position (%NULL to initiate traversal)
4515  * @parent: css whose children to walk
4516  *
4517  * This function returns the next child of @parent and should be called
4518  * under either cgroup_mutex or RCU read lock.  The only requirement is
4519  * that @parent and @pos are accessible.  The next sibling is guaranteed to
4520  * be returned regardless of their states.
4521  *
4522  * If a subsystem synchronizes ->css_online() and the start of iteration, a
4523  * css which finished ->css_online() is guaranteed to be visible in the
4524  * future iterations and will stay visible until the last reference is put.
4525  * A css which hasn't finished ->css_online() or already finished
4526  * ->css_offline() may show up during traversal.  It's each subsystem's
4527  * responsibility to synchronize against on/offlining.
4528  */
4529 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
4530 					   struct cgroup_subsys_state *parent)
4531 {
4532 	struct cgroup_subsys_state *next;
4533 
4534 	cgroup_assert_mutex_or_rcu_locked();
4535 
4536 	/*
4537 	 * @pos could already have been unlinked from the sibling list.
4538 	 * Once a cgroup is removed, its ->sibling.next is no longer
4539 	 * updated when its next sibling changes.  CSS_RELEASED is set when
4540 	 * @pos is taken off list, at which time its next pointer is valid,
4541 	 * and, as releases are serialized, the one pointed to by the next
4542 	 * pointer is guaranteed to not have started release yet.  This
4543 	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
4544 	 * critical section, the one pointed to by its next pointer is
4545 	 * guaranteed to not have finished its RCU grace period even if we
4546 	 * have dropped rcu_read_lock() in-between iterations.
4547 	 *
4548 	 * If @pos has CSS_RELEASED set, its next pointer can't be
4549 	 * dereferenced; however, as each css is given a monotonically
4550 	 * increasing unique serial number and always appended to the
4551 	 * sibling list, the next one can be found by walking the parent's
4552 	 * children until the first css with higher serial number than
4553 	 * @pos's.  While this path can be slower, it happens iff iteration
4554 	 * races against release and the race window is very small.
4555 	 */
4556 	if (!pos) {
4557 		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
4558 	} else if (likely(!(pos->flags & CSS_RELEASED))) {
4559 		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
4560 	} else {
4561 		list_for_each_entry_rcu(next, &parent->children, sibling,
4562 					lockdep_is_held(&cgroup_mutex))
4563 			if (next->serial_nr > pos->serial_nr)
4564 				break;
4565 	}
4566 
4567 	/*
4568 	 * @next, if not pointing to the head, can be dereferenced and is
4569 	 * the next sibling.
4570 	 */
4571 	if (&next->sibling != &parent->children)
4572 		return next;
4573 	return NULL;
4574 }
4575 
4576 /**
4577  * css_next_descendant_pre - find the next descendant for pre-order walk
4578  * @pos: the current position (%NULL to initiate traversal)
4579  * @root: css whose descendants to walk
4580  *
4581  * To be used by css_for_each_descendant_pre().  Find the next descendant
4582  * to visit for pre-order traversal of @root's descendants.  @root is
4583  * included in the iteration and the first node to be visited.
4584  *
4585  * While this function requires cgroup_mutex or RCU read locking, it
4586  * doesn't require the whole traversal to be contained in a single critical
4587  * section.  This function will return the correct next descendant as long
4588  * as both @pos and @root are accessible and @pos is a descendant of @root.
4589  *
4590  * If a subsystem synchronizes ->css_online() and the start of iteration, a
4591  * css which finished ->css_online() is guaranteed to be visible in the
4592  * future iterations and will stay visible until the last reference is put.
4593  * A css which hasn't finished ->css_online() or already finished
4594  * ->css_offline() may show up during traversal.  It's each subsystem's
4595  * responsibility to synchronize against on/offlining.
4596  */
4597 struct cgroup_subsys_state *
4598 css_next_descendant_pre(struct cgroup_subsys_state *pos,
4599 			struct cgroup_subsys_state *root)
4600 {
4601 	struct cgroup_subsys_state *next;
4602 
4603 	cgroup_assert_mutex_or_rcu_locked();
4604 
4605 	/* if first iteration, visit @root */
4606 	if (!pos)
4607 		return root;
4608 
4609 	/* visit the first child if exists */
4610 	next = css_next_child(NULL, pos);
4611 	if (next)
4612 		return next;
4613 
4614 	/* no child, visit my or the closest ancestor's next sibling */
4615 	while (pos != root) {
4616 		next = css_next_child(pos, pos->parent);
4617 		if (next)
4618 			return next;
4619 		pos = pos->parent;
4620 	}
4621 
4622 	return NULL;
4623 }
4624 EXPORT_SYMBOL_GPL(css_next_descendant_pre);
4625 
4626 /**
4627  * css_rightmost_descendant - return the rightmost descendant of a css
4628  * @pos: css of interest
4629  *
4630  * Return the rightmost descendant of @pos.  If there's no descendant, @pos
4631  * is returned.  This can be used during pre-order traversal to skip
4632  * subtree of @pos.
4633  *
4634  * While this function requires cgroup_mutex or RCU read locking, it
4635  * doesn't require the whole traversal to be contained in a single critical
4636  * section.  This function will return the correct rightmost descendant as
4637  * long as @pos is accessible.
4638  */
4639 struct cgroup_subsys_state *
4640 css_rightmost_descendant(struct cgroup_subsys_state *pos)
4641 {
4642 	struct cgroup_subsys_state *last, *tmp;
4643 
4644 	cgroup_assert_mutex_or_rcu_locked();
4645 
4646 	do {
4647 		last = pos;
4648 		/* ->prev isn't RCU safe, walk ->next till the end */
4649 		pos = NULL;
4650 		css_for_each_child(tmp, last)
4651 			pos = tmp;
4652 	} while (pos);
4653 
4654 	return last;
4655 }
4656 
4657 static struct cgroup_subsys_state *
4658 css_leftmost_descendant(struct cgroup_subsys_state *pos)
4659 {
4660 	struct cgroup_subsys_state *last;
4661 
4662 	do {
4663 		last = pos;
4664 		pos = css_next_child(NULL, pos);
4665 	} while (pos);
4666 
4667 	return last;
4668 }
4669 
4670 /**
4671  * css_next_descendant_post - find the next descendant for post-order walk
4672  * @pos: the current position (%NULL to initiate traversal)
4673  * @root: css whose descendants to walk
4674  *
4675  * To be used by css_for_each_descendant_post().  Find the next descendant
4676  * to visit for post-order traversal of @root's descendants.  @root is
4677  * included in the iteration and the last node to be visited.
4678  *
4679  * While this function requires cgroup_mutex or RCU read locking, it
4680  * doesn't require the whole traversal to be contained in a single critical
4681  * section.  This function will return the correct next descendant as long
4682  * as both @pos and @cgroup are accessible and @pos is a descendant of
4683  * @cgroup.
4684  *
4685  * If a subsystem synchronizes ->css_online() and the start of iteration, a
4686  * css which finished ->css_online() is guaranteed to be visible in the
4687  * future iterations and will stay visible until the last reference is put.
4688  * A css which hasn't finished ->css_online() or already finished
4689  * ->css_offline() may show up during traversal.  It's each subsystem's
4690  * responsibility to synchronize against on/offlining.
4691  */
4692 struct cgroup_subsys_state *
4693 css_next_descendant_post(struct cgroup_subsys_state *pos,
4694 			 struct cgroup_subsys_state *root)
4695 {
4696 	struct cgroup_subsys_state *next;
4697 
4698 	cgroup_assert_mutex_or_rcu_locked();
4699 
4700 	/* if first iteration, visit leftmost descendant which may be @root */
4701 	if (!pos)
4702 		return css_leftmost_descendant(root);
4703 
4704 	/* if we visited @root, we're done */
4705 	if (pos == root)
4706 		return NULL;
4707 
4708 	/* if there's an unvisited sibling, visit its leftmost descendant */
4709 	next = css_next_child(pos, pos->parent);
4710 	if (next)
4711 		return css_leftmost_descendant(next);
4712 
4713 	/* no sibling left, visit parent */
4714 	return pos->parent;
4715 }
4716 
4717 /**
4718  * css_has_online_children - does a css have online children
4719  * @css: the target css
4720  *
4721  * Returns %true if @css has any online children; otherwise, %false.  This
4722  * function can be called from any context but the caller is responsible
4723  * for synchronizing against on/offlining as necessary.
4724  */
4725 bool css_has_online_children(struct cgroup_subsys_state *css)
4726 {
4727 	struct cgroup_subsys_state *child;
4728 	bool ret = false;
4729 
4730 	rcu_read_lock();
4731 	css_for_each_child(child, css) {
4732 		if (child->flags & CSS_ONLINE) {
4733 			ret = true;
4734 			break;
4735 		}
4736 	}
4737 	rcu_read_unlock();
4738 	return ret;
4739 }
4740 
4741 static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it)
4742 {
4743 	struct list_head *l;
4744 	struct cgrp_cset_link *link;
4745 	struct css_set *cset;
4746 
4747 	lockdep_assert_held(&css_set_lock);
4748 
4749 	/* find the next threaded cset */
4750 	if (it->tcset_pos) {
4751 		l = it->tcset_pos->next;
4752 
4753 		if (l != it->tcset_head) {
4754 			it->tcset_pos = l;
4755 			return container_of(l, struct css_set,
4756 					    threaded_csets_node);
4757 		}
4758 
4759 		it->tcset_pos = NULL;
4760 	}
4761 
4762 	/* find the next cset */
4763 	l = it->cset_pos;
4764 	l = l->next;
4765 	if (l == it->cset_head) {
4766 		it->cset_pos = NULL;
4767 		return NULL;
4768 	}
4769 
4770 	if (it->ss) {
4771 		cset = container_of(l, struct css_set, e_cset_node[it->ss->id]);
4772 	} else {
4773 		link = list_entry(l, struct cgrp_cset_link, cset_link);
4774 		cset = link->cset;
4775 	}
4776 
4777 	it->cset_pos = l;
4778 
4779 	/* initialize threaded css_set walking */
4780 	if (it->flags & CSS_TASK_ITER_THREADED) {
4781 		if (it->cur_dcset)
4782 			put_css_set_locked(it->cur_dcset);
4783 		it->cur_dcset = cset;
4784 		get_css_set(cset);
4785 
4786 		it->tcset_head = &cset->threaded_csets;
4787 		it->tcset_pos = &cset->threaded_csets;
4788 	}
4789 
4790 	return cset;
4791 }
4792 
4793 /**
4794  * css_task_iter_advance_css_set - advance a task iterator to the next css_set
4795  * @it: the iterator to advance
4796  *
4797  * Advance @it to the next css_set to walk.
4798  */
4799 static void css_task_iter_advance_css_set(struct css_task_iter *it)
4800 {
4801 	struct css_set *cset;
4802 
4803 	lockdep_assert_held(&css_set_lock);
4804 
4805 	/* Advance to the next non-empty css_set and find first non-empty tasks list*/
4806 	while ((cset = css_task_iter_next_css_set(it))) {
4807 		if (!list_empty(&cset->tasks)) {
4808 			it->cur_tasks_head = &cset->tasks;
4809 			break;
4810 		} else if (!list_empty(&cset->mg_tasks)) {
4811 			it->cur_tasks_head = &cset->mg_tasks;
4812 			break;
4813 		} else if (!list_empty(&cset->dying_tasks)) {
4814 			it->cur_tasks_head = &cset->dying_tasks;
4815 			break;
4816 		}
4817 	}
4818 	if (!cset) {
4819 		it->task_pos = NULL;
4820 		return;
4821 	}
4822 	it->task_pos = it->cur_tasks_head->next;
4823 
4824 	/*
4825 	 * We don't keep css_sets locked across iteration steps and thus
4826 	 * need to take steps to ensure that iteration can be resumed after
4827 	 * the lock is re-acquired.  Iteration is performed at two levels -
4828 	 * css_sets and tasks in them.
4829 	 *
4830 	 * Once created, a css_set never leaves its cgroup lists, so a
4831 	 * pinned css_set is guaranteed to stay put and we can resume
4832 	 * iteration afterwards.
4833 	 *
4834 	 * Tasks may leave @cset across iteration steps.  This is resolved
4835 	 * by registering each iterator with the css_set currently being
4836 	 * walked and making css_set_move_task() advance iterators whose
4837 	 * next task is leaving.
4838 	 */
4839 	if (it->cur_cset) {
4840 		list_del(&it->iters_node);
4841 		put_css_set_locked(it->cur_cset);
4842 	}
4843 	get_css_set(cset);
4844 	it->cur_cset = cset;
4845 	list_add(&it->iters_node, &cset->task_iters);
4846 }
4847 
4848 static void css_task_iter_skip(struct css_task_iter *it,
4849 			       struct task_struct *task)
4850 {
4851 	lockdep_assert_held(&css_set_lock);
4852 
4853 	if (it->task_pos == &task->cg_list) {
4854 		it->task_pos = it->task_pos->next;
4855 		it->flags |= CSS_TASK_ITER_SKIPPED;
4856 	}
4857 }
4858 
4859 static void css_task_iter_advance(struct css_task_iter *it)
4860 {
4861 	struct task_struct *task;
4862 
4863 	lockdep_assert_held(&css_set_lock);
4864 repeat:
4865 	if (it->task_pos) {
4866 		/*
4867 		 * Advance iterator to find next entry. We go through cset
4868 		 * tasks, mg_tasks and dying_tasks, when consumed we move onto
4869 		 * the next cset.
4870 		 */
4871 		if (it->flags & CSS_TASK_ITER_SKIPPED)
4872 			it->flags &= ~CSS_TASK_ITER_SKIPPED;
4873 		else
4874 			it->task_pos = it->task_pos->next;
4875 
4876 		if (it->task_pos == &it->cur_cset->tasks) {
4877 			it->cur_tasks_head = &it->cur_cset->mg_tasks;
4878 			it->task_pos = it->cur_tasks_head->next;
4879 		}
4880 		if (it->task_pos == &it->cur_cset->mg_tasks) {
4881 			it->cur_tasks_head = &it->cur_cset->dying_tasks;
4882 			it->task_pos = it->cur_tasks_head->next;
4883 		}
4884 		if (it->task_pos == &it->cur_cset->dying_tasks)
4885 			css_task_iter_advance_css_set(it);
4886 	} else {
4887 		/* called from start, proceed to the first cset */
4888 		css_task_iter_advance_css_set(it);
4889 	}
4890 
4891 	if (!it->task_pos)
4892 		return;
4893 
4894 	task = list_entry(it->task_pos, struct task_struct, cg_list);
4895 
4896 	if (it->flags & CSS_TASK_ITER_PROCS) {
4897 		/* if PROCS, skip over tasks which aren't group leaders */
4898 		if (!thread_group_leader(task))
4899 			goto repeat;
4900 
4901 		/* and dying leaders w/o live member threads */
4902 		if (it->cur_tasks_head == &it->cur_cset->dying_tasks &&
4903 		    !atomic_read(&task->signal->live))
4904 			goto repeat;
4905 	} else {
4906 		/* skip all dying ones */
4907 		if (it->cur_tasks_head == &it->cur_cset->dying_tasks)
4908 			goto repeat;
4909 	}
4910 }
4911 
4912 /**
4913  * css_task_iter_start - initiate task iteration
4914  * @css: the css to walk tasks of
4915  * @flags: CSS_TASK_ITER_* flags
4916  * @it: the task iterator to use
4917  *
4918  * Initiate iteration through the tasks of @css.  The caller can call
4919  * css_task_iter_next() to walk through the tasks until the function
4920  * returns NULL.  On completion of iteration, css_task_iter_end() must be
4921  * called.
4922  */
4923 void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
4924 			 struct css_task_iter *it)
4925 {
4926 	unsigned long irqflags;
4927 
4928 	memset(it, 0, sizeof(*it));
4929 
4930 	spin_lock_irqsave(&css_set_lock, irqflags);
4931 
4932 	it->ss = css->ss;
4933 	it->flags = flags;
4934 
4935 	if (CGROUP_HAS_SUBSYS_CONFIG && it->ss)
4936 		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
4937 	else
4938 		it->cset_pos = &css->cgroup->cset_links;
4939 
4940 	it->cset_head = it->cset_pos;
4941 
4942 	css_task_iter_advance(it);
4943 
4944 	spin_unlock_irqrestore(&css_set_lock, irqflags);
4945 }
4946 
4947 /**
4948  * css_task_iter_next - return the next task for the iterator
4949  * @it: the task iterator being iterated
4950  *
4951  * The "next" function for task iteration.  @it should have been
4952  * initialized via css_task_iter_start().  Returns NULL when the iteration
4953  * reaches the end.
4954  */
4955 struct task_struct *css_task_iter_next(struct css_task_iter *it)
4956 {
4957 	unsigned long irqflags;
4958 
4959 	if (it->cur_task) {
4960 		put_task_struct(it->cur_task);
4961 		it->cur_task = NULL;
4962 	}
4963 
4964 	spin_lock_irqsave(&css_set_lock, irqflags);
4965 
4966 	/* @it may be half-advanced by skips, finish advancing */
4967 	if (it->flags & CSS_TASK_ITER_SKIPPED)
4968 		css_task_iter_advance(it);
4969 
4970 	if (it->task_pos) {
4971 		it->cur_task = list_entry(it->task_pos, struct task_struct,
4972 					  cg_list);
4973 		get_task_struct(it->cur_task);
4974 		css_task_iter_advance(it);
4975 	}
4976 
4977 	spin_unlock_irqrestore(&css_set_lock, irqflags);
4978 
4979 	return it->cur_task;
4980 }
4981 
4982 /**
4983  * css_task_iter_end - finish task iteration
4984  * @it: the task iterator to finish
4985  *
4986  * Finish task iteration started by css_task_iter_start().
4987  */
4988 void css_task_iter_end(struct css_task_iter *it)
4989 {
4990 	unsigned long irqflags;
4991 
4992 	if (it->cur_cset) {
4993 		spin_lock_irqsave(&css_set_lock, irqflags);
4994 		list_del(&it->iters_node);
4995 		put_css_set_locked(it->cur_cset);
4996 		spin_unlock_irqrestore(&css_set_lock, irqflags);
4997 	}
4998 
4999 	if (it->cur_dcset)
5000 		put_css_set(it->cur_dcset);
5001 
5002 	if (it->cur_task)
5003 		put_task_struct(it->cur_task);
5004 }
5005 
5006 static void cgroup_procs_release(struct kernfs_open_file *of)
5007 {
5008 	struct cgroup_file_ctx *ctx = of->priv;
5009 
5010 	if (ctx->procs.started)
5011 		css_task_iter_end(&ctx->procs.iter);
5012 }
5013 
5014 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
5015 {
5016 	struct kernfs_open_file *of = s->private;
5017 	struct cgroup_file_ctx *ctx = of->priv;
5018 
5019 	if (pos)
5020 		(*pos)++;
5021 
5022 	return css_task_iter_next(&ctx->procs.iter);
5023 }
5024 
5025 static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
5026 				  unsigned int iter_flags)
5027 {
5028 	struct kernfs_open_file *of = s->private;
5029 	struct cgroup *cgrp = seq_css(s)->cgroup;
5030 	struct cgroup_file_ctx *ctx = of->priv;
5031 	struct css_task_iter *it = &ctx->procs.iter;
5032 
5033 	/*
5034 	 * When a seq_file is seeked, it's always traversed sequentially
5035 	 * from position 0, so we can simply keep iterating on !0 *pos.
5036 	 */
5037 	if (!ctx->procs.started) {
5038 		if (WARN_ON_ONCE((*pos)))
5039 			return ERR_PTR(-EINVAL);
5040 		css_task_iter_start(&cgrp->self, iter_flags, it);
5041 		ctx->procs.started = true;
5042 	} else if (!(*pos)) {
5043 		css_task_iter_end(it);
5044 		css_task_iter_start(&cgrp->self, iter_flags, it);
5045 	} else
5046 		return it->cur_task;
5047 
5048 	return cgroup_procs_next(s, NULL, NULL);
5049 }
5050 
5051 static void *cgroup_procs_start(struct seq_file *s, loff_t *pos)
5052 {
5053 	struct cgroup *cgrp = seq_css(s)->cgroup;
5054 
5055 	/*
5056 	 * All processes of a threaded subtree belong to the domain cgroup
5057 	 * of the subtree.  Only threads can be distributed across the
5058 	 * subtree.  Reject reads on cgroup.procs in the subtree proper.
5059 	 * They're always empty anyway.
5060 	 */
5061 	if (cgroup_is_threaded(cgrp))
5062 		return ERR_PTR(-EOPNOTSUPP);
5063 
5064 	return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS |
5065 					    CSS_TASK_ITER_THREADED);
5066 }
5067 
5068 static int cgroup_procs_show(struct seq_file *s, void *v)
5069 {
5070 	seq_printf(s, "%d\n", task_pid_vnr(v));
5071 	return 0;
5072 }
5073 
5074 static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
5075 {
5076 	int ret;
5077 	struct inode *inode;
5078 
5079 	lockdep_assert_held(&cgroup_mutex);
5080 
5081 	inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
5082 	if (!inode)
5083 		return -ENOMEM;
5084 
5085 	ret = inode_permission(&nop_mnt_idmap, inode, MAY_WRITE);
5086 	iput(inode);
5087 	return ret;
5088 }
5089 
5090 static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
5091 					 struct cgroup *dst_cgrp,
5092 					 struct super_block *sb,
5093 					 struct cgroup_namespace *ns)
5094 {
5095 	struct cgroup *com_cgrp = src_cgrp;
5096 	int ret;
5097 
5098 	lockdep_assert_held(&cgroup_mutex);
5099 
5100 	/* find the common ancestor */
5101 	while (!cgroup_is_descendant(dst_cgrp, com_cgrp))
5102 		com_cgrp = cgroup_parent(com_cgrp);
5103 
5104 	/* %current should be authorized to migrate to the common ancestor */
5105 	ret = cgroup_may_write(com_cgrp, sb);
5106 	if (ret)
5107 		return ret;
5108 
5109 	/*
5110 	 * If namespaces are delegation boundaries, %current must be able
5111 	 * to see both source and destination cgroups from its namespace.
5112 	 */
5113 	if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) &&
5114 	    (!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) ||
5115 	     !cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp)))
5116 		return -ENOENT;
5117 
5118 	return 0;
5119 }
5120 
5121 static int cgroup_attach_permissions(struct cgroup *src_cgrp,
5122 				     struct cgroup *dst_cgrp,
5123 				     struct super_block *sb, bool threadgroup,
5124 				     struct cgroup_namespace *ns)
5125 {
5126 	int ret = 0;
5127 
5128 	ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns);
5129 	if (ret)
5130 		return ret;
5131 
5132 	ret = cgroup_migrate_vet_dst(dst_cgrp);
5133 	if (ret)
5134 		return ret;
5135 
5136 	if (!threadgroup && (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp))
5137 		ret = -EOPNOTSUPP;
5138 
5139 	return ret;
5140 }
5141 
5142 static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
5143 				    bool threadgroup)
5144 {
5145 	struct cgroup_file_ctx *ctx = of->priv;
5146 	struct cgroup *src_cgrp, *dst_cgrp;
5147 	struct task_struct *task;
5148 	const struct cred *saved_cred;
5149 	ssize_t ret;
5150 	bool threadgroup_locked;
5151 
5152 	dst_cgrp = cgroup_kn_lock_live(of->kn, false);
5153 	if (!dst_cgrp)
5154 		return -ENODEV;
5155 
5156 	task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
5157 	ret = PTR_ERR_OR_ZERO(task);
5158 	if (ret)
5159 		goto out_unlock;
5160 
5161 	/* find the source cgroup */
5162 	spin_lock_irq(&css_set_lock);
5163 	src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
5164 	spin_unlock_irq(&css_set_lock);
5165 
5166 	/*
5167 	 * Process and thread migrations follow same delegation rule. Check
5168 	 * permissions using the credentials from file open to protect against
5169 	 * inherited fd attacks.
5170 	 */
5171 	saved_cred = override_creds(of->file->f_cred);
5172 	ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
5173 					of->file->f_path.dentry->d_sb,
5174 					threadgroup, ctx->ns);
5175 	revert_creds(saved_cred);
5176 	if (ret)
5177 		goto out_finish;
5178 
5179 	ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
5180 
5181 out_finish:
5182 	cgroup_procs_write_finish(task, threadgroup_locked);
5183 out_unlock:
5184 	cgroup_kn_unlock(of->kn);
5185 
5186 	return ret;
5187 }
5188 
5189 static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
5190 				  char *buf, size_t nbytes, loff_t off)
5191 {
5192 	return __cgroup_procs_write(of, buf, true) ?: nbytes;
5193 }
5194 
5195 static void *cgroup_threads_start(struct seq_file *s, loff_t *pos)
5196 {
5197 	return __cgroup_procs_start(s, pos, 0);
5198 }
5199 
5200 static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
5201 				    char *buf, size_t nbytes, loff_t off)
5202 {
5203 	return __cgroup_procs_write(of, buf, false) ?: nbytes;
5204 }
5205 
5206 /* cgroup core interface files for the default hierarchy */
5207 static struct cftype cgroup_base_files[] = {
5208 	{
5209 		.name = "cgroup.type",
5210 		.flags = CFTYPE_NOT_ON_ROOT,
5211 		.seq_show = cgroup_type_show,
5212 		.write = cgroup_type_write,
5213 	},
5214 	{
5215 		.name = "cgroup.procs",
5216 		.flags = CFTYPE_NS_DELEGATABLE,
5217 		.file_offset = offsetof(struct cgroup, procs_file),
5218 		.release = cgroup_procs_release,
5219 		.seq_start = cgroup_procs_start,
5220 		.seq_next = cgroup_procs_next,
5221 		.seq_show = cgroup_procs_show,
5222 		.write = cgroup_procs_write,
5223 	},
5224 	{
5225 		.name = "cgroup.threads",
5226 		.flags = CFTYPE_NS_DELEGATABLE,
5227 		.release = cgroup_procs_release,
5228 		.seq_start = cgroup_threads_start,
5229 		.seq_next = cgroup_procs_next,
5230 		.seq_show = cgroup_procs_show,
5231 		.write = cgroup_threads_write,
5232 	},
5233 	{
5234 		.name = "cgroup.controllers",
5235 		.seq_show = cgroup_controllers_show,
5236 	},
5237 	{
5238 		.name = "cgroup.subtree_control",
5239 		.flags = CFTYPE_NS_DELEGATABLE,
5240 		.seq_show = cgroup_subtree_control_show,
5241 		.write = cgroup_subtree_control_write,
5242 	},
5243 	{
5244 		.name = "cgroup.events",
5245 		.flags = CFTYPE_NOT_ON_ROOT,
5246 		.file_offset = offsetof(struct cgroup, events_file),
5247 		.seq_show = cgroup_events_show,
5248 	},
5249 	{
5250 		.name = "cgroup.max.descendants",
5251 		.seq_show = cgroup_max_descendants_show,
5252 		.write = cgroup_max_descendants_write,
5253 	},
5254 	{
5255 		.name = "cgroup.max.depth",
5256 		.seq_show = cgroup_max_depth_show,
5257 		.write = cgroup_max_depth_write,
5258 	},
5259 	{
5260 		.name = "cgroup.stat",
5261 		.seq_show = cgroup_stat_show,
5262 	},
5263 	{
5264 		.name = "cgroup.freeze",
5265 		.flags = CFTYPE_NOT_ON_ROOT,
5266 		.seq_show = cgroup_freeze_show,
5267 		.write = cgroup_freeze_write,
5268 	},
5269 	{
5270 		.name = "cgroup.kill",
5271 		.flags = CFTYPE_NOT_ON_ROOT,
5272 		.write = cgroup_kill_write,
5273 	},
5274 	{
5275 		.name = "cpu.stat",
5276 		.seq_show = cpu_stat_show,
5277 	},
5278 	{
5279 		.name = "cpu.stat.local",
5280 		.seq_show = cpu_local_stat_show,
5281 	},
5282 	{ }	/* terminate */
5283 };
5284 
5285 static struct cftype cgroup_psi_files[] = {
5286 #ifdef CONFIG_PSI
5287 	{
5288 		.name = "io.pressure",
5289 		.file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
5290 		.open = cgroup_pressure_open,
5291 		.seq_show = cgroup_io_pressure_show,
5292 		.write = cgroup_io_pressure_write,
5293 		.poll = cgroup_pressure_poll,
5294 		.release = cgroup_pressure_release,
5295 	},
5296 	{
5297 		.name = "memory.pressure",
5298 		.file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
5299 		.open = cgroup_pressure_open,
5300 		.seq_show = cgroup_memory_pressure_show,
5301 		.write = cgroup_memory_pressure_write,
5302 		.poll = cgroup_pressure_poll,
5303 		.release = cgroup_pressure_release,
5304 	},
5305 	{
5306 		.name = "cpu.pressure",
5307 		.file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
5308 		.open = cgroup_pressure_open,
5309 		.seq_show = cgroup_cpu_pressure_show,
5310 		.write = cgroup_cpu_pressure_write,
5311 		.poll = cgroup_pressure_poll,
5312 		.release = cgroup_pressure_release,
5313 	},
5314 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
5315 	{
5316 		.name = "irq.pressure",
5317 		.file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
5318 		.open = cgroup_pressure_open,
5319 		.seq_show = cgroup_irq_pressure_show,
5320 		.write = cgroup_irq_pressure_write,
5321 		.poll = cgroup_pressure_poll,
5322 		.release = cgroup_pressure_release,
5323 	},
5324 #endif
5325 	{
5326 		.name = "cgroup.pressure",
5327 		.seq_show = cgroup_pressure_show,
5328 		.write = cgroup_pressure_write,
5329 	},
5330 #endif /* CONFIG_PSI */
5331 	{ }	/* terminate */
5332 };
5333 
5334 /*
5335  * css destruction is four-stage process.
5336  *
5337  * 1. Destruction starts.  Killing of the percpu_ref is initiated.
5338  *    Implemented in kill_css().
5339  *
5340  * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
5341  *    and thus css_tryget_online() is guaranteed to fail, the css can be
5342  *    offlined by invoking offline_css().  After offlining, the base ref is
5343  *    put.  Implemented in css_killed_work_fn().
5344  *
5345  * 3. When the percpu_ref reaches zero, the only possible remaining
5346  *    accessors are inside RCU read sections.  css_release() schedules the
5347  *    RCU callback.
5348  *
5349  * 4. After the grace period, the css can be freed.  Implemented in
5350  *    css_free_rwork_fn().
5351  *
5352  * It is actually hairier because both step 2 and 4 require process context
5353  * and thus involve punting to css->destroy_work adding two additional
5354  * steps to the already complex sequence.
5355  */
5356 static void css_free_rwork_fn(struct work_struct *work)
5357 {
5358 	struct cgroup_subsys_state *css = container_of(to_rcu_work(work),
5359 				struct cgroup_subsys_state, destroy_rwork);
5360 	struct cgroup_subsys *ss = css->ss;
5361 	struct cgroup *cgrp = css->cgroup;
5362 
5363 	percpu_ref_exit(&css->refcnt);
5364 
5365 	if (ss) {
5366 		/* css free path */
5367 		struct cgroup_subsys_state *parent = css->parent;
5368 		int id = css->id;
5369 
5370 		ss->css_free(css);
5371 		cgroup_idr_remove(&ss->css_idr, id);
5372 		cgroup_put(cgrp);
5373 
5374 		if (parent)
5375 			css_put(parent);
5376 	} else {
5377 		/* cgroup free path */
5378 		atomic_dec(&cgrp->root->nr_cgrps);
5379 		cgroup1_pidlist_destroy_all(cgrp);
5380 		cancel_work_sync(&cgrp->release_agent_work);
5381 		bpf_cgrp_storage_free(cgrp);
5382 
5383 		if (cgroup_parent(cgrp)) {
5384 			/*
5385 			 * We get a ref to the parent, and put the ref when
5386 			 * this cgroup is being freed, so it's guaranteed
5387 			 * that the parent won't be destroyed before its
5388 			 * children.
5389 			 */
5390 			cgroup_put(cgroup_parent(cgrp));
5391 			kernfs_put(cgrp->kn);
5392 			psi_cgroup_free(cgrp);
5393 			cgroup_rstat_exit(cgrp);
5394 			kfree(cgrp);
5395 		} else {
5396 			/*
5397 			 * This is root cgroup's refcnt reaching zero,
5398 			 * which indicates that the root should be
5399 			 * released.
5400 			 */
5401 			cgroup_destroy_root(cgrp->root);
5402 		}
5403 	}
5404 }
5405 
5406 static void css_release_work_fn(struct work_struct *work)
5407 {
5408 	struct cgroup_subsys_state *css =
5409 		container_of(work, struct cgroup_subsys_state, destroy_work);
5410 	struct cgroup_subsys *ss = css->ss;
5411 	struct cgroup *cgrp = css->cgroup;
5412 
5413 	cgroup_lock();
5414 
5415 	css->flags |= CSS_RELEASED;
5416 	list_del_rcu(&css->sibling);
5417 
5418 	if (ss) {
5419 		/* css release path */
5420 		if (!list_empty(&css->rstat_css_node)) {
5421 			cgroup_rstat_flush(cgrp);
5422 			list_del_rcu(&css->rstat_css_node);
5423 		}
5424 
5425 		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
5426 		if (ss->css_released)
5427 			ss->css_released(css);
5428 	} else {
5429 		struct cgroup *tcgrp;
5430 
5431 		/* cgroup release path */
5432 		TRACE_CGROUP_PATH(release, cgrp);
5433 
5434 		cgroup_rstat_flush(cgrp);
5435 
5436 		spin_lock_irq(&css_set_lock);
5437 		for (tcgrp = cgroup_parent(cgrp); tcgrp;
5438 		     tcgrp = cgroup_parent(tcgrp))
5439 			tcgrp->nr_dying_descendants--;
5440 		spin_unlock_irq(&css_set_lock);
5441 
5442 		/*
5443 		 * There are two control paths which try to determine
5444 		 * cgroup from dentry without going through kernfs -
5445 		 * cgroupstats_build() and css_tryget_online_from_dir().
5446 		 * Those are supported by RCU protecting clearing of
5447 		 * cgrp->kn->priv backpointer.
5448 		 */
5449 		if (cgrp->kn)
5450 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
5451 					 NULL);
5452 	}
5453 
5454 	cgroup_unlock();
5455 
5456 	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
5457 	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
5458 }
5459 
5460 static void css_release(struct percpu_ref *ref)
5461 {
5462 	struct cgroup_subsys_state *css =
5463 		container_of(ref, struct cgroup_subsys_state, refcnt);
5464 
5465 	INIT_WORK(&css->destroy_work, css_release_work_fn);
5466 	queue_work(cgroup_destroy_wq, &css->destroy_work);
5467 }
5468 
5469 static void init_and_link_css(struct cgroup_subsys_state *css,
5470 			      struct cgroup_subsys *ss, struct cgroup *cgrp)
5471 {
5472 	lockdep_assert_held(&cgroup_mutex);
5473 
5474 	cgroup_get_live(cgrp);
5475 
5476 	memset(css, 0, sizeof(*css));
5477 	css->cgroup = cgrp;
5478 	css->ss = ss;
5479 	css->id = -1;
5480 	INIT_LIST_HEAD(&css->sibling);
5481 	INIT_LIST_HEAD(&css->children);
5482 	INIT_LIST_HEAD(&css->rstat_css_node);
5483 	css->serial_nr = css_serial_nr_next++;
5484 	atomic_set(&css->online_cnt, 0);
5485 
5486 	if (cgroup_parent(cgrp)) {
5487 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
5488 		css_get(css->parent);
5489 	}
5490 
5491 	if (ss->css_rstat_flush)
5492 		list_add_rcu(&css->rstat_css_node, &cgrp->rstat_css_list);
5493 
5494 	BUG_ON(cgroup_css(cgrp, ss));
5495 }
5496 
5497 /* invoke ->css_online() on a new CSS and mark it online if successful */
5498 static int online_css(struct cgroup_subsys_state *css)
5499 {
5500 	struct cgroup_subsys *ss = css->ss;
5501 	int ret = 0;
5502 
5503 	lockdep_assert_held(&cgroup_mutex);
5504 
5505 	if (ss->css_online)
5506 		ret = ss->css_online(css);
5507 	if (!ret) {
5508 		css->flags |= CSS_ONLINE;
5509 		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
5510 
5511 		atomic_inc(&css->online_cnt);
5512 		if (css->parent)
5513 			atomic_inc(&css->parent->online_cnt);
5514 	}
5515 	return ret;
5516 }
5517 
5518 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
5519 static void offline_css(struct cgroup_subsys_state *css)
5520 {
5521 	struct cgroup_subsys *ss = css->ss;
5522 
5523 	lockdep_assert_held(&cgroup_mutex);
5524 
5525 	if (!(css->flags & CSS_ONLINE))
5526 		return;
5527 
5528 	if (ss->css_offline)
5529 		ss->css_offline(css);
5530 
5531 	css->flags &= ~CSS_ONLINE;
5532 	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
5533 
5534 	wake_up_all(&css->cgroup->offline_waitq);
5535 }
5536 
5537 /**
5538  * css_create - create a cgroup_subsys_state
5539  * @cgrp: the cgroup new css will be associated with
5540  * @ss: the subsys of new css
5541  *
5542  * Create a new css associated with @cgrp - @ss pair.  On success, the new
5543  * css is online and installed in @cgrp.  This function doesn't create the
5544  * interface files.  Returns 0 on success, -errno on failure.
5545  */
5546 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
5547 					      struct cgroup_subsys *ss)
5548 {
5549 	struct cgroup *parent = cgroup_parent(cgrp);
5550 	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
5551 	struct cgroup_subsys_state *css;
5552 	int err;
5553 
5554 	lockdep_assert_held(&cgroup_mutex);
5555 
5556 	css = ss->css_alloc(parent_css);
5557 	if (!css)
5558 		css = ERR_PTR(-ENOMEM);
5559 	if (IS_ERR(css))
5560 		return css;
5561 
5562 	init_and_link_css(css, ss, cgrp);
5563 
5564 	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
5565 	if (err)
5566 		goto err_free_css;
5567 
5568 	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
5569 	if (err < 0)
5570 		goto err_free_css;
5571 	css->id = err;
5572 
5573 	/* @css is ready to be brought online now, make it visible */
5574 	list_add_tail_rcu(&css->sibling, &parent_css->children);
5575 	cgroup_idr_replace(&ss->css_idr, css, css->id);
5576 
5577 	err = online_css(css);
5578 	if (err)
5579 		goto err_list_del;
5580 
5581 	return css;
5582 
5583 err_list_del:
5584 	list_del_rcu(&css->sibling);
5585 err_free_css:
5586 	list_del_rcu(&css->rstat_css_node);
5587 	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
5588 	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
5589 	return ERR_PTR(err);
5590 }
5591 
5592 /*
5593  * The returned cgroup is fully initialized including its control mask, but
5594  * it doesn't have the control mask applied.
5595  */
5596 static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
5597 				    umode_t mode)
5598 {
5599 	struct cgroup_root *root = parent->root;
5600 	struct cgroup *cgrp, *tcgrp;
5601 	struct kernfs_node *kn;
5602 	int level = parent->level + 1;
5603 	int ret;
5604 
5605 	/* allocate the cgroup and its ID, 0 is reserved for the root */
5606 	cgrp = kzalloc(struct_size(cgrp, ancestors, (level + 1)), GFP_KERNEL);
5607 	if (!cgrp)
5608 		return ERR_PTR(-ENOMEM);
5609 
5610 	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
5611 	if (ret)
5612 		goto out_free_cgrp;
5613 
5614 	ret = cgroup_rstat_init(cgrp);
5615 	if (ret)
5616 		goto out_cancel_ref;
5617 
5618 	/* create the directory */
5619 	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
5620 	if (IS_ERR(kn)) {
5621 		ret = PTR_ERR(kn);
5622 		goto out_stat_exit;
5623 	}
5624 	cgrp->kn = kn;
5625 
5626 	init_cgroup_housekeeping(cgrp);
5627 
5628 	cgrp->self.parent = &parent->self;
5629 	cgrp->root = root;
5630 	cgrp->level = level;
5631 
5632 	ret = psi_cgroup_alloc(cgrp);
5633 	if (ret)
5634 		goto out_kernfs_remove;
5635 
5636 	ret = cgroup_bpf_inherit(cgrp);
5637 	if (ret)
5638 		goto out_psi_free;
5639 
5640 	/*
5641 	 * New cgroup inherits effective freeze counter, and
5642 	 * if the parent has to be frozen, the child has too.
5643 	 */
5644 	cgrp->freezer.e_freeze = parent->freezer.e_freeze;
5645 	if (cgrp->freezer.e_freeze) {
5646 		/*
5647 		 * Set the CGRP_FREEZE flag, so when a process will be
5648 		 * attached to the child cgroup, it will become frozen.
5649 		 * At this point the new cgroup is unpopulated, so we can
5650 		 * consider it frozen immediately.
5651 		 */
5652 		set_bit(CGRP_FREEZE, &cgrp->flags);
5653 		set_bit(CGRP_FROZEN, &cgrp->flags);
5654 	}
5655 
5656 	spin_lock_irq(&css_set_lock);
5657 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
5658 		cgrp->ancestors[tcgrp->level] = tcgrp;
5659 
5660 		if (tcgrp != cgrp) {
5661 			tcgrp->nr_descendants++;
5662 
5663 			/*
5664 			 * If the new cgroup is frozen, all ancestor cgroups
5665 			 * get a new frozen descendant, but their state can't
5666 			 * change because of this.
5667 			 */
5668 			if (cgrp->freezer.e_freeze)
5669 				tcgrp->freezer.nr_frozen_descendants++;
5670 		}
5671 	}
5672 	spin_unlock_irq(&css_set_lock);
5673 
5674 	if (notify_on_release(parent))
5675 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
5676 
5677 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
5678 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
5679 
5680 	cgrp->self.serial_nr = css_serial_nr_next++;
5681 
5682 	/* allocation complete, commit to creation */
5683 	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
5684 	atomic_inc(&root->nr_cgrps);
5685 	cgroup_get_live(parent);
5686 
5687 	/*
5688 	 * On the default hierarchy, a child doesn't automatically inherit
5689 	 * subtree_control from the parent.  Each is configured manually.
5690 	 */
5691 	if (!cgroup_on_dfl(cgrp))
5692 		cgrp->subtree_control = cgroup_control(cgrp);
5693 
5694 	cgroup_propagate_control(cgrp);
5695 
5696 	return cgrp;
5697 
5698 out_psi_free:
5699 	psi_cgroup_free(cgrp);
5700 out_kernfs_remove:
5701 	kernfs_remove(cgrp->kn);
5702 out_stat_exit:
5703 	cgroup_rstat_exit(cgrp);
5704 out_cancel_ref:
5705 	percpu_ref_exit(&cgrp->self.refcnt);
5706 out_free_cgrp:
5707 	kfree(cgrp);
5708 	return ERR_PTR(ret);
5709 }
5710 
5711 static bool cgroup_check_hierarchy_limits(struct cgroup *parent)
5712 {
5713 	struct cgroup *cgroup;
5714 	int ret = false;
5715 	int level = 1;
5716 
5717 	lockdep_assert_held(&cgroup_mutex);
5718 
5719 	for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) {
5720 		if (cgroup->nr_descendants >= cgroup->max_descendants)
5721 			goto fail;
5722 
5723 		if (level > cgroup->max_depth)
5724 			goto fail;
5725 
5726 		level++;
5727 	}
5728 
5729 	ret = true;
5730 fail:
5731 	return ret;
5732 }
5733 
5734 int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
5735 {
5736 	struct cgroup *parent, *cgrp;
5737 	int ret;
5738 
5739 	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
5740 	if (strchr(name, '\n'))
5741 		return -EINVAL;
5742 
5743 	parent = cgroup_kn_lock_live(parent_kn, false);
5744 	if (!parent)
5745 		return -ENODEV;
5746 
5747 	if (!cgroup_check_hierarchy_limits(parent)) {
5748 		ret = -EAGAIN;
5749 		goto out_unlock;
5750 	}
5751 
5752 	cgrp = cgroup_create(parent, name, mode);
5753 	if (IS_ERR(cgrp)) {
5754 		ret = PTR_ERR(cgrp);
5755 		goto out_unlock;
5756 	}
5757 
5758 	/*
5759 	 * This extra ref will be put in cgroup_free_fn() and guarantees
5760 	 * that @cgrp->kn is always accessible.
5761 	 */
5762 	kernfs_get(cgrp->kn);
5763 
5764 	ret = cgroup_kn_set_ugid(cgrp->kn);
5765 	if (ret)
5766 		goto out_destroy;
5767 
5768 	ret = css_populate_dir(&cgrp->self);
5769 	if (ret)
5770 		goto out_destroy;
5771 
5772 	ret = cgroup_apply_control_enable(cgrp);
5773 	if (ret)
5774 		goto out_destroy;
5775 
5776 	TRACE_CGROUP_PATH(mkdir, cgrp);
5777 
5778 	/* let's create and online css's */
5779 	kernfs_activate(cgrp->kn);
5780 
5781 	ret = 0;
5782 	goto out_unlock;
5783 
5784 out_destroy:
5785 	cgroup_destroy_locked(cgrp);
5786 out_unlock:
5787 	cgroup_kn_unlock(parent_kn);
5788 	return ret;
5789 }
5790 
5791 /*
5792  * This is called when the refcnt of a css is confirmed to be killed.
5793  * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
5794  * initiate destruction and put the css ref from kill_css().
5795  */
5796 static void css_killed_work_fn(struct work_struct *work)
5797 {
5798 	struct cgroup_subsys_state *css =
5799 		container_of(work, struct cgroup_subsys_state, destroy_work);
5800 
5801 	cgroup_lock();
5802 
5803 	do {
5804 		offline_css(css);
5805 		css_put(css);
5806 		/* @css can't go away while we're holding cgroup_mutex */
5807 		css = css->parent;
5808 	} while (css && atomic_dec_and_test(&css->online_cnt));
5809 
5810 	cgroup_unlock();
5811 }
5812 
5813 /* css kill confirmation processing requires process context, bounce */
5814 static void css_killed_ref_fn(struct percpu_ref *ref)
5815 {
5816 	struct cgroup_subsys_state *css =
5817 		container_of(ref, struct cgroup_subsys_state, refcnt);
5818 
5819 	if (atomic_dec_and_test(&css->online_cnt)) {
5820 		INIT_WORK(&css->destroy_work, css_killed_work_fn);
5821 		queue_work(cgroup_destroy_wq, &css->destroy_work);
5822 	}
5823 }
5824 
5825 /**
5826  * kill_css - destroy a css
5827  * @css: css to destroy
5828  *
5829  * This function initiates destruction of @css by removing cgroup interface
5830  * files and putting its base reference.  ->css_offline() will be invoked
5831  * asynchronously once css_tryget_online() is guaranteed to fail and when
5832  * the reference count reaches zero, @css will be released.
5833  */
5834 static void kill_css(struct cgroup_subsys_state *css)
5835 {
5836 	lockdep_assert_held(&cgroup_mutex);
5837 
5838 	if (css->flags & CSS_DYING)
5839 		return;
5840 
5841 	css->flags |= CSS_DYING;
5842 
5843 	/*
5844 	 * This must happen before css is disassociated with its cgroup.
5845 	 * See seq_css() for details.
5846 	 */
5847 	css_clear_dir(css);
5848 
5849 	/*
5850 	 * Killing would put the base ref, but we need to keep it alive
5851 	 * until after ->css_offline().
5852 	 */
5853 	css_get(css);
5854 
5855 	/*
5856 	 * cgroup core guarantees that, by the time ->css_offline() is
5857 	 * invoked, no new css reference will be given out via
5858 	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
5859 	 * proceed to offlining css's because percpu_ref_kill() doesn't
5860 	 * guarantee that the ref is seen as killed on all CPUs on return.
5861 	 *
5862 	 * Use percpu_ref_kill_and_confirm() to get notifications as each
5863 	 * css is confirmed to be seen as killed on all CPUs.
5864 	 */
5865 	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
5866 }
5867 
5868 /**
5869  * cgroup_destroy_locked - the first stage of cgroup destruction
5870  * @cgrp: cgroup to be destroyed
5871  *
5872  * css's make use of percpu refcnts whose killing latency shouldn't be
5873  * exposed to userland and are RCU protected.  Also, cgroup core needs to
5874  * guarantee that css_tryget_online() won't succeed by the time
5875  * ->css_offline() is invoked.  To satisfy all the requirements,
5876  * destruction is implemented in the following two steps.
5877  *
5878  * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
5879  *     userland visible parts and start killing the percpu refcnts of
5880  *     css's.  Set up so that the next stage will be kicked off once all
5881  *     the percpu refcnts are confirmed to be killed.
5882  *
5883  * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
5884  *     rest of destruction.  Once all cgroup references are gone, the
5885  *     cgroup is RCU-freed.
5886  *
5887  * This function implements s1.  After this step, @cgrp is gone as far as
5888  * the userland is concerned and a new cgroup with the same name may be
5889  * created.  As cgroup doesn't care about the names internally, this
5890  * doesn't cause any problem.
5891  */
5892 static int cgroup_destroy_locked(struct cgroup *cgrp)
5893 	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
5894 {
5895 	struct cgroup *tcgrp, *parent = cgroup_parent(cgrp);
5896 	struct cgroup_subsys_state *css;
5897 	struct cgrp_cset_link *link;
5898 	int ssid;
5899 
5900 	lockdep_assert_held(&cgroup_mutex);
5901 
5902 	/*
5903 	 * Only migration can raise populated from zero and we're already
5904 	 * holding cgroup_mutex.
5905 	 */
5906 	if (cgroup_is_populated(cgrp))
5907 		return -EBUSY;
5908 
5909 	/*
5910 	 * Make sure there's no live children.  We can't test emptiness of
5911 	 * ->self.children as dead children linger on it while being
5912 	 * drained; otherwise, "rmdir parent/child parent" may fail.
5913 	 */
5914 	if (css_has_online_children(&cgrp->self))
5915 		return -EBUSY;
5916 
5917 	/*
5918 	 * Mark @cgrp and the associated csets dead.  The former prevents
5919 	 * further task migration and child creation by disabling
5920 	 * cgroup_kn_lock_live().  The latter makes the csets ignored by
5921 	 * the migration path.
5922 	 */
5923 	cgrp->self.flags &= ~CSS_ONLINE;
5924 
5925 	spin_lock_irq(&css_set_lock);
5926 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
5927 		link->cset->dead = true;
5928 	spin_unlock_irq(&css_set_lock);
5929 
5930 	/* initiate massacre of all css's */
5931 	for_each_css(css, ssid, cgrp)
5932 		kill_css(css);
5933 
5934 	/* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */
5935 	css_clear_dir(&cgrp->self);
5936 	kernfs_remove(cgrp->kn);
5937 
5938 	if (cgroup_is_threaded(cgrp))
5939 		parent->nr_threaded_children--;
5940 
5941 	spin_lock_irq(&css_set_lock);
5942 	for (tcgrp = parent; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
5943 		tcgrp->nr_descendants--;
5944 		tcgrp->nr_dying_descendants++;
5945 		/*
5946 		 * If the dying cgroup is frozen, decrease frozen descendants
5947 		 * counters of ancestor cgroups.
5948 		 */
5949 		if (test_bit(CGRP_FROZEN, &cgrp->flags))
5950 			tcgrp->freezer.nr_frozen_descendants--;
5951 	}
5952 	spin_unlock_irq(&css_set_lock);
5953 
5954 	cgroup1_check_for_release(parent);
5955 
5956 	cgroup_bpf_offline(cgrp);
5957 
5958 	/* put the base reference */
5959 	percpu_ref_kill(&cgrp->self.refcnt);
5960 
5961 	return 0;
5962 };
5963 
5964 int cgroup_rmdir(struct kernfs_node *kn)
5965 {
5966 	struct cgroup *cgrp;
5967 	int ret = 0;
5968 
5969 	cgrp = cgroup_kn_lock_live(kn, false);
5970 	if (!cgrp)
5971 		return 0;
5972 
5973 	ret = cgroup_destroy_locked(cgrp);
5974 	if (!ret)
5975 		TRACE_CGROUP_PATH(rmdir, cgrp);
5976 
5977 	cgroup_kn_unlock(kn);
5978 	return ret;
5979 }
5980 
5981 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
5982 	.show_options		= cgroup_show_options,
5983 	.mkdir			= cgroup_mkdir,
5984 	.rmdir			= cgroup_rmdir,
5985 	.show_path		= cgroup_show_path,
5986 };
5987 
5988 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
5989 {
5990 	struct cgroup_subsys_state *css;
5991 
5992 	pr_debug("Initializing cgroup subsys %s\n", ss->name);
5993 
5994 	cgroup_lock();
5995 
5996 	idr_init(&ss->css_idr);
5997 	INIT_LIST_HEAD(&ss->cfts);
5998 
5999 	/* Create the root cgroup state for this subsystem */
6000 	ss->root = &cgrp_dfl_root;
6001 	css = ss->css_alloc(NULL);
6002 	/* We don't handle early failures gracefully */
6003 	BUG_ON(IS_ERR(css));
6004 	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
6005 
6006 	/*
6007 	 * Root csses are never destroyed and we can't initialize
6008 	 * percpu_ref during early init.  Disable refcnting.
6009 	 */
6010 	css->flags |= CSS_NO_REF;
6011 
6012 	if (early) {
6013 		/* allocation can't be done safely during early init */
6014 		css->id = 1;
6015 	} else {
6016 		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
6017 		BUG_ON(css->id < 0);
6018 	}
6019 
6020 	/* Update the init_css_set to contain a subsys
6021 	 * pointer to this state - since the subsystem is
6022 	 * newly registered, all tasks and hence the
6023 	 * init_css_set is in the subsystem's root cgroup. */
6024 	init_css_set.subsys[ss->id] = css;
6025 
6026 	have_fork_callback |= (bool)ss->fork << ss->id;
6027 	have_exit_callback |= (bool)ss->exit << ss->id;
6028 	have_release_callback |= (bool)ss->release << ss->id;
6029 	have_canfork_callback |= (bool)ss->can_fork << ss->id;
6030 
6031 	/* At system boot, before all subsystems have been
6032 	 * registered, no tasks have been forked, so we don't
6033 	 * need to invoke fork callbacks here. */
6034 	BUG_ON(!list_empty(&init_task.tasks));
6035 
6036 	BUG_ON(online_css(css));
6037 
6038 	cgroup_unlock();
6039 }
6040 
6041 /**
6042  * cgroup_init_early - cgroup initialization at system boot
6043  *
6044  * Initialize cgroups at system boot, and initialize any
6045  * subsystems that request early init.
6046  */
6047 int __init cgroup_init_early(void)
6048 {
6049 	static struct cgroup_fs_context __initdata ctx;
6050 	struct cgroup_subsys *ss;
6051 	int i;
6052 
6053 	ctx.root = &cgrp_dfl_root;
6054 	init_cgroup_root(&ctx);
6055 	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
6056 
6057 	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
6058 
6059 	for_each_subsys(ss, i) {
6060 		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
6061 		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
6062 		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
6063 		     ss->id, ss->name);
6064 		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
6065 		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
6066 
6067 		ss->id = i;
6068 		ss->name = cgroup_subsys_name[i];
6069 		if (!ss->legacy_name)
6070 			ss->legacy_name = cgroup_subsys_name[i];
6071 
6072 		if (ss->early_init)
6073 			cgroup_init_subsys(ss, true);
6074 	}
6075 	return 0;
6076 }
6077 
6078 /**
6079  * cgroup_init - cgroup initialization
6080  *
6081  * Register cgroup filesystem and /proc file, and initialize
6082  * any subsystems that didn't request early init.
6083  */
6084 int __init cgroup_init(void)
6085 {
6086 	struct cgroup_subsys *ss;
6087 	int ssid;
6088 
6089 	BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
6090 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
6091 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_psi_files));
6092 	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
6093 
6094 	cgroup_rstat_boot();
6095 
6096 	get_user_ns(init_cgroup_ns.user_ns);
6097 
6098 	cgroup_lock();
6099 
6100 	/*
6101 	 * Add init_css_set to the hash table so that dfl_root can link to
6102 	 * it during init.
6103 	 */
6104 	hash_add(css_set_table, &init_css_set.hlist,
6105 		 css_set_hash(init_css_set.subsys));
6106 
6107 	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
6108 
6109 	cgroup_unlock();
6110 
6111 	for_each_subsys(ss, ssid) {
6112 		if (ss->early_init) {
6113 			struct cgroup_subsys_state *css =
6114 				init_css_set.subsys[ss->id];
6115 
6116 			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
6117 						   GFP_KERNEL);
6118 			BUG_ON(css->id < 0);
6119 		} else {
6120 			cgroup_init_subsys(ss, false);
6121 		}
6122 
6123 		list_add_tail(&init_css_set.e_cset_node[ssid],
6124 			      &cgrp_dfl_root.cgrp.e_csets[ssid]);
6125 
6126 		/*
6127 		 * Setting dfl_root subsys_mask needs to consider the
6128 		 * disabled flag and cftype registration needs kmalloc,
6129 		 * both of which aren't available during early_init.
6130 		 */
6131 		if (!cgroup_ssid_enabled(ssid))
6132 			continue;
6133 
6134 		if (cgroup1_ssid_disabled(ssid))
6135 			pr_info("Disabling %s control group subsystem in v1 mounts\n",
6136 				ss->legacy_name);
6137 
6138 		cgrp_dfl_root.subsys_mask |= 1 << ss->id;
6139 
6140 		/* implicit controllers must be threaded too */
6141 		WARN_ON(ss->implicit_on_dfl && !ss->threaded);
6142 
6143 		if (ss->implicit_on_dfl)
6144 			cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
6145 		else if (!ss->dfl_cftypes)
6146 			cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
6147 
6148 		if (ss->threaded)
6149 			cgrp_dfl_threaded_ss_mask |= 1 << ss->id;
6150 
6151 		if (ss->dfl_cftypes == ss->legacy_cftypes) {
6152 			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
6153 		} else {
6154 			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
6155 			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
6156 		}
6157 
6158 		if (ss->bind)
6159 			ss->bind(init_css_set.subsys[ssid]);
6160 
6161 		cgroup_lock();
6162 		css_populate_dir(init_css_set.subsys[ssid]);
6163 		cgroup_unlock();
6164 	}
6165 
6166 	/* init_css_set.subsys[] has been updated, re-hash */
6167 	hash_del(&init_css_set.hlist);
6168 	hash_add(css_set_table, &init_css_set.hlist,
6169 		 css_set_hash(init_css_set.subsys));
6170 
6171 	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
6172 	WARN_ON(register_filesystem(&cgroup_fs_type));
6173 	WARN_ON(register_filesystem(&cgroup2_fs_type));
6174 	WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show));
6175 #ifdef CONFIG_CPUSETS
6176 	WARN_ON(register_filesystem(&cpuset_fs_type));
6177 #endif
6178 
6179 	return 0;
6180 }
6181 
6182 static int __init cgroup_wq_init(void)
6183 {
6184 	/*
6185 	 * There isn't much point in executing destruction path in
6186 	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
6187 	 * Use 1 for @max_active.
6188 	 *
6189 	 * We would prefer to do this in cgroup_init() above, but that
6190 	 * is called before init_workqueues(): so leave this until after.
6191 	 */
6192 	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
6193 	BUG_ON(!cgroup_destroy_wq);
6194 	return 0;
6195 }
6196 core_initcall(cgroup_wq_init);
6197 
6198 void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
6199 {
6200 	struct kernfs_node *kn;
6201 
6202 	kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
6203 	if (!kn)
6204 		return;
6205 	kernfs_path(kn, buf, buflen);
6206 	kernfs_put(kn);
6207 }
6208 
6209 /*
6210  * cgroup_get_from_id : get the cgroup associated with cgroup id
6211  * @id: cgroup id
6212  * On success return the cgrp or ERR_PTR on failure
6213  * Only cgroups within current task's cgroup NS are valid.
6214  */
6215 struct cgroup *cgroup_get_from_id(u64 id)
6216 {
6217 	struct kernfs_node *kn;
6218 	struct cgroup *cgrp, *root_cgrp;
6219 
6220 	kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
6221 	if (!kn)
6222 		return ERR_PTR(-ENOENT);
6223 
6224 	if (kernfs_type(kn) != KERNFS_DIR) {
6225 		kernfs_put(kn);
6226 		return ERR_PTR(-ENOENT);
6227 	}
6228 
6229 	rcu_read_lock();
6230 
6231 	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
6232 	if (cgrp && !cgroup_tryget(cgrp))
6233 		cgrp = NULL;
6234 
6235 	rcu_read_unlock();
6236 	kernfs_put(kn);
6237 
6238 	if (!cgrp)
6239 		return ERR_PTR(-ENOENT);
6240 
6241 	root_cgrp = current_cgns_cgroup_dfl();
6242 	if (!cgroup_is_descendant(cgrp, root_cgrp)) {
6243 		cgroup_put(cgrp);
6244 		return ERR_PTR(-ENOENT);
6245 	}
6246 
6247 	return cgrp;
6248 }
6249 EXPORT_SYMBOL_GPL(cgroup_get_from_id);
6250 
6251 /*
6252  * proc_cgroup_show()
6253  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
6254  *  - Used for /proc/<pid>/cgroup.
6255  */
6256 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
6257 		     struct pid *pid, struct task_struct *tsk)
6258 {
6259 	char *buf;
6260 	int retval;
6261 	struct cgroup_root *root;
6262 
6263 	retval = -ENOMEM;
6264 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
6265 	if (!buf)
6266 		goto out;
6267 
6268 	cgroup_lock();
6269 	spin_lock_irq(&css_set_lock);
6270 
6271 	for_each_root(root) {
6272 		struct cgroup_subsys *ss;
6273 		struct cgroup *cgrp;
6274 		int ssid, count = 0;
6275 
6276 		if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible))
6277 			continue;
6278 
6279 		seq_printf(m, "%d:", root->hierarchy_id);
6280 		if (root != &cgrp_dfl_root)
6281 			for_each_subsys(ss, ssid)
6282 				if (root->subsys_mask & (1 << ssid))
6283 					seq_printf(m, "%s%s", count++ ? "," : "",
6284 						   ss->legacy_name);
6285 		if (strlen(root->name))
6286 			seq_printf(m, "%sname=%s", count ? "," : "",
6287 				   root->name);
6288 		seq_putc(m, ':');
6289 
6290 		cgrp = task_cgroup_from_root(tsk, root);
6291 
6292 		/*
6293 		 * On traditional hierarchies, all zombie tasks show up as
6294 		 * belonging to the root cgroup.  On the default hierarchy,
6295 		 * while a zombie doesn't show up in "cgroup.procs" and
6296 		 * thus can't be migrated, its /proc/PID/cgroup keeps
6297 		 * reporting the cgroup it belonged to before exiting.  If
6298 		 * the cgroup is removed before the zombie is reaped,
6299 		 * " (deleted)" is appended to the cgroup path.
6300 		 */
6301 		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
6302 			retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
6303 						current->nsproxy->cgroup_ns);
6304 			if (retval >= PATH_MAX)
6305 				retval = -ENAMETOOLONG;
6306 			if (retval < 0)
6307 				goto out_unlock;
6308 
6309 			seq_puts(m, buf);
6310 		} else {
6311 			seq_puts(m, "/");
6312 		}
6313 
6314 		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
6315 			seq_puts(m, " (deleted)\n");
6316 		else
6317 			seq_putc(m, '\n');
6318 	}
6319 
6320 	retval = 0;
6321 out_unlock:
6322 	spin_unlock_irq(&css_set_lock);
6323 	cgroup_unlock();
6324 	kfree(buf);
6325 out:
6326 	return retval;
6327 }
6328 
6329 /**
6330  * cgroup_fork - initialize cgroup related fields during copy_process()
6331  * @child: pointer to task_struct of forking parent process.
6332  *
6333  * A task is associated with the init_css_set until cgroup_post_fork()
6334  * attaches it to the target css_set.
6335  */
6336 void cgroup_fork(struct task_struct *child)
6337 {
6338 	RCU_INIT_POINTER(child->cgroups, &init_css_set);
6339 	INIT_LIST_HEAD(&child->cg_list);
6340 }
6341 
6342 /**
6343  * cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer
6344  * @f: file corresponding to cgroup_dir
6345  *
6346  * Find the cgroup from a file pointer associated with a cgroup directory.
6347  * Returns a pointer to the cgroup on success. ERR_PTR is returned if the
6348  * cgroup cannot be found.
6349  */
6350 static struct cgroup *cgroup_v1v2_get_from_file(struct file *f)
6351 {
6352 	struct cgroup_subsys_state *css;
6353 
6354 	css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
6355 	if (IS_ERR(css))
6356 		return ERR_CAST(css);
6357 
6358 	return css->cgroup;
6359 }
6360 
6361 /**
6362  * cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports
6363  * cgroup2.
6364  * @f: file corresponding to cgroup2_dir
6365  */
6366 static struct cgroup *cgroup_get_from_file(struct file *f)
6367 {
6368 	struct cgroup *cgrp = cgroup_v1v2_get_from_file(f);
6369 
6370 	if (IS_ERR(cgrp))
6371 		return ERR_CAST(cgrp);
6372 
6373 	if (!cgroup_on_dfl(cgrp)) {
6374 		cgroup_put(cgrp);
6375 		return ERR_PTR(-EBADF);
6376 	}
6377 
6378 	return cgrp;
6379 }
6380 
6381 /**
6382  * cgroup_css_set_fork - find or create a css_set for a child process
6383  * @kargs: the arguments passed to create the child process
6384  *
6385  * This functions finds or creates a new css_set which the child
6386  * process will be attached to in cgroup_post_fork(). By default,
6387  * the child process will be given the same css_set as its parent.
6388  *
6389  * If CLONE_INTO_CGROUP is specified this function will try to find an
6390  * existing css_set which includes the requested cgroup and if not create
6391  * a new css_set that the child will be attached to later. If this function
6392  * succeeds it will hold cgroup_threadgroup_rwsem on return. If
6393  * CLONE_INTO_CGROUP is requested this function will grab cgroup mutex
6394  * before grabbing cgroup_threadgroup_rwsem and will hold a reference
6395  * to the target cgroup.
6396  */
6397 static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
6398 	__acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem)
6399 {
6400 	int ret;
6401 	struct cgroup *dst_cgrp = NULL;
6402 	struct css_set *cset;
6403 	struct super_block *sb;
6404 	struct file *f;
6405 
6406 	if (kargs->flags & CLONE_INTO_CGROUP)
6407 		cgroup_lock();
6408 
6409 	cgroup_threadgroup_change_begin(current);
6410 
6411 	spin_lock_irq(&css_set_lock);
6412 	cset = task_css_set(current);
6413 	get_css_set(cset);
6414 	spin_unlock_irq(&css_set_lock);
6415 
6416 	if (!(kargs->flags & CLONE_INTO_CGROUP)) {
6417 		kargs->cset = cset;
6418 		return 0;
6419 	}
6420 
6421 	f = fget_raw(kargs->cgroup);
6422 	if (!f) {
6423 		ret = -EBADF;
6424 		goto err;
6425 	}
6426 	sb = f->f_path.dentry->d_sb;
6427 
6428 	dst_cgrp = cgroup_get_from_file(f);
6429 	if (IS_ERR(dst_cgrp)) {
6430 		ret = PTR_ERR(dst_cgrp);
6431 		dst_cgrp = NULL;
6432 		goto err;
6433 	}
6434 
6435 	if (cgroup_is_dead(dst_cgrp)) {
6436 		ret = -ENODEV;
6437 		goto err;
6438 	}
6439 
6440 	/*
6441 	 * Verify that we the target cgroup is writable for us. This is
6442 	 * usually done by the vfs layer but since we're not going through
6443 	 * the vfs layer here we need to do it "manually".
6444 	 */
6445 	ret = cgroup_may_write(dst_cgrp, sb);
6446 	if (ret)
6447 		goto err;
6448 
6449 	/*
6450 	 * Spawning a task directly into a cgroup works by passing a file
6451 	 * descriptor to the target cgroup directory. This can even be an O_PATH
6452 	 * file descriptor. But it can never be a cgroup.procs file descriptor.
6453 	 * This was done on purpose so spawning into a cgroup could be
6454 	 * conceptualized as an atomic
6455 	 *
6456 	 *   fd = openat(dfd_cgroup, "cgroup.procs", ...);
6457 	 *   write(fd, <child-pid>, ...);
6458 	 *
6459 	 * sequence, i.e. it's a shorthand for the caller opening and writing
6460 	 * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us
6461 	 * to always use the caller's credentials.
6462 	 */
6463 	ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
6464 					!(kargs->flags & CLONE_THREAD),
6465 					current->nsproxy->cgroup_ns);
6466 	if (ret)
6467 		goto err;
6468 
6469 	kargs->cset = find_css_set(cset, dst_cgrp);
6470 	if (!kargs->cset) {
6471 		ret = -ENOMEM;
6472 		goto err;
6473 	}
6474 
6475 	put_css_set(cset);
6476 	fput(f);
6477 	kargs->cgrp = dst_cgrp;
6478 	return ret;
6479 
6480 err:
6481 	cgroup_threadgroup_change_end(current);
6482 	cgroup_unlock();
6483 	if (f)
6484 		fput(f);
6485 	if (dst_cgrp)
6486 		cgroup_put(dst_cgrp);
6487 	put_css_set(cset);
6488 	if (kargs->cset)
6489 		put_css_set(kargs->cset);
6490 	return ret;
6491 }
6492 
6493 /**
6494  * cgroup_css_set_put_fork - drop references we took during fork
6495  * @kargs: the arguments passed to create the child process
6496  *
6497  * Drop references to the prepared css_set and target cgroup if
6498  * CLONE_INTO_CGROUP was requested.
6499  */
6500 static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
6501 	__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
6502 {
6503 	struct cgroup *cgrp = kargs->cgrp;
6504 	struct css_set *cset = kargs->cset;
6505 
6506 	cgroup_threadgroup_change_end(current);
6507 
6508 	if (cset) {
6509 		put_css_set(cset);
6510 		kargs->cset = NULL;
6511 	}
6512 
6513 	if (kargs->flags & CLONE_INTO_CGROUP) {
6514 		cgroup_unlock();
6515 		if (cgrp) {
6516 			cgroup_put(cgrp);
6517 			kargs->cgrp = NULL;
6518 		}
6519 	}
6520 }
6521 
6522 /**
6523  * cgroup_can_fork - called on a new task before the process is exposed
6524  * @child: the child process
6525  * @kargs: the arguments passed to create the child process
6526  *
6527  * This prepares a new css_set for the child process which the child will
6528  * be attached to in cgroup_post_fork().
6529  * This calls the subsystem can_fork() callbacks. If the cgroup_can_fork()
6530  * callback returns an error, the fork aborts with that error code. This
6531  * allows for a cgroup subsystem to conditionally allow or deny new forks.
6532  */
6533 int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs)
6534 {
6535 	struct cgroup_subsys *ss;
6536 	int i, j, ret;
6537 
6538 	ret = cgroup_css_set_fork(kargs);
6539 	if (ret)
6540 		return ret;
6541 
6542 	do_each_subsys_mask(ss, i, have_canfork_callback) {
6543 		ret = ss->can_fork(child, kargs->cset);
6544 		if (ret)
6545 			goto out_revert;
6546 	} while_each_subsys_mask();
6547 
6548 	return 0;
6549 
6550 out_revert:
6551 	for_each_subsys(ss, j) {
6552 		if (j >= i)
6553 			break;
6554 		if (ss->cancel_fork)
6555 			ss->cancel_fork(child, kargs->cset);
6556 	}
6557 
6558 	cgroup_css_set_put_fork(kargs);
6559 
6560 	return ret;
6561 }
6562 
6563 /**
6564  * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
6565  * @child: the child process
6566  * @kargs: the arguments passed to create the child process
6567  *
6568  * This calls the cancel_fork() callbacks if a fork failed *after*
6569  * cgroup_can_fork() succeeded and cleans up references we took to
6570  * prepare a new css_set for the child process in cgroup_can_fork().
6571  */
6572 void cgroup_cancel_fork(struct task_struct *child,
6573 			struct kernel_clone_args *kargs)
6574 {
6575 	struct cgroup_subsys *ss;
6576 	int i;
6577 
6578 	for_each_subsys(ss, i)
6579 		if (ss->cancel_fork)
6580 			ss->cancel_fork(child, kargs->cset);
6581 
6582 	cgroup_css_set_put_fork(kargs);
6583 }
6584 
6585 /**
6586  * cgroup_post_fork - finalize cgroup setup for the child process
6587  * @child: the child process
6588  * @kargs: the arguments passed to create the child process
6589  *
6590  * Attach the child process to its css_set calling the subsystem fork()
6591  * callbacks.
6592  */
6593 void cgroup_post_fork(struct task_struct *child,
6594 		      struct kernel_clone_args *kargs)
6595 	__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
6596 {
6597 	unsigned long cgrp_flags = 0;
6598 	bool kill = false;
6599 	struct cgroup_subsys *ss;
6600 	struct css_set *cset;
6601 	int i;
6602 
6603 	cset = kargs->cset;
6604 	kargs->cset = NULL;
6605 
6606 	spin_lock_irq(&css_set_lock);
6607 
6608 	/* init tasks are special, only link regular threads */
6609 	if (likely(child->pid)) {
6610 		if (kargs->cgrp)
6611 			cgrp_flags = kargs->cgrp->flags;
6612 		else
6613 			cgrp_flags = cset->dfl_cgrp->flags;
6614 
6615 		WARN_ON_ONCE(!list_empty(&child->cg_list));
6616 		cset->nr_tasks++;
6617 		css_set_move_task(child, NULL, cset, false);
6618 	} else {
6619 		put_css_set(cset);
6620 		cset = NULL;
6621 	}
6622 
6623 	if (!(child->flags & PF_KTHREAD)) {
6624 		if (unlikely(test_bit(CGRP_FREEZE, &cgrp_flags))) {
6625 			/*
6626 			 * If the cgroup has to be frozen, the new task has
6627 			 * too. Let's set the JOBCTL_TRAP_FREEZE jobctl bit to
6628 			 * get the task into the frozen state.
6629 			 */
6630 			spin_lock(&child->sighand->siglock);
6631 			WARN_ON_ONCE(child->frozen);
6632 			child->jobctl |= JOBCTL_TRAP_FREEZE;
6633 			spin_unlock(&child->sighand->siglock);
6634 
6635 			/*
6636 			 * Calling cgroup_update_frozen() isn't required here,
6637 			 * because it will be called anyway a bit later from
6638 			 * do_freezer_trap(). So we avoid cgroup's transient
6639 			 * switch from the frozen state and back.
6640 			 */
6641 		}
6642 
6643 		/*
6644 		 * If the cgroup is to be killed notice it now and take the
6645 		 * child down right after we finished preparing it for
6646 		 * userspace.
6647 		 */
6648 		kill = test_bit(CGRP_KILL, &cgrp_flags);
6649 	}
6650 
6651 	spin_unlock_irq(&css_set_lock);
6652 
6653 	/*
6654 	 * Call ss->fork().  This must happen after @child is linked on
6655 	 * css_set; otherwise, @child might change state between ->fork()
6656 	 * and addition to css_set.
6657 	 */
6658 	do_each_subsys_mask(ss, i, have_fork_callback) {
6659 		ss->fork(child);
6660 	} while_each_subsys_mask();
6661 
6662 	/* Make the new cset the root_cset of the new cgroup namespace. */
6663 	if (kargs->flags & CLONE_NEWCGROUP) {
6664 		struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset;
6665 
6666 		get_css_set(cset);
6667 		child->nsproxy->cgroup_ns->root_cset = cset;
6668 		put_css_set(rcset);
6669 	}
6670 
6671 	/* Cgroup has to be killed so take down child immediately. */
6672 	if (unlikely(kill))
6673 		do_send_sig_info(SIGKILL, SEND_SIG_NOINFO, child, PIDTYPE_TGID);
6674 
6675 	cgroup_css_set_put_fork(kargs);
6676 }
6677 
6678 /**
6679  * cgroup_exit - detach cgroup from exiting task
6680  * @tsk: pointer to task_struct of exiting process
6681  *
6682  * Description: Detach cgroup from @tsk.
6683  *
6684  */
6685 void cgroup_exit(struct task_struct *tsk)
6686 {
6687 	struct cgroup_subsys *ss;
6688 	struct css_set *cset;
6689 	int i;
6690 
6691 	spin_lock_irq(&css_set_lock);
6692 
6693 	WARN_ON_ONCE(list_empty(&tsk->cg_list));
6694 	cset = task_css_set(tsk);
6695 	css_set_move_task(tsk, cset, NULL, false);
6696 	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
6697 	cset->nr_tasks--;
6698 
6699 	if (dl_task(tsk))
6700 		dec_dl_tasks_cs(tsk);
6701 
6702 	WARN_ON_ONCE(cgroup_task_frozen(tsk));
6703 	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
6704 		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
6705 		cgroup_update_frozen(task_dfl_cgroup(tsk));
6706 
6707 	spin_unlock_irq(&css_set_lock);
6708 
6709 	/* see cgroup_post_fork() for details */
6710 	do_each_subsys_mask(ss, i, have_exit_callback) {
6711 		ss->exit(tsk);
6712 	} while_each_subsys_mask();
6713 }
6714 
6715 void cgroup_release(struct task_struct *task)
6716 {
6717 	struct cgroup_subsys *ss;
6718 	int ssid;
6719 
6720 	do_each_subsys_mask(ss, ssid, have_release_callback) {
6721 		ss->release(task);
6722 	} while_each_subsys_mask();
6723 
6724 	spin_lock_irq(&css_set_lock);
6725 	css_set_skip_task_iters(task_css_set(task), task);
6726 	list_del_init(&task->cg_list);
6727 	spin_unlock_irq(&css_set_lock);
6728 }
6729 
6730 void cgroup_free(struct task_struct *task)
6731 {
6732 	struct css_set *cset = task_css_set(task);
6733 	put_css_set(cset);
6734 }
6735 
6736 static int __init cgroup_disable(char *str)
6737 {
6738 	struct cgroup_subsys *ss;
6739 	char *token;
6740 	int i;
6741 
6742 	while ((token = strsep(&str, ",")) != NULL) {
6743 		if (!*token)
6744 			continue;
6745 
6746 		for_each_subsys(ss, i) {
6747 			if (strcmp(token, ss->name) &&
6748 			    strcmp(token, ss->legacy_name))
6749 				continue;
6750 
6751 			static_branch_disable(cgroup_subsys_enabled_key[i]);
6752 			pr_info("Disabling %s control group subsystem\n",
6753 				ss->name);
6754 		}
6755 
6756 		for (i = 0; i < OPT_FEATURE_COUNT; i++) {
6757 			if (strcmp(token, cgroup_opt_feature_names[i]))
6758 				continue;
6759 			cgroup_feature_disable_mask |= 1 << i;
6760 			pr_info("Disabling %s control group feature\n",
6761 				cgroup_opt_feature_names[i]);
6762 			break;
6763 		}
6764 	}
6765 	return 1;
6766 }
6767 __setup("cgroup_disable=", cgroup_disable);
6768 
6769 void __init __weak enable_debug_cgroup(void) { }
6770 
6771 static int __init enable_cgroup_debug(char *str)
6772 {
6773 	cgroup_debug = true;
6774 	enable_debug_cgroup();
6775 	return 1;
6776 }
6777 __setup("cgroup_debug", enable_cgroup_debug);
6778 
6779 static int __init cgroup_favordynmods_setup(char *str)
6780 {
6781 	return (kstrtobool(str, &have_favordynmods) == 0);
6782 }
6783 __setup("cgroup_favordynmods=", cgroup_favordynmods_setup);
6784 
6785 /**
6786  * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
6787  * @dentry: directory dentry of interest
6788  * @ss: subsystem of interest
6789  *
6790  * If @dentry is a directory for a cgroup which has @ss enabled on it, try
6791  * to get the corresponding css and return it.  If such css doesn't exist
6792  * or can't be pinned, an ERR_PTR value is returned.
6793  */
6794 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
6795 						       struct cgroup_subsys *ss)
6796 {
6797 	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
6798 	struct file_system_type *s_type = dentry->d_sb->s_type;
6799 	struct cgroup_subsys_state *css = NULL;
6800 	struct cgroup *cgrp;
6801 
6802 	/* is @dentry a cgroup dir? */
6803 	if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
6804 	    !kn || kernfs_type(kn) != KERNFS_DIR)
6805 		return ERR_PTR(-EBADF);
6806 
6807 	rcu_read_lock();
6808 
6809 	/*
6810 	 * This path doesn't originate from kernfs and @kn could already
6811 	 * have been or be removed at any point.  @kn->priv is RCU
6812 	 * protected for this access.  See css_release_work_fn() for details.
6813 	 */
6814 	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
6815 	if (cgrp)
6816 		css = cgroup_css(cgrp, ss);
6817 
6818 	if (!css || !css_tryget_online(css))
6819 		css = ERR_PTR(-ENOENT);
6820 
6821 	rcu_read_unlock();
6822 	return css;
6823 }
6824 
6825 /**
6826  * css_from_id - lookup css by id
6827  * @id: the cgroup id
6828  * @ss: cgroup subsys to be looked into
6829  *
6830  * Returns the css if there's valid one with @id, otherwise returns NULL.
6831  * Should be called under rcu_read_lock().
6832  */
6833 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
6834 {
6835 	WARN_ON_ONCE(!rcu_read_lock_held());
6836 	return idr_find(&ss->css_idr, id);
6837 }
6838 
6839 /**
6840  * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
6841  * @path: path on the default hierarchy
6842  *
6843  * Find the cgroup at @path on the default hierarchy, increment its
6844  * reference count and return it.  Returns pointer to the found cgroup on
6845  * success, ERR_PTR(-ENOENT) if @path doesn't exist or if the cgroup has already
6846  * been released and ERR_PTR(-ENOTDIR) if @path points to a non-directory.
6847  */
6848 struct cgroup *cgroup_get_from_path(const char *path)
6849 {
6850 	struct kernfs_node *kn;
6851 	struct cgroup *cgrp = ERR_PTR(-ENOENT);
6852 	struct cgroup *root_cgrp;
6853 
6854 	root_cgrp = current_cgns_cgroup_dfl();
6855 	kn = kernfs_walk_and_get(root_cgrp->kn, path);
6856 	if (!kn)
6857 		goto out;
6858 
6859 	if (kernfs_type(kn) != KERNFS_DIR) {
6860 		cgrp = ERR_PTR(-ENOTDIR);
6861 		goto out_kernfs;
6862 	}
6863 
6864 	rcu_read_lock();
6865 
6866 	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
6867 	if (!cgrp || !cgroup_tryget(cgrp))
6868 		cgrp = ERR_PTR(-ENOENT);
6869 
6870 	rcu_read_unlock();
6871 
6872 out_kernfs:
6873 	kernfs_put(kn);
6874 out:
6875 	return cgrp;
6876 }
6877 EXPORT_SYMBOL_GPL(cgroup_get_from_path);
6878 
6879 /**
6880  * cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd
6881  * @fd: fd obtained by open(cgroup_dir)
6882  *
6883  * Find the cgroup from a fd which should be obtained
6884  * by opening a cgroup directory.  Returns a pointer to the
6885  * cgroup on success. ERR_PTR is returned if the cgroup
6886  * cannot be found.
6887  */
6888 struct cgroup *cgroup_v1v2_get_from_fd(int fd)
6889 {
6890 	struct cgroup *cgrp;
6891 	struct fd f = fdget_raw(fd);
6892 	if (!f.file)
6893 		return ERR_PTR(-EBADF);
6894 
6895 	cgrp = cgroup_v1v2_get_from_file(f.file);
6896 	fdput(f);
6897 	return cgrp;
6898 }
6899 
6900 /**
6901  * cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports
6902  * cgroup2.
6903  * @fd: fd obtained by open(cgroup2_dir)
6904  */
6905 struct cgroup *cgroup_get_from_fd(int fd)
6906 {
6907 	struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd);
6908 
6909 	if (IS_ERR(cgrp))
6910 		return ERR_CAST(cgrp);
6911 
6912 	if (!cgroup_on_dfl(cgrp)) {
6913 		cgroup_put(cgrp);
6914 		return ERR_PTR(-EBADF);
6915 	}
6916 	return cgrp;
6917 }
6918 EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
6919 
6920 static u64 power_of_ten(int power)
6921 {
6922 	u64 v = 1;
6923 	while (power--)
6924 		v *= 10;
6925 	return v;
6926 }
6927 
6928 /**
6929  * cgroup_parse_float - parse a floating number
6930  * @input: input string
6931  * @dec_shift: number of decimal digits to shift
6932  * @v: output
6933  *
6934  * Parse a decimal floating point number in @input and store the result in
6935  * @v with decimal point right shifted @dec_shift times.  For example, if
6936  * @input is "12.3456" and @dec_shift is 3, *@v will be set to 12345.
6937  * Returns 0 on success, -errno otherwise.
6938  *
6939  * There's nothing cgroup specific about this function except that it's
6940  * currently the only user.
6941  */
6942 int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
6943 {
6944 	s64 whole, frac = 0;
6945 	int fstart = 0, fend = 0, flen;
6946 
6947 	if (!sscanf(input, "%lld.%n%lld%n", &whole, &fstart, &frac, &fend))
6948 		return -EINVAL;
6949 	if (frac < 0)
6950 		return -EINVAL;
6951 
6952 	flen = fend > fstart ? fend - fstart : 0;
6953 	if (flen < dec_shift)
6954 		frac *= power_of_ten(dec_shift - flen);
6955 	else
6956 		frac = DIV_ROUND_CLOSEST_ULL(frac, power_of_ten(flen - dec_shift));
6957 
6958 	*v = whole * power_of_ten(dec_shift) + frac;
6959 	return 0;
6960 }
6961 
6962 /*
6963  * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
6964  * definition in cgroup-defs.h.
6965  */
6966 #ifdef CONFIG_SOCK_CGROUP_DATA
6967 
6968 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
6969 {
6970 	struct cgroup *cgroup;
6971 
6972 	rcu_read_lock();
6973 	/* Don't associate the sock with unrelated interrupted task's cgroup. */
6974 	if (in_interrupt()) {
6975 		cgroup = &cgrp_dfl_root.cgrp;
6976 		cgroup_get(cgroup);
6977 		goto out;
6978 	}
6979 
6980 	while (true) {
6981 		struct css_set *cset;
6982 
6983 		cset = task_css_set(current);
6984 		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
6985 			cgroup = cset->dfl_cgrp;
6986 			break;
6987 		}
6988 		cpu_relax();
6989 	}
6990 out:
6991 	skcd->cgroup = cgroup;
6992 	cgroup_bpf_get(cgroup);
6993 	rcu_read_unlock();
6994 }
6995 
6996 void cgroup_sk_clone(struct sock_cgroup_data *skcd)
6997 {
6998 	struct cgroup *cgrp = sock_cgroup_ptr(skcd);
6999 
7000 	/*
7001 	 * We might be cloning a socket which is left in an empty
7002 	 * cgroup and the cgroup might have already been rmdir'd.
7003 	 * Don't use cgroup_get_live().
7004 	 */
7005 	cgroup_get(cgrp);
7006 	cgroup_bpf_get(cgrp);
7007 }
7008 
7009 void cgroup_sk_free(struct sock_cgroup_data *skcd)
7010 {
7011 	struct cgroup *cgrp = sock_cgroup_ptr(skcd);
7012 
7013 	cgroup_bpf_put(cgrp);
7014 	cgroup_put(cgrp);
7015 }
7016 
7017 #endif	/* CONFIG_SOCK_CGROUP_DATA */
7018 
7019 #ifdef CONFIG_SYSFS
7020 static ssize_t show_delegatable_files(struct cftype *files, char *buf,
7021 				      ssize_t size, const char *prefix)
7022 {
7023 	struct cftype *cft;
7024 	ssize_t ret = 0;
7025 
7026 	for (cft = files; cft && cft->name[0] != '\0'; cft++) {
7027 		if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
7028 			continue;
7029 
7030 		if (prefix)
7031 			ret += snprintf(buf + ret, size - ret, "%s.", prefix);
7032 
7033 		ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);
7034 
7035 		if (WARN_ON(ret >= size))
7036 			break;
7037 	}
7038 
7039 	return ret;
7040 }
7041 
7042 static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
7043 			      char *buf)
7044 {
7045 	struct cgroup_subsys *ss;
7046 	int ssid;
7047 	ssize_t ret = 0;
7048 
7049 	ret = show_delegatable_files(cgroup_base_files, buf + ret,
7050 				     PAGE_SIZE - ret, NULL);
7051 	if (cgroup_psi_enabled())
7052 		ret += show_delegatable_files(cgroup_psi_files, buf + ret,
7053 					      PAGE_SIZE - ret, NULL);
7054 
7055 	for_each_subsys(ss, ssid)
7056 		ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
7057 					      PAGE_SIZE - ret,
7058 					      cgroup_subsys_name[ssid]);
7059 
7060 	return ret;
7061 }
7062 static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
7063 
7064 static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
7065 			     char *buf)
7066 {
7067 	return snprintf(buf, PAGE_SIZE,
7068 			"nsdelegate\n"
7069 			"favordynmods\n"
7070 			"memory_localevents\n"
7071 			"memory_recursiveprot\n");
7072 }
7073 static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
7074 
7075 static struct attribute *cgroup_sysfs_attrs[] = {
7076 	&cgroup_delegate_attr.attr,
7077 	&cgroup_features_attr.attr,
7078 	NULL,
7079 };
7080 
7081 static const struct attribute_group cgroup_sysfs_attr_group = {
7082 	.attrs = cgroup_sysfs_attrs,
7083 	.name = "cgroup",
7084 };
7085 
7086 static int __init cgroup_sysfs_init(void)
7087 {
7088 	return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
7089 }
7090 subsys_initcall(cgroup_sysfs_init);
7091 
7092 #endif /* CONFIG_SYSFS */
7093