xref: /linux/mm/memcontrol.c (revision 2c97b5ae83dca56718774e7b4bf9640f05d11867)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  */
24 
25 #include <linux/page_counter.h>
26 #include <linux/memcontrol.h>
27 #include <linux/cgroup.h>
28 #include <linux/pagewalk.h>
29 #include <linux/sched/mm.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/hugetlb.h>
32 #include <linux/pagemap.h>
33 #include <linux/vm_event_item.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/swap_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include <linux/tracehook.h>
60 #include <linux/psi.h>
61 #include <linux/seq_buf.h>
62 #include "internal.h"
63 #include <net/sock.h>
64 #include <net/ip.h>
65 #include "slab.h"
66 
67 #include <linux/uaccess.h>
68 
69 #include <trace/events/vmscan.h>
70 
71 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
72 EXPORT_SYMBOL(memory_cgrp_subsys);
73 
74 struct mem_cgroup *root_mem_cgroup __read_mostly;
75 
76 #define MEM_CGROUP_RECLAIM_RETRIES	5
77 
78 /* Socket memory accounting disabled? */
79 static bool cgroup_memory_nosocket;
80 
81 /* Kernel memory accounting disabled? */
82 static bool cgroup_memory_nokmem;
83 
84 /* Whether the swap controller is active */
85 #ifdef CONFIG_MEMCG_SWAP
86 int do_swap_account __read_mostly;
87 #else
88 #define do_swap_account		0
89 #endif
90 
91 #ifdef CONFIG_CGROUP_WRITEBACK
92 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
93 #endif
94 
95 /* Whether legacy memory+swap accounting is active */
96 static bool do_memsw_account(void)
97 {
98 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99 }
100 
101 static const char *const mem_cgroup_lru_names[] = {
102 	"inactive_anon",
103 	"active_anon",
104 	"inactive_file",
105 	"active_file",
106 	"unevictable",
107 };
108 
109 #define THRESHOLDS_EVENTS_TARGET 128
110 #define SOFTLIMIT_EVENTS_TARGET 1024
111 
112 /*
113  * Cgroups above their limits are maintained in an RB-tree, independent of
114  * their hierarchy representation.
115  */
116 
117 struct mem_cgroup_tree_per_node {
118 	struct rb_root rb_root;
119 	struct rb_node *rb_rightmost;
120 	spinlock_t lock;
121 };
122 
123 struct mem_cgroup_tree {
124 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
125 };
126 
127 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
128 
129 /* for OOM */
130 struct mem_cgroup_eventfd_list {
131 	struct list_head list;
132 	struct eventfd_ctx *eventfd;
133 };
134 
135 /*
136  * cgroup_event represents events which userspace wants to receive.
137  */
138 struct mem_cgroup_event {
139 	/*
140 	 * memcg which the event belongs to.
141 	 */
142 	struct mem_cgroup *memcg;
143 	/*
144 	 * eventfd to signal userspace about the event.
145 	 */
146 	struct eventfd_ctx *eventfd;
147 	/*
148 	 * Each of these is stored in a list by the cgroup.
149 	 */
150 	struct list_head list;
151 	/*
152 	 * The register_event() callback will be used to add a new userspace
153 	 * waiter for changes related to this event.  Use eventfd_signal()
154 	 * on eventfd to send notification to userspace.
155 	 */
156 	int (*register_event)(struct mem_cgroup *memcg,
157 			      struct eventfd_ctx *eventfd, const char *args);
158 	/*
159 	 * The unregister_event() callback will be called when userspace closes
160 	 * the eventfd or when the cgroup is removed.  This callback must be
161 	 * set if you want to provide notification functionality.
162 	 */
163 	void (*unregister_event)(struct mem_cgroup *memcg,
164 				 struct eventfd_ctx *eventfd);
165 	/*
166 	 * All of the fields below are needed to unregister the event when
167 	 * userspace closes the eventfd.
168 	 */
169 	poll_table pt;
170 	wait_queue_head_t *wqh;
171 	wait_queue_entry_t wait;
172 	struct work_struct remove;
173 };
174 
175 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
176 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
177 
178 /* Stuff for moving charges at task migration. */
179 /*
180  * Types of charges to be moved.
181  */
182 #define MOVE_ANON	0x1U
183 #define MOVE_FILE	0x2U
184 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
185 
186 /* "mc" and its members are protected by cgroup_mutex */
187 static struct move_charge_struct {
188 	spinlock_t	  lock; /* for from, to */
189 	struct mm_struct  *mm;
190 	struct mem_cgroup *from;
191 	struct mem_cgroup *to;
192 	unsigned long flags;
193 	unsigned long precharge;
194 	unsigned long moved_charge;
195 	unsigned long moved_swap;
196 	struct task_struct *moving_task;	/* a task moving charges */
197 	wait_queue_head_t waitq;		/* a waitq for other context */
198 } mc = {
199 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
200 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
201 };
202 
203 /*
204  * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
205  * limit reclaim to prevent infinite loops, if they ever occur.
206  */
207 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
208 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
209 
210 enum charge_type {
211 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
212 	MEM_CGROUP_CHARGE_TYPE_ANON,
213 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
214 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
215 	NR_CHARGE_TYPE,
216 };
217 
218 /* for encoding cft->private value on file */
219 enum res_type {
220 	_MEM,
221 	_MEMSWAP,
222 	_OOM_TYPE,
223 	_KMEM,
224 	_TCP,
225 };
226 
227 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
228 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
229 #define MEMFILE_ATTR(val)	((val) & 0xffff)
230 /* Used for OOM notifier */
231 #define OOM_CONTROL		(0)
232 
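/*
 * Illustrative sketch (not part of the original source): encoding and
 * decoding a cft->private value with the MEMFILE_* macros above,
 * assuming a hypothetical attribute value of 2:
 *
 *	unsigned long priv = MEMFILE_PRIVATE(_MEMSWAP, 2);
 *
 *	MEMFILE_TYPE(priv);	returns _MEMSWAP (1)
 *	MEMFILE_ATTR(priv);	returns 2
 */
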
233 /*
234  * Iteration constructs for visiting all cgroups (under a tree).  If
235  * loops are exited prematurely (break), mem_cgroup_iter_break() must
236  * be used for reference counting.
237  */
238 #define for_each_mem_cgroup_tree(iter, root)		\
239 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
240 	     iter != NULL;				\
241 	     iter = mem_cgroup_iter(root, iter, NULL))
242 
243 #define for_each_mem_cgroup(iter)			\
244 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
245 	     iter != NULL;				\
246 	     iter = mem_cgroup_iter(NULL, iter, NULL))
247 
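/*
 * Illustrative usage sketch (hypothetical caller): when one of the walks
 * above is exited early with break, mem_cgroup_iter_break() must drop the
 * reference held on the current position, as required by the comment above:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (stop_condition(iter)) {	(hypothetical predicate)
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
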
248 static inline bool should_force_charge(void)
249 {
250 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
251 		(current->flags & PF_EXITING);
252 }
253 
254 /* Some nice accessors for the vmpressure. */
255 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
256 {
257 	if (!memcg)
258 		memcg = root_mem_cgroup;
259 	return &memcg->vmpressure;
260 }
261 
262 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
263 {
264 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
265 }
266 
267 #ifdef CONFIG_MEMCG_KMEM
268 /*
269  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
270  * The main reason for not using the cgroup id for this:
271  *  this works better in sparse environments, where we have a lot of memcgs,
272  *  but only a few kmem-limited ones. For instance, if we have 200 memcgs
273  *  and none but the 200th is kmem-limited, we'd need a 200-entry
274  *  array for that.
275  *
276  * The current size of the caches array is stored in memcg_nr_cache_ids. It
277  * will double each time we have to increase it.
278  */
279 static DEFINE_IDA(memcg_cache_ida);
280 int memcg_nr_cache_ids;
281 
282 /* Protects memcg_nr_cache_ids */
283 static DECLARE_RWSEM(memcg_cache_ids_sem);
284 
285 void memcg_get_cache_ids(void)
286 {
287 	down_read(&memcg_cache_ids_sem);
288 }
289 
290 void memcg_put_cache_ids(void)
291 {
292 	up_read(&memcg_cache_ids_sem);
293 }
294 
295 /*
296  * MIN_SIZE is different from 1, because we would like to avoid going through
297  * the alloc/free process all the time. In a small machine, 4 kmem-limited
298  * cgroups is a reasonable guess. In the future, it could be a parameter or
299  * tunable, but that is strictly not necessary.
300  *
301  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
302  * this constant directly from cgroup, but it is understandable that this is
303  * better kept as an internal representation in cgroup.c. In any case, the
304  * cgrp_id space is not getting any smaller, and we don't necessarily have to
305  * increase ours as well if it increases.
306  */
307 #define MEMCG_CACHES_MIN_SIZE 4
308 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
309 
310 /*
311  * A lot of the calls to the cache allocation functions are expected to be
312  * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
313  * conditional on this static branch, we have to allow modules that do
314  * kmem_cache_alloc and the like to see this symbol as well.
315  */
316 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
317 EXPORT_SYMBOL(memcg_kmem_enabled_key);
318 
319 struct workqueue_struct *memcg_kmem_cache_wq;
320 #endif
321 
322 static int memcg_shrinker_map_size;
323 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
324 
325 static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
326 {
327 	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
328 }
329 
330 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
331 					 int size, int old_size)
332 {
333 	struct memcg_shrinker_map *new, *old;
334 	int nid;
335 
336 	lockdep_assert_held(&memcg_shrinker_map_mutex);
337 
338 	for_each_node(nid) {
339 		old = rcu_dereference_protected(
340 			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
341 		/* Not yet online memcg */
342 		if (!old)
343 			return 0;
344 
345 		new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
346 		if (!new)
347 			return -ENOMEM;
348 
349 		/* Set all old bits, clear all new bits */
350 		memset(new->map, (int)0xff, old_size);
351 		memset((void *)new->map + old_size, 0, size - old_size);
352 
353 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
354 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
355 	}
356 
357 	return 0;
358 }
359 
360 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
361 {
362 	struct mem_cgroup_per_node *pn;
363 	struct memcg_shrinker_map *map;
364 	int nid;
365 
366 	if (mem_cgroup_is_root(memcg))
367 		return;
368 
369 	for_each_node(nid) {
370 		pn = mem_cgroup_nodeinfo(memcg, nid);
371 		map = rcu_dereference_protected(pn->shrinker_map, true);
372 		if (map)
373 			kvfree(map);
374 		rcu_assign_pointer(pn->shrinker_map, NULL);
375 	}
376 }
377 
378 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
379 {
380 	struct memcg_shrinker_map *map;
381 	int nid, size, ret = 0;
382 
383 	if (mem_cgroup_is_root(memcg))
384 		return 0;
385 
386 	mutex_lock(&memcg_shrinker_map_mutex);
387 	size = memcg_shrinker_map_size;
388 	for_each_node(nid) {
389 		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
390 		if (!map) {
391 			memcg_free_shrinker_maps(memcg);
392 			ret = -ENOMEM;
393 			break;
394 		}
395 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
396 	}
397 	mutex_unlock(&memcg_shrinker_map_mutex);
398 
399 	return ret;
400 }
401 
402 int memcg_expand_shrinker_maps(int new_id)
403 {
404 	int size, old_size, ret = 0;
405 	struct mem_cgroup *memcg;
406 
407 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
408 	old_size = memcg_shrinker_map_size;
409 	if (size <= old_size)
410 		return 0;
411 
412 	mutex_lock(&memcg_shrinker_map_mutex);
413 	if (!root_mem_cgroup)
414 		goto unlock;
415 
416 	for_each_mem_cgroup(memcg) {
417 		if (mem_cgroup_is_root(memcg))
418 			continue;
419 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
420 		if (ret)
421 			goto unlock;
422 	}
423 unlock:
424 	if (!ret)
425 		memcg_shrinker_map_size = size;
426 	mutex_unlock(&memcg_shrinker_map_mutex);
427 	return ret;
428 }
429 
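/*
 * Worked example for the size calculation above: with BITS_PER_LONG == 64,
 * new_id == 100 needs DIV_ROUND_UP(101, 64) == 2 longs, i.e. a 16-byte
 * bitmap per node; nothing is expanded unless that exceeds the current
 * memcg_shrinker_map_size.
 */
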
430 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
431 {
432 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
433 		struct memcg_shrinker_map *map;
434 
435 		rcu_read_lock();
436 		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
437 		/* Pairs with the smp_mb() in shrink_slab() */
438 		smp_mb__before_atomic();
439 		set_bit(shrinker_id, map->map);
440 		rcu_read_unlock();
441 	}
442 }
443 
444 /**
445  * mem_cgroup_css_from_page - css of the memcg associated with a page
446  * @page: page of interest
447  *
448  * If memcg is bound to the default hierarchy, css of the memcg associated
449  * with @page is returned.  The returned css remains associated with @page
450  * until it is released.
451  *
452  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
453  * is returned.
454  */
455 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
456 {
457 	struct mem_cgroup *memcg;
458 
459 	memcg = page->mem_cgroup;
460 
461 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
462 		memcg = root_mem_cgroup;
463 
464 	return &memcg->css;
465 }
466 
467 /**
468  * page_cgroup_ino - return inode number of the memcg a page is charged to
469  * @page: the page
470  *
471  * Look up the closest online ancestor of the memory cgroup @page is charged to
472  * and return its inode number or 0 if @page is not charged to any cgroup. It
473  * is safe to call this function without holding a reference to @page.
474  *
475  * Note, this function is inherently racy, because there is nothing to prevent
476  * the cgroup inode from getting torn down and potentially reallocated a moment
477  * after page_cgroup_ino() returns, so it only should be used by callers that
478  * do not care (such as procfs interfaces).
479  */
480 ino_t page_cgroup_ino(struct page *page)
481 {
482 	struct mem_cgroup *memcg;
483 	unsigned long ino = 0;
484 
485 	rcu_read_lock();
486 	if (PageSlab(page) && !PageTail(page))
487 		memcg = memcg_from_slab_page(page);
488 	else
489 		memcg = READ_ONCE(page->mem_cgroup);
490 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
491 		memcg = parent_mem_cgroup(memcg);
492 	if (memcg)
493 		ino = cgroup_ino(memcg->css.cgroup);
494 	rcu_read_unlock();
495 	return ino;
496 }
497 
498 static struct mem_cgroup_per_node *
499 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
500 {
501 	int nid = page_to_nid(page);
502 
503 	return memcg->nodeinfo[nid];
504 }
505 
506 static struct mem_cgroup_tree_per_node *
507 soft_limit_tree_node(int nid)
508 {
509 	return soft_limit_tree.rb_tree_per_node[nid];
510 }
511 
512 static struct mem_cgroup_tree_per_node *
513 soft_limit_tree_from_page(struct page *page)
514 {
515 	int nid = page_to_nid(page);
516 
517 	return soft_limit_tree.rb_tree_per_node[nid];
518 }
519 
520 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
521 					 struct mem_cgroup_tree_per_node *mctz,
522 					 unsigned long new_usage_in_excess)
523 {
524 	struct rb_node **p = &mctz->rb_root.rb_node;
525 	struct rb_node *parent = NULL;
526 	struct mem_cgroup_per_node *mz_node;
527 	bool rightmost = true;
528 
529 	if (mz->on_tree)
530 		return;
531 
532 	mz->usage_in_excess = new_usage_in_excess;
533 	if (!mz->usage_in_excess)
534 		return;
535 	while (*p) {
536 		parent = *p;
537 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
538 					tree_node);
539 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
540 			p = &(*p)->rb_left;
541 			rightmost = false;
542 		}
543 
544 		/*
545 		 * We can't avoid mem cgroups that are over their soft
546 		 * limit by the same amount
547 		 */
548 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
549 			p = &(*p)->rb_right;
550 	}
551 
552 	if (rightmost)
553 		mctz->rb_rightmost = &mz->tree_node;
554 
555 	rb_link_node(&mz->tree_node, parent, p);
556 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
557 	mz->on_tree = true;
558 }
559 
560 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
561 					 struct mem_cgroup_tree_per_node *mctz)
562 {
563 	if (!mz->on_tree)
564 		return;
565 
566 	if (&mz->tree_node == mctz->rb_rightmost)
567 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
568 
569 	rb_erase(&mz->tree_node, &mctz->rb_root);
570 	mz->on_tree = false;
571 }
572 
573 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
574 				       struct mem_cgroup_tree_per_node *mctz)
575 {
576 	unsigned long flags;
577 
578 	spin_lock_irqsave(&mctz->lock, flags);
579 	__mem_cgroup_remove_exceeded(mz, mctz);
580 	spin_unlock_irqrestore(&mctz->lock, flags);
581 }
582 
583 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
584 {
585 	unsigned long nr_pages = page_counter_read(&memcg->memory);
586 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
587 	unsigned long excess = 0;
588 
589 	if (nr_pages > soft_limit)
590 		excess = nr_pages - soft_limit;
591 
592 	return excess;
593 }
594 
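/*
 * Example: with a usage of 1000 pages and a soft limit of 600 pages,
 * soft_limit_excess() returns 400; usage at or below the soft limit
 * yields 0.
 */
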
595 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
596 {
597 	unsigned long excess;
598 	struct mem_cgroup_per_node *mz;
599 	struct mem_cgroup_tree_per_node *mctz;
600 
601 	mctz = soft_limit_tree_from_page(page);
602 	if (!mctz)
603 		return;
604 	/*
605 	 * Necessary to update all ancestors when hierarchy is used,
606 	 * because their event counters are not touched.
607 	 */
608 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
609 		mz = mem_cgroup_page_nodeinfo(memcg, page);
610 		excess = soft_limit_excess(memcg);
611 		/*
612 		 * We have to update the tree if mz is on the RB-tree or
613 		 * memcg is over its soft limit.
614 		 */
615 		if (excess || mz->on_tree) {
616 			unsigned long flags;
617 
618 			spin_lock_irqsave(&mctz->lock, flags);
619 			/* if on-tree, remove it */
620 			if (mz->on_tree)
621 				__mem_cgroup_remove_exceeded(mz, mctz);
622 			/*
623 			 * Insert again. mz->usage_in_excess will be updated.
624 			 * If excess is 0, no tree ops.
625 			 */
626 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
627 			spin_unlock_irqrestore(&mctz->lock, flags);
628 		}
629 	}
630 }
631 
632 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
633 {
634 	struct mem_cgroup_tree_per_node *mctz;
635 	struct mem_cgroup_per_node *mz;
636 	int nid;
637 
638 	for_each_node(nid) {
639 		mz = mem_cgroup_nodeinfo(memcg, nid);
640 		mctz = soft_limit_tree_node(nid);
641 		if (mctz)
642 			mem_cgroup_remove_exceeded(mz, mctz);
643 	}
644 }
645 
646 static struct mem_cgroup_per_node *
647 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
648 {
649 	struct mem_cgroup_per_node *mz;
650 
651 retry:
652 	mz = NULL;
653 	if (!mctz->rb_rightmost)
654 		goto done;		/* Nothing to reclaim from */
655 
656 	mz = rb_entry(mctz->rb_rightmost,
657 		      struct mem_cgroup_per_node, tree_node);
658 	/*
659 	 * Remove the node now but someone else can add it back;
660 	 * we will add it back at the end of reclaim to its correct
661 	 * position in the tree.
662 	 */
663 	__mem_cgroup_remove_exceeded(mz, mctz);
664 	if (!soft_limit_excess(mz->memcg) ||
665 	    !css_tryget_online(&mz->memcg->css))
666 		goto retry;
667 done:
668 	return mz;
669 }
670 
671 static struct mem_cgroup_per_node *
672 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
673 {
674 	struct mem_cgroup_per_node *mz;
675 
676 	spin_lock_irq(&mctz->lock);
677 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
678 	spin_unlock_irq(&mctz->lock);
679 	return mz;
680 }
681 
682 /**
683  * __mod_memcg_state - update cgroup memory statistics
684  * @memcg: the memory cgroup
685  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
686  * @val: delta to add to the counter, can be negative
687  */
688 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
689 {
690 	long x;
691 
692 	if (mem_cgroup_disabled())
693 		return;
694 
695 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
696 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
697 		struct mem_cgroup *mi;
698 
699 		/*
700 		 * Batch local counters to keep them in sync with
701 		 * the hierarchical ones.
702 		 */
703 		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
704 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
705 			atomic_long_add(x, &mi->vmstats[idx]);
706 		x = 0;
707 	}
708 	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
709 }
710 
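/*
 * Worked example of the batching above, assuming MEMCG_CHARGE_BATCH == 32:
 * with a per-cpu delta of 30 already accumulated, a call with val == 5
 * computes x == 35, flushes 35 into the local counter and the atomic
 * counters of the memcg and all of its ancestors, and resets the per-cpu
 * delta to 0.
 */
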
711 static struct mem_cgroup_per_node *
712 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
713 {
714 	struct mem_cgroup *parent;
715 
716 	parent = parent_mem_cgroup(pn->memcg);
717 	if (!parent)
718 		return NULL;
719 	return mem_cgroup_nodeinfo(parent, nid);
720 }
721 
722 /**
723  * __mod_lruvec_state - update lruvec memory statistics
724  * @lruvec: the lruvec
725  * @idx: the stat item
726  * @val: delta to add to the counter, can be negative
727  *
728  * The lruvec is the intersection of the NUMA node and a cgroup. This
729  * function updates all three counters that are affected by a
730  * change of state at this level: per-node, per-cgroup, per-lruvec.
731  */
732 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
733 			int val)
734 {
735 	pg_data_t *pgdat = lruvec_pgdat(lruvec);
736 	struct mem_cgroup_per_node *pn;
737 	struct mem_cgroup *memcg;
738 	long x;
739 
740 	/* Update node */
741 	__mod_node_page_state(pgdat, idx, val);
742 
743 	if (mem_cgroup_disabled())
744 		return;
745 
746 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
747 	memcg = pn->memcg;
748 
749 	/* Update memcg */
750 	__mod_memcg_state(memcg, idx, val);
751 
752 	/* Update lruvec */
753 	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
754 
755 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
756 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
757 		struct mem_cgroup_per_node *pi;
758 
759 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
760 			atomic_long_add(x, &pi->lruvec_stat[idx]);
761 		x = 0;
762 	}
763 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
764 }
765 
766 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
767 {
768 	struct page *page = virt_to_head_page(p);
769 	pg_data_t *pgdat = page_pgdat(page);
770 	struct mem_cgroup *memcg;
771 	struct lruvec *lruvec;
772 
773 	rcu_read_lock();
774 	memcg = memcg_from_slab_page(page);
775 
776 	/* Untracked pages have no memcg, no lruvec. Update only the node */
777 	if (!memcg || memcg == root_mem_cgroup) {
778 		__mod_node_page_state(pgdat, idx, val);
779 	} else {
780 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
781 		__mod_lruvec_state(lruvec, idx, val);
782 	}
783 	rcu_read_unlock();
784 }
785 
786 /**
787  * __count_memcg_events - account VM events in a cgroup
788  * @memcg: the memory cgroup
789  * @idx: the event item
790  * @count: the number of events that occurred
791  */
792 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
793 			  unsigned long count)
794 {
795 	unsigned long x;
796 
797 	if (mem_cgroup_disabled())
798 		return;
799 
800 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
801 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
802 		struct mem_cgroup *mi;
803 
804 		/*
805 		 * Batch local counters to keep them in sync with
806 		 * the hierarchical ones.
807 		 */
808 		__this_cpu_add(memcg->vmstats_local->events[idx], x);
809 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
810 			atomic_long_add(x, &mi->vmevents[idx]);
811 		x = 0;
812 	}
813 	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
814 }
815 
816 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
817 {
818 	return atomic_long_read(&memcg->vmevents[event]);
819 }
820 
821 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
822 {
823 	long x = 0;
824 	int cpu;
825 
826 	for_each_possible_cpu(cpu)
827 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
828 	return x;
829 }
830 
831 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
832 					 struct page *page,
833 					 bool compound, int nr_pages)
834 {
835 	/*
836 	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
837 	 * counted as CACHE even if it's on ANON LRU.
838 	 */
839 	if (PageAnon(page))
840 		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
841 	else {
842 		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
843 		if (PageSwapBacked(page))
844 			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
845 	}
846 
847 	if (compound) {
848 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
849 		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
850 	}
851 
852 	/* A pagein of a big page counts as one event, so ignore the page size */
853 	if (nr_pages > 0)
854 		__count_memcg_events(memcg, PGPGIN, 1);
855 	else {
856 		__count_memcg_events(memcg, PGPGOUT, 1);
857 		nr_pages = -nr_pages; /* for event */
858 	}
859 
860 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
861 }
862 
863 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
864 				       enum mem_cgroup_events_target target)
865 {
866 	unsigned long val, next;
867 
868 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
869 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
870 	/* from time_after() in jiffies.h */
871 	if ((long)(next - val) < 0) {
872 		switch (target) {
873 		case MEM_CGROUP_TARGET_THRESH:
874 			next = val + THRESHOLDS_EVENTS_TARGET;
875 			break;
876 		case MEM_CGROUP_TARGET_SOFTLIMIT:
877 			next = val + SOFTLIMIT_EVENTS_TARGET;
878 			break;
879 		default:
880 			break;
881 		}
882 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
883 		return true;
884 	}
885 	return false;
886 }
887 
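/*
 * Example of the wrap-safe comparison below (the same idiom as
 * time_after()): with next == 5 and val == 10, (long)(next - val) == -5
 * is negative, so the target has been passed and a new target of
 * val + step is written back.
 */
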
888 /*
889  * Check events in order. The threshold events are checked at a finer
890  * grain (more frequently) than the soft-limit tree updates.
891  */
892 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
893 {
894 	/* threshold event is triggered in finer grain than soft limit */
895 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
896 						MEM_CGROUP_TARGET_THRESH))) {
897 		bool do_softlimit;
898 
899 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
900 						MEM_CGROUP_TARGET_SOFTLIMIT);
901 		mem_cgroup_threshold(memcg);
902 		if (unlikely(do_softlimit))
903 			mem_cgroup_update_tree(memcg, page);
904 	}
905 }
906 
907 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
908 {
909 	/*
910 	 * mm_update_next_owner() may clear mm->owner to NULL
911 	 * if it races with swapoff, page migration, etc.
912 	 * So this can be called with p == NULL.
913 	 */
914 	if (unlikely(!p))
915 		return NULL;
916 
917 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
918 }
919 EXPORT_SYMBOL(mem_cgroup_from_task);
920 
921 /**
922  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
923  * @mm: mm from which memcg should be extracted. It can be NULL.
924  *
925  * Obtain a reference on mm->memcg and return it if successful. Otherwise
926  * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
927  * returned.
928  */
929 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
930 {
931 	struct mem_cgroup *memcg;
932 
933 	if (mem_cgroup_disabled())
934 		return NULL;
935 
936 	rcu_read_lock();
937 	do {
938 		/*
939 		 * Page cache insertions can happen without an
940 		 * actual mm context, e.g. during disk probing
941 		 * on boot, loopback IO, acct() writes etc.
942 		 */
943 		if (unlikely(!mm))
944 			memcg = root_mem_cgroup;
945 		else {
946 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
947 			if (unlikely(!memcg))
948 				memcg = root_mem_cgroup;
949 		}
950 	} while (!css_tryget(&memcg->css));
951 	rcu_read_unlock();
952 	return memcg;
953 }
954 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
955 
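/*
 * Illustrative usage sketch (hypothetical caller): the reference returned
 * by get_mem_cgroup_from_mm() must be dropped with css_put():
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *	if (memcg) {
 *		... charge or inspect the memcg ...
 *		css_put(&memcg->css);
 *	}
 */
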
956 /**
957  * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
958  * @page: page from which memcg should be extracted.
959  *
960  * Obtain a reference on page->memcg and return it if successful. Otherwise
961  * root_mem_cgroup is returned.
962  */
963 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
964 {
965 	struct mem_cgroup *memcg = page->mem_cgroup;
966 
967 	if (mem_cgroup_disabled())
968 		return NULL;
969 
970 	rcu_read_lock();
971 	if (!memcg || !css_tryget_online(&memcg->css))
972 		memcg = root_mem_cgroup;
973 	rcu_read_unlock();
974 	return memcg;
975 }
976 EXPORT_SYMBOL(get_mem_cgroup_from_page);
977 
978 /**
979  * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
980  */
981 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
982 {
983 	if (unlikely(current->active_memcg)) {
984 		struct mem_cgroup *memcg = root_mem_cgroup;
985 
986 		rcu_read_lock();
987 		if (css_tryget_online(&current->active_memcg->css))
988 			memcg = current->active_memcg;
989 		rcu_read_unlock();
990 		return memcg;
991 	}
992 	return get_mem_cgroup_from_mm(current->mm);
993 }
994 
995 /**
996  * mem_cgroup_iter - iterate over memory cgroup hierarchy
997  * @root: hierarchy root
998  * @prev: previously returned memcg, NULL on first invocation
999  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1000  *
1001  * Returns references to children of the hierarchy below @root, or
1002  * @root itself, or %NULL after a full round-trip.
1003  *
1004  * Caller must pass the return value in @prev on subsequent
1005  * invocations for reference counting, or use mem_cgroup_iter_break()
1006  * to cancel a hierarchy walk before the round-trip is complete.
1007  *
1008  * Reclaimers can specify a node and a priority level in @reclaim to
1009  * divide up the memcgs in the hierarchy among all concurrent
1010  * reclaimers operating on the same node and priority.
1011  */
1012 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1013 				   struct mem_cgroup *prev,
1014 				   struct mem_cgroup_reclaim_cookie *reclaim)
1015 {
1016 	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1017 	struct cgroup_subsys_state *css = NULL;
1018 	struct mem_cgroup *memcg = NULL;
1019 	struct mem_cgroup *pos = NULL;
1020 
1021 	if (mem_cgroup_disabled())
1022 		return NULL;
1023 
1024 	if (!root)
1025 		root = root_mem_cgroup;
1026 
1027 	if (prev && !reclaim)
1028 		pos = prev;
1029 
1030 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1031 		if (prev)
1032 			goto out;
1033 		return root;
1034 	}
1035 
1036 	rcu_read_lock();
1037 
1038 	if (reclaim) {
1039 		struct mem_cgroup_per_node *mz;
1040 
1041 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1042 		iter = &mz->iter;
1043 
1044 		if (prev && reclaim->generation != iter->generation)
1045 			goto out_unlock;
1046 
1047 		while (1) {
1048 			pos = READ_ONCE(iter->position);
1049 			if (!pos || css_tryget(&pos->css))
1050 				break;
1051 			/*
1052 			 * css reference reached zero, so iter->position will
1053 			 * be cleared by ->css_released. However, we should not
1054 			 * rely on this happening soon, because ->css_released
1055 			 * is called from a work queue, and by busy-waiting we
1056 			 * might block it. So we clear iter->position right
1057 			 * away.
1058 			 */
1059 			(void)cmpxchg(&iter->position, pos, NULL);
1060 		}
1061 	}
1062 
1063 	if (pos)
1064 		css = &pos->css;
1065 
1066 	for (;;) {
1067 		css = css_next_descendant_pre(css, &root->css);
1068 		if (!css) {
1069 			/*
1070 			 * Reclaimers share the hierarchy walk, and a
1071 			 * new one might jump in right at the end of
1072 			 * the hierarchy - make sure they see at least
1073 			 * one group and restart from the beginning.
1074 			 */
1075 			if (!prev)
1076 				continue;
1077 			break;
1078 		}
1079 
1080 		/*
1081 		 * Verify the css and acquire a reference.  The root
1082 		 * is provided by the caller, so we know it's alive
1083 		 * and kicking, and don't take an extra reference.
1084 		 */
1085 		memcg = mem_cgroup_from_css(css);
1086 
1087 		if (css == &root->css)
1088 			break;
1089 
1090 		if (css_tryget(css))
1091 			break;
1092 
1093 		memcg = NULL;
1094 	}
1095 
1096 	if (reclaim) {
1097 		/*
1098 		 * The position could have already been updated by a competing
1099 		 * thread, so check that the value hasn't changed since we read
1100 		 * it to avoid reclaiming from the same cgroup twice.
1101 		 */
1102 		(void)cmpxchg(&iter->position, pos, memcg);
1103 
1104 		if (pos)
1105 			css_put(&pos->css);
1106 
1107 		if (!memcg)
1108 			iter->generation++;
1109 		else if (!prev)
1110 			reclaim->generation = iter->generation;
1111 	}
1112 
1113 out_unlock:
1114 	rcu_read_unlock();
1115 out:
1116 	if (prev && prev != root)
1117 		css_put(&prev->css);
1118 
1119 	return memcg;
1120 }
1121 
1122 /**
1123  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1124  * @root: hierarchy root
1125  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1126  */
1127 void mem_cgroup_iter_break(struct mem_cgroup *root,
1128 			   struct mem_cgroup *prev)
1129 {
1130 	if (!root)
1131 		root = root_mem_cgroup;
1132 	if (prev && prev != root)
1133 		css_put(&prev->css);
1134 }
1135 
1136 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1137 					struct mem_cgroup *dead_memcg)
1138 {
1139 	struct mem_cgroup_reclaim_iter *iter;
1140 	struct mem_cgroup_per_node *mz;
1141 	int nid;
1142 
1143 	for_each_node(nid) {
1144 		mz = mem_cgroup_nodeinfo(from, nid);
1145 		iter = &mz->iter;
1146 		cmpxchg(&iter->position, dead_memcg, NULL);
1147 	}
1148 }
1149 
1150 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1151 {
1152 	struct mem_cgroup *memcg = dead_memcg;
1153 	struct mem_cgroup *last;
1154 
1155 	do {
1156 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1157 		last = memcg;
1158 	} while ((memcg = parent_mem_cgroup(memcg)));
1159 
1160 	/*
1161 	 * When cgroup1 non-hierarchy mode is used,
1162 	 * parent_mem_cgroup() does not walk all the way up to the
1163 	 * cgroup root (root_mem_cgroup). So we have to handle
1164 	 * dead_memcg from cgroup root separately.
1165 	 */
1166 	if (last != root_mem_cgroup)
1167 		__invalidate_reclaim_iterators(root_mem_cgroup,
1168 						dead_memcg);
1169 }
1170 
1171 /**
1172  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1173  * @memcg: hierarchy root
1174  * @fn: function to call for each task
1175  * @arg: argument passed to @fn
1176  *
1177  * This function iterates over tasks attached to @memcg or to any of its
1178  * descendants and calls @fn for each task. If @fn returns a non-zero
1179  * value, the function breaks the iteration loop and returns the value.
1180  * Otherwise, it will iterate over all tasks and return 0.
1181  *
1182  * This function must not be called for the root memory cgroup.
1183  */
1184 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1185 			  int (*fn)(struct task_struct *, void *), void *arg)
1186 {
1187 	struct mem_cgroup *iter;
1188 	int ret = 0;
1189 
1190 	BUG_ON(memcg == root_mem_cgroup);
1191 
1192 	for_each_mem_cgroup_tree(iter, memcg) {
1193 		struct css_task_iter it;
1194 		struct task_struct *task;
1195 
1196 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1197 		while (!ret && (task = css_task_iter_next(&it)))
1198 			ret = fn(task, arg);
1199 		css_task_iter_end(&it);
1200 		if (ret) {
1201 			mem_cgroup_iter_break(memcg, iter);
1202 			break;
1203 		}
1204 	}
1205 	return ret;
1206 }
1207 
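/*
 * Illustrative sketch (hypothetical caller): counting the tasks in a
 * hierarchy with mem_cgroup_scan_tasks(); the callback returns 0 so the
 * iteration visits every task:
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */
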
1208 /**
1209  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1210  * @page: the page
1211  * @pgdat: pgdat of the page
1212  *
1213  * This function is only safe when following the LRU page isolation
1214  * and putback protocol: the LRU lock must be held, and the page must
1215  * either be PageLRU() or the caller must have isolated/allocated it.
1216  */
1217 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1218 {
1219 	struct mem_cgroup_per_node *mz;
1220 	struct mem_cgroup *memcg;
1221 	struct lruvec *lruvec;
1222 
1223 	if (mem_cgroup_disabled()) {
1224 		lruvec = &pgdat->__lruvec;
1225 		goto out;
1226 	}
1227 
1228 	memcg = page->mem_cgroup;
1229 	/*
1230 	 * Swapcache readahead pages are added to the LRU - and
1231 	 * possibly migrated - before they are charged.
1232 	 */
1233 	if (!memcg)
1234 		memcg = root_mem_cgroup;
1235 
1236 	mz = mem_cgroup_page_nodeinfo(memcg, page);
1237 	lruvec = &mz->lruvec;
1238 out:
1239 	/*
1240 	 * Since a node can be onlined after the mem_cgroup was created,
1241 	 * we have to be prepared to initialize lruvec->pgdat here;
1242 	 * and if offlined then reonlined, we need to reinitialize it.
1243 	 */
1244 	if (unlikely(lruvec->pgdat != pgdat))
1245 		lruvec->pgdat = pgdat;
1246 	return lruvec;
1247 }
1248 
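/*
 * Illustrative sketch (hypothetical caller) of the isolation protocol
 * required above, with the LRU lock held across the lookup:
 *
 *	spin_lock_irq(&pgdat->lru_lock);
 *	lruvec = mem_cgroup_page_lruvec(page, pgdat);
 *	... isolate from or put back onto lruvec's lists ...
 *	spin_unlock_irq(&pgdat->lru_lock);
 */
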
1249 /**
1250  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1251  * @lruvec: mem_cgroup per zone lru vector
1252  * @lru: index of lru list the page is sitting on
1253  * @zid: zone id of the accounted pages
1254  * @nr_pages: positive when adding or negative when removing
1255  *
1256  * This function must be called under lru_lock, just before a page is added
1257  * to or just after a page is removed from an lru list (that ordering being
1258  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1259  */
1260 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1261 				int zid, int nr_pages)
1262 {
1263 	struct mem_cgroup_per_node *mz;
1264 	unsigned long *lru_size;
1265 	long size;
1266 
1267 	if (mem_cgroup_disabled())
1268 		return;
1269 
1270 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1271 	lru_size = &mz->lru_zone_size[zid][lru];
1272 
1273 	if (nr_pages < 0)
1274 		*lru_size += nr_pages;
1275 
1276 	size = *lru_size;
1277 	if (WARN_ONCE(size < 0,
1278 		"%s(%p, %d, %d): lru_size %ld\n",
1279 		__func__, lruvec, lru, nr_pages, size)) {
1280 		VM_BUG_ON(1);
1281 		*lru_size = 0;
1282 	}
1283 
1284 	if (nr_pages > 0)
1285 		*lru_size += nr_pages;
1286 }
1287 
1288 /**
1289  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1290  * @memcg: the memory cgroup
1291  *
1292  * Returns the maximum amount of memory @memcg can be charged with, in
1293  * pages.
1294  */
1295 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1296 {
1297 	unsigned long margin = 0;
1298 	unsigned long count;
1299 	unsigned long limit;
1300 
1301 	count = page_counter_read(&memcg->memory);
1302 	limit = READ_ONCE(memcg->memory.max);
1303 	if (count < limit)
1304 		margin = limit - count;
1305 
1306 	if (do_memsw_account()) {
1307 		count = page_counter_read(&memcg->memsw);
1308 		limit = READ_ONCE(memcg->memsw.max);
1309 		if (count <= limit)
1310 			margin = min(margin, limit - count);
1311 		else
1312 			margin = 0;
1313 	}
1314 
1315 	return margin;
1316 }
1317 
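/*
 * Example: with memory usage at 800 pages and memory.max at 1000, the
 * margin is 200 pages; if memsw accounting is active with usage 950 and
 * memsw.max 1000, the margin shrinks to min(200, 50) == 50 pages.
 */
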
1318 /*
1319  * A routine for checking whether "memcg" is under move_account() or not.
1320  *
1321  * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1322  * moving cgroups. This is used for waiting out the high memory pressure
1323  * caused by "move".
1324  */
1325 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1326 {
1327 	struct mem_cgroup *from;
1328 	struct mem_cgroup *to;
1329 	bool ret = false;
1330 	/*
1331 	 * Unlike the task_move routines, we access mc.to and mc.from without
1332 	 * the mutual exclusion of cgroup_mutex. Here, we take the spinlock instead.
1333 	 */
1334 	spin_lock(&mc.lock);
1335 	from = mc.from;
1336 	to = mc.to;
1337 	if (!from)
1338 		goto unlock;
1339 
1340 	ret = mem_cgroup_is_descendant(from, memcg) ||
1341 		mem_cgroup_is_descendant(to, memcg);
1342 unlock:
1343 	spin_unlock(&mc.lock);
1344 	return ret;
1345 }
1346 
1347 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1348 {
1349 	if (mc.moving_task && current != mc.moving_task) {
1350 		if (mem_cgroup_under_move(memcg)) {
1351 			DEFINE_WAIT(wait);
1352 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1353 			/* moving charge context might have finished. */
1354 			if (mc.moving_task)
1355 				schedule();
1356 			finish_wait(&mc.waitq, &wait);
1357 			return true;
1358 		}
1359 	}
1360 	return false;
1361 }
1362 
1363 static char *memory_stat_format(struct mem_cgroup *memcg)
1364 {
1365 	struct seq_buf s;
1366 	int i;
1367 
1368 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1369 	if (!s.buffer)
1370 		return NULL;
1371 
1372 	/*
1373 	 * Provide statistics on the state of the memory subsystem as
1374 	 * well as cumulative event counters that show past behavior.
1375 	 *
1376 	 * This list is ordered following a combination of these gradients:
1377 	 * 1) generic big picture -> specifics and details
1378 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1379 	 *
1380 	 * Current memory state:
1381 	 */
1382 
1383 	seq_buf_printf(&s, "anon %llu\n",
1384 		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
1385 		       PAGE_SIZE);
1386 	seq_buf_printf(&s, "file %llu\n",
1387 		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
1388 		       PAGE_SIZE);
1389 	seq_buf_printf(&s, "kernel_stack %llu\n",
1390 		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
1391 		       1024);
1392 	seq_buf_printf(&s, "slab %llu\n",
1393 		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
1394 			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
1395 		       PAGE_SIZE);
1396 	seq_buf_printf(&s, "sock %llu\n",
1397 		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
1398 		       PAGE_SIZE);
1399 
1400 	seq_buf_printf(&s, "shmem %llu\n",
1401 		       (u64)memcg_page_state(memcg, NR_SHMEM) *
1402 		       PAGE_SIZE);
1403 	seq_buf_printf(&s, "file_mapped %llu\n",
1404 		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
1405 		       PAGE_SIZE);
1406 	seq_buf_printf(&s, "file_dirty %llu\n",
1407 		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
1408 		       PAGE_SIZE);
1409 	seq_buf_printf(&s, "file_writeback %llu\n",
1410 		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
1411 		       PAGE_SIZE);
1412 
1413 	/*
1414 	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
1415 	 * with the NR_ANON_THP vm counter, but right now it's a pain in the
1416 	 * arse because it requires migrating the work out of rmap to a place
1417 	 * where the page->mem_cgroup is set up and stable.
1418 	 */
1419 	seq_buf_printf(&s, "anon_thp %llu\n",
1420 		       (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
1421 		       PAGE_SIZE);
1422 
1423 	for (i = 0; i < NR_LRU_LISTS; i++)
1424 		seq_buf_printf(&s, "%s %llu\n", mem_cgroup_lru_names[i],
1425 			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
1426 			       PAGE_SIZE);
1427 
1428 	seq_buf_printf(&s, "slab_reclaimable %llu\n",
1429 		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
1430 		       PAGE_SIZE);
1431 	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
1432 		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
1433 		       PAGE_SIZE);
1434 
1435 	/* Accumulated memory events */
1436 
1437 	seq_buf_printf(&s, "pgfault %lu\n", memcg_events(memcg, PGFAULT));
1438 	seq_buf_printf(&s, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT));
1439 
1440 	seq_buf_printf(&s, "workingset_refault %lu\n",
1441 		       memcg_page_state(memcg, WORKINGSET_REFAULT));
1442 	seq_buf_printf(&s, "workingset_activate %lu\n",
1443 		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
1444 	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
1445 		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
1446 
1447 	seq_buf_printf(&s, "pgrefill %lu\n", memcg_events(memcg, PGREFILL));
1448 	seq_buf_printf(&s, "pgscan %lu\n",
1449 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1450 		       memcg_events(memcg, PGSCAN_DIRECT));
1451 	seq_buf_printf(&s, "pgsteal %lu\n",
1452 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1453 		       memcg_events(memcg, PGSTEAL_DIRECT));
1454 	seq_buf_printf(&s, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE));
1455 	seq_buf_printf(&s, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE));
1456 	seq_buf_printf(&s, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE));
1457 	seq_buf_printf(&s, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED));
1458 
1459 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1460 	seq_buf_printf(&s, "thp_fault_alloc %lu\n",
1461 		       memcg_events(memcg, THP_FAULT_ALLOC));
1462 	seq_buf_printf(&s, "thp_collapse_alloc %lu\n",
1463 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1464 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1465 
1466 	/* The above should easily fit into one page */
1467 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1468 
1469 	return s.buffer;
1470 }
1471 
1472 #define K(x) ((x) << (PAGE_SHIFT-10))
1473 /**
1474  * mem_cgroup_print_oom_context: Print OOM information relevant to
1475  * memory controller.
1476  * @memcg: The memory cgroup that went over limit
1477  * @p: Task that is going to be killed
1478  *
1479  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1480  * enabled
1481  */
1482 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1483 {
1484 	rcu_read_lock();
1485 
1486 	if (memcg) {
1487 		pr_cont(",oom_memcg=");
1488 		pr_cont_cgroup_path(memcg->css.cgroup);
1489 	} else
1490 		pr_cont(",global_oom");
1491 	if (p) {
1492 		pr_cont(",task_memcg=");
1493 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1494 	}
1495 	rcu_read_unlock();
1496 }
1497 
1498 /**
1499  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1500  * memory controller.
1501  * @memcg: The memory cgroup that went over limit
1502  */
1503 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1504 {
1505 	char *buf;
1506 
1507 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1508 		K((u64)page_counter_read(&memcg->memory)),
1509 		K((u64)memcg->memory.max), memcg->memory.failcnt);
1510 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1511 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1512 			K((u64)page_counter_read(&memcg->swap)),
1513 			K((u64)memcg->swap.max), memcg->swap.failcnt);
1514 	else {
1515 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1516 			K((u64)page_counter_read(&memcg->memsw)),
1517 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1518 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1519 			K((u64)page_counter_read(&memcg->kmem)),
1520 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1521 	}
1522 
1523 	pr_info("Memory cgroup stats for ");
1524 	pr_cont_cgroup_path(memcg->css.cgroup);
1525 	pr_cont(":");
1526 	buf = memory_stat_format(memcg);
1527 	if (!buf)
1528 		return;
1529 	pr_info("%s", buf);
1530 	kfree(buf);
1531 }
1532 
1533 /*
1534  * Return the memory (and swap, if configured) limit for a memcg.
1535  */
1536 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1537 {
1538 	unsigned long max;
1539 
1540 	max = memcg->memory.max;
1541 	if (mem_cgroup_swappiness(memcg)) {
1542 		unsigned long memsw_max;
1543 		unsigned long swap_max;
1544 
1545 		memsw_max = memcg->memsw.max;
1546 		swap_max = memcg->swap.max;
1547 		swap_max = min(swap_max, (unsigned long)total_swap_pages);
1548 		max = min(max + swap_max, memsw_max);
1549 	}
1550 	return max;
1551 }
1552 
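/*
 * Worked example for the above: with memory.max == 1000 pages,
 * swap.max == 500 and memsw.max == 1200, and assuming total_swap_pages
 * is not the limiting factor, the result is min(1000 + 500, 1200) == 1200
 * pages.
 */
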
1553 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1554 {
1555 	return page_counter_read(&memcg->memory);
1556 }
1557 
1558 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1559 				     int order)
1560 {
1561 	struct oom_control oc = {
1562 		.zonelist = NULL,
1563 		.nodemask = NULL,
1564 		.memcg = memcg,
1565 		.gfp_mask = gfp_mask,
1566 		.order = order,
1567 	};
1568 	bool ret;
1569 
1570 	if (mutex_lock_killable(&oom_lock))
1571 		return true;
1572 	/*
1573 	 * A few threads which were not waiting at mutex_lock_killable() can
1574 	 * fail to bail out. Therefore, check again after holding oom_lock.
1575 	 */
1576 	ret = should_force_charge() || out_of_memory(&oc);
1577 	mutex_unlock(&oom_lock);
1578 	return ret;
1579 }
1580 
1581 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1582 				   pg_data_t *pgdat,
1583 				   gfp_t gfp_mask,
1584 				   unsigned long *total_scanned)
1585 {
1586 	struct mem_cgroup *victim = NULL;
1587 	int total = 0;
1588 	int loop = 0;
1589 	unsigned long excess;
1590 	unsigned long nr_scanned;
1591 	struct mem_cgroup_reclaim_cookie reclaim = {
1592 		.pgdat = pgdat,
1593 	};
1594 
1595 	excess = soft_limit_excess(root_memcg);
1596 
1597 	while (1) {
1598 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1599 		if (!victim) {
1600 			loop++;
1601 			if (loop >= 2) {
1602 				/*
1603 				 * If we have not been able to reclaim
1604 				 * anything, it might be because there are
1605 				 * no reclaimable pages under this hierarchy
1606 				 */
1607 				if (!total)
1608 					break;
1609 				/*
1610 				 * We want to do more targeted reclaim.
1611 				 * excess >> 2 is not too excessive, so we don't
1612 				 * reclaim too much, nor too little, which would
1613 				 * keep us coming back to reclaim from this cgroup
1614 				 */
1615 				if (total >= (excess >> 2) ||
1616 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1617 					break;
1618 			}
1619 			continue;
1620 		}
1621 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1622 					pgdat, &nr_scanned);
1623 		*total_scanned += nr_scanned;
1624 		if (!soft_limit_excess(root_memcg))
1625 			break;
1626 	}
1627 	mem_cgroup_iter_break(root_memcg, victim);
1628 	return total;
1629 }
1630 
1631 #ifdef CONFIG_LOCKDEP
1632 static struct lockdep_map memcg_oom_lock_dep_map = {
1633 	.name = "memcg_oom_lock",
1634 };
1635 #endif
1636 
1637 static DEFINE_SPINLOCK(memcg_oom_lock);
1638 
1639 /*
1640  * Check whether the OOM killer is already running under our hierarchy.
1641  * If someone else is running, return false.
1642  */
1643 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1644 {
1645 	struct mem_cgroup *iter, *failed = NULL;
1646 
1647 	spin_lock(&memcg_oom_lock);
1648 
1649 	for_each_mem_cgroup_tree(iter, memcg) {
1650 		if (iter->oom_lock) {
1651 			/*
1652 			 * This subtree of our hierarchy is already locked,
1653 			 * so we cannot grant the lock.
1654 			 */
1655 			failed = iter;
1656 			mem_cgroup_iter_break(memcg, iter);
1657 			break;
1658 		} else
1659 			iter->oom_lock = true;
1660 	}
1661 
1662 	if (failed) {
1663 		/*
1664 		 * OK, we failed to lock the whole subtree, so we have
1665 		 * to clean up what we already set up, up to the failing memcg.
1666 		 */
1667 		for_each_mem_cgroup_tree(iter, memcg) {
1668 			if (iter == failed) {
1669 				mem_cgroup_iter_break(memcg, iter);
1670 				break;
1671 			}
1672 			iter->oom_lock = false;
1673 		}
1674 	} else
1675 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1676 
1677 	spin_unlock(&memcg_oom_lock);
1678 
1679 	return !failed;
1680 }
1681 
1682 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1683 {
1684 	struct mem_cgroup *iter;
1685 
1686 	spin_lock(&memcg_oom_lock);
1687 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1688 	for_each_mem_cgroup_tree(iter, memcg)
1689 		iter->oom_lock = false;
1690 	spin_unlock(&memcg_oom_lock);
1691 }
1692 
1693 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1694 {
1695 	struct mem_cgroup *iter;
1696 
1697 	spin_lock(&memcg_oom_lock);
1698 	for_each_mem_cgroup_tree(iter, memcg)
1699 		iter->under_oom++;
1700 	spin_unlock(&memcg_oom_lock);
1701 }
1702 
1703 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1704 {
1705 	struct mem_cgroup *iter;
1706 
1707 	/*
1708 	 * When a new child is created while the hierarchy is under oom,
1709 	 * mem_cgroup_mark_under_oom() may not be called. Watch for underflow.
1710 	 */
1711 	spin_lock(&memcg_oom_lock);
1712 	for_each_mem_cgroup_tree(iter, memcg)
1713 		if (iter->under_oom > 0)
1714 			iter->under_oom--;
1715 	spin_unlock(&memcg_oom_lock);
1716 }
1717 
1718 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1719 
1720 struct oom_wait_info {
1721 	struct mem_cgroup *memcg;
1722 	wait_queue_entry_t	wait;
1723 };
1724 
1725 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1726 	unsigned mode, int sync, void *arg)
1727 {
1728 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1729 	struct mem_cgroup *oom_wait_memcg;
1730 	struct oom_wait_info *oom_wait_info;
1731 
1732 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1733 	oom_wait_memcg = oom_wait_info->memcg;
1734 
1735 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1736 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1737 		return 0;
1738 	return autoremove_wake_function(wait, mode, sync, arg);
1739 }
1740 
1741 static void memcg_oom_recover(struct mem_cgroup *memcg)
1742 {
1743 	/*
1744 	 * For the following lockless ->under_oom test, the only required
1745 	 * guarantee is that it must see the state asserted by an OOM when
1746 	 * this function is called as a result of userland actions
1747 	 * triggered by the notification of the OOM.  This is trivially
1748 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1749 	 * triggering notification.
1750 	 */
1751 	if (memcg && memcg->under_oom)
1752 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1753 }
1754 
1755 enum oom_status {
1756 	OOM_SUCCESS,
1757 	OOM_FAILED,
1758 	OOM_ASYNC,
1759 	OOM_SKIPPED
1760 };
1761 
1762 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1763 {
1764 	enum oom_status ret;
1765 	bool locked;
1766 
1767 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1768 		return OOM_SKIPPED;
1769 
1770 	memcg_memory_event(memcg, MEMCG_OOM);
1771 
1772 	/*
1773 	 * We are in the middle of the charge context here, so we
1774 	 * don't want to block when potentially sitting on a callstack
1775 	 * that holds all kinds of filesystem and mm locks.
1776 	 *
1777 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1778 	 * handling until the charge can succeed; remember the context and put
1779 	 * the task to sleep at the end of the page fault when all locks are
1780 	 * released.
1781 	 *
1782 	 * On the other hand, in-kernel OOM killer allows for an async victim
1783 	 * memory reclaim (oom_reaper) and that means that we are not solely
1784 	 * relying on the oom victim to make a forward progress and we can
1785 	 * invoke the oom killer here.
1786 	 *
1787 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1788 	 * victim and then we have to bail out from the charge path.
1789 	 */
1790 	if (memcg->oom_kill_disable) {
1791 		if (!current->in_user_fault)
1792 			return OOM_SKIPPED;
1793 		css_get(&memcg->css);
1794 		current->memcg_in_oom = memcg;
1795 		current->memcg_oom_gfp_mask = mask;
1796 		current->memcg_oom_order = order;
1797 
1798 		return OOM_ASYNC;
1799 	}
1800 
1801 	mem_cgroup_mark_under_oom(memcg);
1802 
1803 	locked = mem_cgroup_oom_trylock(memcg);
1804 
1805 	if (locked)
1806 		mem_cgroup_oom_notify(memcg);
1807 
1808 	mem_cgroup_unmark_under_oom(memcg);
1809 	if (mem_cgroup_out_of_memory(memcg, mask, order))
1810 		ret = OOM_SUCCESS;
1811 	else
1812 		ret = OOM_FAILED;
1813 
1814 	if (locked)
1815 		mem_cgroup_oom_unlock(memcg);
1816 
1817 	return ret;
1818 }
1819 
1820 /**
1821  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1822  * @handle: actually kill/wait or just clean up the OOM state
1823  *
1824  * This has to be called at the end of a page fault if the memcg OOM
1825  * handler was enabled.
1826  *
1827  * Memcg supports userspace OOM handling where failed allocations must
1828  * sleep on a waitqueue until the userspace task resolves the
1829  * situation.  Sleeping directly in the charge context with all kinds
1830  * of locks held is not a good idea, instead we remember an OOM state
1831  * in the task and mem_cgroup_oom_synchronize() has to be called at
1832  * the end of the page fault to complete the OOM handling.
1833  *
1834  * Returns %true if an ongoing memcg OOM situation was detected and
1835  * completed, %false otherwise.
1836  */
1837 bool mem_cgroup_oom_synchronize(bool handle)
1838 {
1839 	struct mem_cgroup *memcg = current->memcg_in_oom;
1840 	struct oom_wait_info owait;
1841 	bool locked;
1842 
1843 	/* OOM is global, do not handle */
1844 	if (!memcg)
1845 		return false;
1846 
1847 	if (!handle)
1848 		goto cleanup;
1849 
1850 	owait.memcg = memcg;
1851 	owait.wait.flags = 0;
1852 	owait.wait.func = memcg_oom_wake_function;
1853 	owait.wait.private = current;
1854 	INIT_LIST_HEAD(&owait.wait.entry);
1855 
1856 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1857 	mem_cgroup_mark_under_oom(memcg);
1858 
1859 	locked = mem_cgroup_oom_trylock(memcg);
1860 
1861 	if (locked)
1862 		mem_cgroup_oom_notify(memcg);
1863 
1864 	if (locked && !memcg->oom_kill_disable) {
1865 		mem_cgroup_unmark_under_oom(memcg);
1866 		finish_wait(&memcg_oom_waitq, &owait.wait);
1867 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1868 					 current->memcg_oom_order);
1869 	} else {
1870 		schedule();
1871 		mem_cgroup_unmark_under_oom(memcg);
1872 		finish_wait(&memcg_oom_waitq, &owait.wait);
1873 	}
1874 
1875 	if (locked) {
1876 		mem_cgroup_oom_unlock(memcg);
1877 		/*
1878 		 * There is no guarantee that an OOM-lock contender
1879 		 * sees the wakeups triggered by the OOM kill
1880 		 * uncharges.  Wake any sleepers explicitely.
1881 		 */
1882 		memcg_oom_recover(memcg);
1883 	}
1884 cleanup:
1885 	current->memcg_in_oom = NULL;
1886 	css_put(&memcg->css);
1887 	return true;
1888 }
1889 
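/*
 * A hedged sketch of how the page fault path consumes the OOM state
 * recorded by mem_cgroup_oom() above (simplified from the callers in
 * mm/memory.c and mm/oom_kill.c at this revision):
 *
 *	handle_mm_fault() tail, for userland faults:
 *		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *			mem_cgroup_oom_synchronize(false);	// clean up only
 *
 *	pagefault_out_of_memory():
 *		if (mem_cgroup_oom_synchronize(true))	// kill or wait
 *			return;				// handled by memcg
 */
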
1890 /**
1891  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1892  * @victim: task to be killed by the OOM killer
1893  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1894  *
1895  * Returns a pointer to a memory cgroup, which has to be cleaned up
1896  * by killing all belonging OOM-killable tasks.
1897  *
1898  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1899  */
1900 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1901 					    struct mem_cgroup *oom_domain)
1902 {
1903 	struct mem_cgroup *oom_group = NULL;
1904 	struct mem_cgroup *memcg;
1905 
1906 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1907 		return NULL;
1908 
1909 	if (!oom_domain)
1910 		oom_domain = root_mem_cgroup;
1911 
1912 	rcu_read_lock();
1913 
1914 	memcg = mem_cgroup_from_task(victim);
1915 	if (memcg == root_mem_cgroup)
1916 		goto out;
1917 
1918 	/*
1919 	 * Traverse the memory cgroup hierarchy from the victim task's
1920 	 * cgroup up to the OOMing cgroup (or root) to find the
1921 	 * highest-level memory cgroup with oom.group set.
1922 	 */
1923 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1924 		if (memcg->oom_group)
1925 			oom_group = memcg;
1926 
1927 		if (memcg == oom_domain)
1928 			break;
1929 	}
1930 
1931 	if (oom_group)
1932 		css_get(&oom_group->css);
1933 out:
1934 	rcu_read_unlock();
1935 
1936 	return oom_group;
1937 }
1938 
1939 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1940 {
1941 	pr_info("Tasks in ");
1942 	pr_cont_cgroup_path(memcg->css.cgroup);
1943 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1944 }
1945 
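/*
 * Hedged usage sketch: the expected consumer of the two helpers above is
 * the OOM killer in mm/oom_kill.c, roughly along these lines (simplified,
 * locking and error handling omitted):
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 *	...kill the victim...
 *	if (oom_group) {
 *		mem_cgroup_print_oom_group(oom_group);
 *		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member, ...);
 *		mem_cgroup_put(oom_group);
 *	}
 */
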
1946 /**
1947  * lock_page_memcg - lock a page->mem_cgroup binding
1948  * @page: the page
1949  *
1950  * This function protects unlocked LRU pages from being moved to
1951  * another cgroup.
1952  *
1953  * It ensures lifetime of the returned memcg. Caller is responsible
1954  * for the lifetime of the page; __unlock_page_memcg() is available
1955  * when @page might get freed inside the locked section.
1956  */
1957 struct mem_cgroup *lock_page_memcg(struct page *page)
1958 {
1959 	struct mem_cgroup *memcg;
1960 	unsigned long flags;
1961 
1962 	/*
1963 	 * The RCU lock is held throughout the transaction.  The fast
1964 	 * path can get away without acquiring the memcg->move_lock
1965 	 * because page moving starts with an RCU grace period.
1966 	 *
1967 	 * The RCU lock also protects the memcg from being freed when
1968 	 * the page state that is going to change is the only thing
1969 	 * preventing the page itself from being freed. E.g. writeback
1970 	 * doesn't hold a page reference and relies on PG_writeback to
1971 	 * keep off truncation, migration and so forth.
1972 	 */
1973 	rcu_read_lock();
1974 
1975 	if (mem_cgroup_disabled())
1976 		return NULL;
1977 again:
1978 	memcg = page->mem_cgroup;
1979 	if (unlikely(!memcg))
1980 		return NULL;
1981 
1982 	if (atomic_read(&memcg->moving_account) <= 0)
1983 		return memcg;
1984 
1985 	spin_lock_irqsave(&memcg->move_lock, flags);
1986 	if (memcg != page->mem_cgroup) {
1987 		spin_unlock_irqrestore(&memcg->move_lock, flags);
1988 		goto again;
1989 	}
1990 
1991 	/*
1992 	 * When charge migration first begins, we can have locked and
1993 	 * unlocked page stat updates happening concurrently.  Track
1994 	 * the task who has the lock for unlock_page_memcg().
1995 	 */
1996 	memcg->move_lock_task = current;
1997 	memcg->move_lock_flags = flags;
1998 
1999 	return memcg;
2000 }
2001 EXPORT_SYMBOL(lock_page_memcg);
2002 
2003 /**
2004  * __unlock_page_memcg - unlock and unpin a memcg
2005  * @memcg: the memcg
2006  *
2007  * Unlock and unpin a memcg returned by lock_page_memcg().
2008  */
2009 void __unlock_page_memcg(struct mem_cgroup *memcg)
2010 {
2011 	if (memcg && memcg->move_lock_task == current) {
2012 		unsigned long flags = memcg->move_lock_flags;
2013 
2014 		memcg->move_lock_task = NULL;
2015 		memcg->move_lock_flags = 0;
2016 
2017 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2018 	}
2019 
2020 	rcu_read_unlock();
2021 }
2022 
2023 /**
2024  * unlock_page_memcg - unlock a page->mem_cgroup binding
2025  * @page: the page
2026  */
2027 void unlock_page_memcg(struct page *page)
2028 {
2029 	__unlock_page_memcg(page->mem_cgroup);
2030 }
2031 EXPORT_SYMBOL(unlock_page_memcg);
2032 
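/*
 * A minimal usage sketch of the locking protocol above. The helper below
 * is hypothetical and for illustration only; real callers (e.g. the page
 * dirtying and writeback paths) follow the same lock -> update -> unlock
 * pattern.
 */
static __maybe_unused void example_mod_page_state(struct page *page,
						  int idx, int val)
{
	struct mem_cgroup *memcg;

	/* Pins the memcg and blocks charge migration of @page. */
	memcg = lock_page_memcg(page);
	if (memcg)
		__mod_memcg_state(memcg, idx, val);
	/* Safe for memcg == NULL; drops move_lock (if taken) and RCU. */
	__unlock_page_memcg(memcg);
}
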
2033 struct memcg_stock_pcp {
2034 	struct mem_cgroup *cached; /* this must never be the root cgroup */
2035 	unsigned int nr_pages;
2036 	struct work_struct work;
2037 	unsigned long flags;
2038 #define FLUSHING_CACHED_CHARGE	0
2039 };
2040 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2041 static DEFINE_MUTEX(percpu_charge_mutex);
2042 
2043 /**
2044  * consume_stock: Try to consume stocked charge on this cpu.
2045  * @memcg: memcg to consume from.
2046  * @nr_pages: how many pages to charge.
2047  *
2048  * The charges will only happen if @memcg matches the current cpu's memcg
2049  * stock, and at least @nr_pages are available in that stock.  Failing to
2050  * service an allocation sends the caller to the slow path, which refills it.
2051  *
2052  * Returns %true if successful, %false otherwise.
2053  */
2054 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2055 {
2056 	struct memcg_stock_pcp *stock;
2057 	unsigned long flags;
2058 	bool ret = false;
2059 
2060 	if (nr_pages > MEMCG_CHARGE_BATCH)
2061 		return ret;
2062 
2063 	local_irq_save(flags);
2064 
2065 	stock = this_cpu_ptr(&memcg_stock);
2066 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2067 		stock->nr_pages -= nr_pages;
2068 		ret = true;
2069 	}
2070 
2071 	local_irq_restore(flags);
2072 
2073 	return ret;
2074 }
2075 
2076 /*
2077  * Return stocked charges to the counters and reset cached information.
2078  */
2079 static void drain_stock(struct memcg_stock_pcp *stock)
2080 {
2081 	struct mem_cgroup *old = stock->cached;
2082 
2083 	if (stock->nr_pages) {
2084 		page_counter_uncharge(&old->memory, stock->nr_pages);
2085 		if (do_memsw_account())
2086 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2087 		css_put_many(&old->css, stock->nr_pages);
2088 		stock->nr_pages = 0;
2089 	}
2090 	stock->cached = NULL;
2091 }
2092 
2093 static void drain_local_stock(struct work_struct *dummy)
2094 {
2095 	struct memcg_stock_pcp *stock;
2096 	unsigned long flags;
2097 
2098 	/*
2099 	 * The only protection from cpu hotplug vs. drain_stock races is
2100 	 * that we always operate on the local CPU stock here with IRQs disabled.
2101 	 */
2102 	local_irq_save(flags);
2103 
2104 	stock = this_cpu_ptr(&memcg_stock);
2105 	drain_stock(stock);
2106 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2107 
2108 	local_irq_restore(flags);
2109 }
2110 
2111 /*
2112  * Cache @nr_pages charges in the local per-CPU area,
2113  * to be consumed by consume_stock() later.
2114  */
2115 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2116 {
2117 	struct memcg_stock_pcp *stock;
2118 	unsigned long flags;
2119 
2120 	local_irq_save(flags);
2121 
2122 	stock = this_cpu_ptr(&memcg_stock);
2123 	if (stock->cached != memcg) { /* reset if necessary */
2124 		drain_stock(stock);
2125 		stock->cached = memcg;
2126 	}
2127 	stock->nr_pages += nr_pages;
2128 
2129 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2130 		drain_stock(stock);
2131 
2132 	local_irq_restore(flags);
2133 }
2134 
2135 /*
2136  * Drain all per-CPU charge caches for the given root_memcg and the
2137  * subtree of the hierarchy under it.
2138  */
2139 static void drain_all_stock(struct mem_cgroup *root_memcg)
2140 {
2141 	int cpu, curcpu;
2142 
2143 	/* If someone's already draining, avoid running more workers. */
2144 	if (!mutex_trylock(&percpu_charge_mutex))
2145 		return;
2146 	/*
2147 	 * Notify other cpus that a system-wide "drain" is running.
2148 	 * We do not care about races with cpu hotplug, because cpu down,
2149 	 * as well as workers from this path, always operate on the local
2150 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2151 	 */
2152 	curcpu = get_cpu();
2153 	for_each_online_cpu(cpu) {
2154 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2155 		struct mem_cgroup *memcg;
2156 		bool flush = false;
2157 
2158 		rcu_read_lock();
2159 		memcg = stock->cached;
2160 		if (memcg && stock->nr_pages &&
2161 		    mem_cgroup_is_descendant(memcg, root_memcg))
2162 			flush = true;
2163 		rcu_read_unlock();
2164 
2165 		if (flush &&
2166 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2167 			if (cpu == curcpu)
2168 				drain_local_stock(&stock->work);
2169 			else
2170 				schedule_work_on(cpu, &stock->work);
2171 		}
2172 	}
2173 	put_cpu();
2174 	mutex_unlock(&percpu_charge_mutex);
2175 }
2176 
2177 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2178 {
2179 	struct memcg_stock_pcp *stock;
2180 	struct mem_cgroup *memcg, *mi;
2181 
2182 	stock = &per_cpu(memcg_stock, cpu);
2183 	drain_stock(stock);
2184 
2185 	for_each_mem_cgroup(memcg) {
2186 		int i;
2187 
2188 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2189 			int nid;
2190 			long x;
2191 
2192 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2193 			if (x)
2194 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2195 					atomic_long_add(x, &mi->vmstats[i]);
2196 
2197 			if (i >= NR_VM_NODE_STAT_ITEMS)
2198 				continue;
2199 
2200 			for_each_node(nid) {
2201 				struct mem_cgroup_per_node *pn;
2202 
2203 				pn = mem_cgroup_nodeinfo(memcg, nid);
2204 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2205 				if (x)
2206 					do {
2207 						atomic_long_add(x, &pn->lruvec_stat[i]);
2208 					} while ((pn = parent_nodeinfo(pn, nid)));
2209 			}
2210 		}
2211 
2212 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2213 			long x;
2214 
2215 			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2216 			if (x)
2217 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2218 					atomic_long_add(x, &mi->vmevents[i]);
2219 		}
2220 	}
2221 
2222 	return 0;
2223 }
2224 
2225 static void reclaim_high(struct mem_cgroup *memcg,
2226 			 unsigned int nr_pages,
2227 			 gfp_t gfp_mask)
2228 {
2229 	do {
2230 		if (page_counter_read(&memcg->memory) <= memcg->high)
2231 			continue;
2232 		memcg_memory_event(memcg, MEMCG_HIGH);
2233 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2234 	} while ((memcg = parent_mem_cgroup(memcg)));
2235 }
2236 
2237 static void high_work_func(struct work_struct *work)
2238 {
2239 	struct mem_cgroup *memcg;
2240 
2241 	memcg = container_of(work, struct mem_cgroup, high_work);
2242 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2243 }
2244 
2245 /*
2246  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2247  * long enough to cause a significant slowdown in most cases, while still
2248  * allowing diagnostics and tracing to proceed without becoming stuck.
2249  */
2250 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2251 
2252 /*
2253  * When calculating the delay, we apply these shifts on either side of the
2254  * exponentiation to maintain precision and to scale to a reasonable number
2255  * of jiffies (see the table below).
2256  *
2257  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2258  *   overage ratio to a delay.
2259  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2260  *   proposed penalty in order to reduce it to a reasonable number of jiffies, and
2261  *   to produce a reasonable delay curve.
2262  *
2263  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2264  * reasonable delay curve compared to precision-adjusted overage, not
2265  * penalising heavily at first, but still making sure that growth beyond the
2266  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2267  * example, with a high of 100 megabytes:
2268  *
2269  *  +-------+------------------------+
2270  *  | usage | time to allocate in ms |
2271  *  +-------+------------------------+
2272  *  | 100M  |                      0 |
2273  *  | 101M  |                      6 |
2274  *  | 102M  |                     25 |
2275  *  | 103M  |                     57 |
2276  *  | 104M  |                    102 |
2277  *  | 105M  |                    159 |
2278  *  | 106M  |                    230 |
2279  *  | 107M  |                    313 |
2280  *  | 108M  |                    409 |
2281  *  | 109M  |                    518 |
2282  *  | 110M  |                    639 |
2283  *  | 111M  |                    774 |
2284  *  | 112M  |                    921 |
2285  *  | 113M  |                   1081 |
2286  *  | 114M  |                   1254 |
2287  *  | 115M  |                   1439 |
2288  *  | 116M  |                   1638 |
2289  *  | 117M  |                   1849 |
2290  *  | 118M  |                   2000 |
2291  *  | 119M  |                   2000 |
2292  *  | 120M  |                   2000 |
2293  *  +-------+------------------------+
2294  */
2295 #define MEMCG_DELAY_PRECISION_SHIFT 20
2296 #define MEMCG_DELAY_SCALING_SHIFT 14
2297 
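/*
 * Worked example of the table and shifts above (illustrative only): with
 * HZ == 1000 and memory.high == 100M, a usage of 104M is 4% over, so
 *
 *	overage = (4M << MEMCG_DELAY_PRECISION_SHIFT) / 100M ~= 41943
 *	penalty = (41943^2 * HZ) >> (20 + 14)                ~= 102 jiffies
 *
 * i.e. ~102ms, matching the 104M row of the table.
 */
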
2298 /*
2299  * Scheduled by try_charge() to be executed from the userland return path
2300  * and reclaims memory over the high limit.
2301  */
2302 void mem_cgroup_handle_over_high(void)
2303 {
2304 	unsigned long usage, high, clamped_high;
2305 	unsigned long pflags;
2306 	unsigned long penalty_jiffies, overage;
2307 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2308 	struct mem_cgroup *memcg;
2309 
2310 	if (likely(!nr_pages))
2311 		return;
2312 
2313 	memcg = get_mem_cgroup_from_mm(current->mm);
2314 	reclaim_high(memcg, nr_pages, GFP_KERNEL);
2315 	current->memcg_nr_pages_over_high = 0;
2316 
2317 	/*
2318 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2319 	 * allocators proactively to slow down excessive growth.
2320 	 *
2321 	 * We use overage compared to memory.high to calculate the number of
2322 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2323 	 * fairly lenient on small overages, and increasingly harsh when the
2324 	 * memcg in question makes it clear that it has no intention of stopping
2325 	 * its crazy behaviour, so we exponentially increase the delay based on
2326 	 * overage amount.
2327 	 */
2328 
2329 	usage = page_counter_read(&memcg->memory);
2330 	high = READ_ONCE(memcg->high);
2331 
2332 	if (usage <= high)
2333 		goto out;
2334 
2335 	/*
2336 	 * Prevent division by 0 in the overage calculation by acting as if it
2337 	 * were a threshold of 1 page.
2338 	 */
2339 	clamped_high = max(high, 1UL);
2340 
2341 	overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
2342 			  clamped_high);
2343 
2344 	penalty_jiffies = ((u64)overage * overage * HZ)
2345 		>> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
2346 
2347 	/*
2348 	 * Factor in the task's own contribution to the overage, such that four
2349 	 * N-sized allocations are throttled approximately the same as one
2350 	 * 4N-sized allocation.
2351 	 *
2352 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2353 	 * larger the current charge batch is than that.
2354 	 */
2355 	penalty_jiffies = penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2356 
2357 	/*
2358 	 * Clamp the max delay per usermode return so as to still keep the
2359 	 * application moving forwards and also permit diagnostics, albeit
2360 	 * extremely slowly.
2361 	 */
2362 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2363 
2364 	/*
2365 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2366 	 * that it's not even worth doing, in an attempt to be nice to those who
2367 	 * go only a small amount over their memory.high value and maybe haven't
2368 	 * been aggressively reclaimed enough yet.
2369 	 */
2370 	if (penalty_jiffies <= HZ / 100)
2371 		goto out;
2372 
2373 	/*
2374 	 * If we exit early, we're guaranteed to die (since
2375 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2376 	 * need to account for any ill-begotten jiffies to pay them off later.
2377 	 */
2378 	psi_memstall_enter(&pflags);
2379 	schedule_timeout_killable(penalty_jiffies);
2380 	psi_memstall_leave(&pflags);
2381 
2382 out:
2383 	css_put(&memcg->css);
2384 }
2385 
2386 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2387 		      unsigned int nr_pages)
2388 {
2389 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2390 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2391 	struct mem_cgroup *mem_over_limit;
2392 	struct page_counter *counter;
2393 	unsigned long nr_reclaimed;
2394 	bool may_swap = true;
2395 	bool drained = false;
2396 	enum oom_status oom_status;
2397 
2398 	if (mem_cgroup_is_root(memcg))
2399 		return 0;
2400 retry:
2401 	if (consume_stock(memcg, nr_pages))
2402 		return 0;
2403 
2404 	if (!do_memsw_account() ||
2405 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2406 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2407 			goto done_restock;
2408 		if (do_memsw_account())
2409 			page_counter_uncharge(&memcg->memsw, batch);
2410 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2411 	} else {
2412 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2413 		may_swap = false;
2414 	}
2415 
2416 	if (batch > nr_pages) {
2417 		batch = nr_pages;
2418 		goto retry;
2419 	}
2420 
2421 	/*
2422 	 * Memcg doesn't have a dedicated reserve for atomic
2423 	 * allocations. But like the global atomic pool, we need to
2424 	 * put the burden of reclaim on regular allocation requests
2425 	 * and let these go through as privileged allocations.
2426 	 */
2427 	if (gfp_mask & __GFP_ATOMIC)
2428 		goto force;
2429 
2430 	/*
2431 	 * Unlike in global OOM situations, memcg is not in a physical
2432 	 * memory shortage.  Allow dying and OOM-killed tasks to
2433 	 * bypass the last charges so that they can exit quickly and
2434 	 * free their memory.
2435 	 */
2436 	if (unlikely(should_force_charge()))
2437 		goto force;
2438 
2439 	/*
2440 	 * Prevent unbounded recursion when reclaim operations need to
2441 	 * allocate memory. This might exceed the limits temporarily,
2442 	 * but we prefer facilitating memory reclaim and getting back
2443 	 * under the limit over triggering OOM kills in these cases.
2444 	 */
2445 	if (unlikely(current->flags & PF_MEMALLOC))
2446 		goto force;
2447 
2448 	if (unlikely(task_in_memcg_oom(current)))
2449 		goto nomem;
2450 
2451 	if (!gfpflags_allow_blocking(gfp_mask))
2452 		goto nomem;
2453 
2454 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2455 
2456 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2457 						    gfp_mask, may_swap);
2458 
2459 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2460 		goto retry;
2461 
2462 	if (!drained) {
2463 		drain_all_stock(mem_over_limit);
2464 		drained = true;
2465 		goto retry;
2466 	}
2467 
2468 	if (gfp_mask & __GFP_NORETRY)
2469 		goto nomem;
2470 	/*
2471 	 * Even though the limit is exceeded at this point, reclaim
2472 	 * may have been able to free some pages.  Retry the charge
2473 	 * before killing the task.
2474 	 *
2475 	 * Only for regular pages, though: huge pages are rather
2476 	 * unlikely to succeed so close to the limit, and we fall back
2477 	 * to regular pages anyway in case of failure.
2478 	 */
2479 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2480 		goto retry;
2481 	/*
2482 	 * During task move, charges can be doubly counted. So it's better
2483 	 * to wait until the end of the task move if one is in progress.
2484 	 */
2485 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2486 		goto retry;
2487 
2488 	if (nr_retries--)
2489 		goto retry;
2490 
2491 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2492 		goto nomem;
2493 
2494 	if (gfp_mask & __GFP_NOFAIL)
2495 		goto force;
2496 
2497 	if (fatal_signal_pending(current))
2498 		goto force;
2499 
2500 	/*
2501 	 * Keep retrying as long as the memcg OOM killer is able to make
2502 	 * forward progress, or bypass the charge if the OOM killer
2503 	 * couldn't make any progress.
2504 	 */
2505 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2506 		       get_order(nr_pages * PAGE_SIZE));
2507 	switch (oom_status) {
2508 	case OOM_SUCCESS:
2509 		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2510 		goto retry;
2511 	case OOM_FAILED:
2512 		goto force;
2513 	default:
2514 		goto nomem;
2515 	}
2516 nomem:
2517 	if (!(gfp_mask & __GFP_NOFAIL))
2518 		return -ENOMEM;
2519 force:
2520 	/*
2521 	 * The allocation either can't fail or will lead to more memory
2522 	 * being freed very soon.  Allow memory usage to go over the limit
2523 	 * temporarily by force-charging it.
2524 	 */
2525 	page_counter_charge(&memcg->memory, nr_pages);
2526 	if (do_memsw_account())
2527 		page_counter_charge(&memcg->memsw, nr_pages);
2528 	css_get_many(&memcg->css, nr_pages);
2529 
2530 	return 0;
2531 
2532 done_restock:
2533 	css_get_many(&memcg->css, batch);
2534 	if (batch > nr_pages)
2535 		refill_stock(memcg, batch - nr_pages);
2536 
2537 	/*
2538 	 * If the hierarchy is above the normal consumption range, schedule
2539 	 * reclaim on returning to userland.  We can perform reclaim here
2540 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2541 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2542 	 * not recorded as it most likely matches current's and won't
2543 	 * change in the meantime.  As high limit is checked again before
2544 	 * reclaim, the cost of mismatch is negligible.
2545 	 */
2546 	do {
2547 		if (page_counter_read(&memcg->memory) > memcg->high) {
2548 			/* Don't bother a random interrupted task */
2549 			if (in_interrupt()) {
2550 				schedule_work(&memcg->high_work);
2551 				break;
2552 			}
2553 			current->memcg_nr_pages_over_high += batch;
2554 			set_notify_resume(current);
2555 			break;
2556 		}
2557 	} while ((memcg = parent_mem_cgroup(memcg)));
2558 
2559 	return 0;
2560 }
2561 
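/*
 * Hedged sketch of how try_charge() interacts with the per-CPU stock
 * (an illustrative trace, not new behaviour): a single-page charge first
 * hits consume_stock(); on a miss, the slow path charges a full batch of
 * MEMCG_CHARGE_BATCH pages against the page counters and, in
 * done_restock, parks the batch - nr_pages surplus in this CPU's stock,
 * so subsequent single-page charges on this CPU avoid the counters
 * entirely until the stock is consumed or drained.
 */
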
2562 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2563 {
2564 	if (mem_cgroup_is_root(memcg))
2565 		return;
2566 
2567 	page_counter_uncharge(&memcg->memory, nr_pages);
2568 	if (do_memsw_account())
2569 		page_counter_uncharge(&memcg->memsw, nr_pages);
2570 
2571 	css_put_many(&memcg->css, nr_pages);
2572 }
2573 
2574 static void lock_page_lru(struct page *page, int *isolated)
2575 {
2576 	pg_data_t *pgdat = page_pgdat(page);
2577 
2578 	spin_lock_irq(&pgdat->lru_lock);
2579 	if (PageLRU(page)) {
2580 		struct lruvec *lruvec;
2581 
2582 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2583 		ClearPageLRU(page);
2584 		del_page_from_lru_list(page, lruvec, page_lru(page));
2585 		*isolated = 1;
2586 	} else
2587 		*isolated = 0;
2588 }
2589 
2590 static void unlock_page_lru(struct page *page, int isolated)
2591 {
2592 	pg_data_t *pgdat = page_pgdat(page);
2593 
2594 	if (isolated) {
2595 		struct lruvec *lruvec;
2596 
2597 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2598 		VM_BUG_ON_PAGE(PageLRU(page), page);
2599 		SetPageLRU(page);
2600 		add_page_to_lru_list(page, lruvec, page_lru(page));
2601 	}
2602 	spin_unlock_irq(&pgdat->lru_lock);
2603 }
2604 
2605 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2606 			  bool lrucare)
2607 {
2608 	int isolated;
2609 
2610 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2611 
2612 	/*
2613 	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2614 	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2615 	 */
2616 	if (lrucare)
2617 		lock_page_lru(page, &isolated);
2618 
2619 	/*
2620 	 * Nobody should be changing or seriously looking at
2621 	 * page->mem_cgroup at this point:
2622 	 *
2623 	 * - the page is uncharged
2624 	 *
2625 	 * - the page is off-LRU
2626 	 *
2627 	 * - an anonymous fault has exclusive page access, except for
2628 	 *   a locked page table
2629 	 *
2630 	 * - a page cache insertion, a swapin fault, or a migration
2631 	 *   have the page locked
2632 	 */
2633 	page->mem_cgroup = memcg;
2634 
2635 	if (lrucare)
2636 		unlock_page_lru(page, isolated);
2637 }
2638 
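/*
 * Hedged sketch of the two-phase charge protocol that commit_charge()
 * completes (simplified; the public mem_cgroup_try_charge() /
 * mem_cgroup_commit_charge() / mem_cgroup_cancel_charge() entry points
 * appear later in this file):
 *
 *	try_charge(memcg, gfp, nr);		// reserve in the counters
 *	...install the page (page table, page cache, etc.)...
 *	commit_charge(page, memcg, lrucare);	// bind page->mem_cgroup
 *	// on failure instead: cancel_charge(memcg, nr);
 */
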
2639 #ifdef CONFIG_MEMCG_KMEM
2640 static int memcg_alloc_cache_id(void)
2641 {
2642 	int id, size;
2643 	int err;
2644 
2645 	id = ida_simple_get(&memcg_cache_ida,
2646 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2647 	if (id < 0)
2648 		return id;
2649 
2650 	if (id < memcg_nr_cache_ids)
2651 		return id;
2652 
2653 	/*
2654 	 * There's no space for the new id in memcg_caches arrays,
2655 	 * so we have to grow them.
2656 	 */
2657 	down_write(&memcg_cache_ids_sem);
2658 
2659 	size = 2 * (id + 1);
2660 	if (size < MEMCG_CACHES_MIN_SIZE)
2661 		size = MEMCG_CACHES_MIN_SIZE;
2662 	else if (size > MEMCG_CACHES_MAX_SIZE)
2663 		size = MEMCG_CACHES_MAX_SIZE;
2664 
2665 	err = memcg_update_all_caches(size);
2666 	if (!err)
2667 		err = memcg_update_all_list_lrus(size);
2668 	if (!err)
2669 		memcg_nr_cache_ids = size;
2670 
2671 	up_write(&memcg_cache_ids_sem);
2672 
2673 	if (err) {
2674 		ida_simple_remove(&memcg_cache_ida, id);
2675 		return err;
2676 	}
2677 	return id;
2678 }
2679 
2680 static void memcg_free_cache_id(int id)
2681 {
2682 	ida_simple_remove(&memcg_cache_ida, id);
2683 }
2684 
2685 struct memcg_kmem_cache_create_work {
2686 	struct mem_cgroup *memcg;
2687 	struct kmem_cache *cachep;
2688 	struct work_struct work;
2689 };
2690 
2691 static void memcg_kmem_cache_create_func(struct work_struct *w)
2692 {
2693 	struct memcg_kmem_cache_create_work *cw =
2694 		container_of(w, struct memcg_kmem_cache_create_work, work);
2695 	struct mem_cgroup *memcg = cw->memcg;
2696 	struct kmem_cache *cachep = cw->cachep;
2697 
2698 	memcg_create_kmem_cache(memcg, cachep);
2699 
2700 	css_put(&memcg->css);
2701 	kfree(cw);
2702 }
2703 
2704 /*
2705  * Enqueue the creation of a per-memcg kmem_cache.
2706  */
2707 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2708 					       struct kmem_cache *cachep)
2709 {
2710 	struct memcg_kmem_cache_create_work *cw;
2711 
2712 	if (!css_tryget_online(&memcg->css))
2713 		return;
2714 
2715 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2716 	if (!cw)
2717 		return;
2718 
2719 	cw->memcg = memcg;
2720 	cw->cachep = cachep;
2721 	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2722 
2723 	queue_work(memcg_kmem_cache_wq, &cw->work);
2724 }
2725 
2726 static inline bool memcg_kmem_bypass(void)
2727 {
2728 	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2729 		return true;
2730 	return false;
2731 }
2732 
2733 /**
2734  * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2735  * @cachep: the original global kmem cache
2736  *
2737  * Return the kmem_cache we're supposed to use for a slab allocation.
2738  * We try to use the current memcg's version of the cache.
2739  *
2740  * If the cache does not exist yet, if we are the first user of it, we
2741  * create it asynchronously in a workqueue and let the current allocation
2742  * go through with the original cache.
2743  *
2744  * This function takes a reference to the cache it returns to assure it
2745  * won't get destroyed while we are working with it. Once the caller is
2746  * done with it, memcg_kmem_put_cache() must be called to release the
2747  * reference.
2748  */
2749 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2750 {
2751 	struct mem_cgroup *memcg;
2752 	struct kmem_cache *memcg_cachep;
2753 	struct memcg_cache_array *arr;
2754 	int kmemcg_id;
2755 
2756 	VM_BUG_ON(!is_root_cache(cachep));
2757 
2758 	if (memcg_kmem_bypass())
2759 		return cachep;
2760 
2761 	rcu_read_lock();
2762 
2763 	if (unlikely(current->active_memcg))
2764 		memcg = current->active_memcg;
2765 	else
2766 		memcg = mem_cgroup_from_task(current);
2767 
2768 	if (!memcg || memcg == root_mem_cgroup)
2769 		goto out_unlock;
2770 
2771 	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2772 	if (kmemcg_id < 0)
2773 		goto out_unlock;
2774 
2775 	arr = rcu_dereference(cachep->memcg_params.memcg_caches);
2776 
2777 	/*
2778 	 * Make sure we will access the up-to-date value. The code updating
2779 	 * memcg_caches issues a write barrier to match the data dependency
2780 	 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
2781 	 */
2782 	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
2783 
2784 	/*
2785 	 * If we are in a safe context (can wait, and not in interrupt
2786 	 * context), we could be predictable and return right away.
2787 	 * This would guarantee that the allocation being performed
2788 	 * already belongs in the new cache.
2789 	 *
2790 	 * However, there are some clashes that can arise from locking.
2791 	 * For instance, because we acquire the slab_mutex while doing
2792 	 * memcg_create_kmem_cache, this means no further allocation
2793 	 * could happen with the slab_mutex held. So it's better to
2794 	 * defer everything.
2795 	 *
2796 	 * If the memcg is dying or memcg_cache is about to be released,
2797 	 * don't bother creating new kmem_caches. Because memcg_cachep
2798 	 * is ZEROed as the first step of kmem offlining, we don't need
2799 	 * percpu_ref_tryget_live() here. The css_tryget_online() check in
2800 	 * memcg_schedule_kmem_cache_create() will prevent the creation
2801 	 * of a new kmem_cache.
2802 	 */
2803 	if (unlikely(!memcg_cachep))
2804 		memcg_schedule_kmem_cache_create(memcg, cachep);
2805 	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
2806 		cachep = memcg_cachep;
2807 out_unlock:
2808 	rcu_read_unlock();
2809 	return cachep;
2810 }
2811 
2812 /**
2813  * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2814  * @cachep: the cache returned by memcg_kmem_get_cache
2815  */
2816 void memcg_kmem_put_cache(struct kmem_cache *cachep)
2817 {
2818 	if (!is_root_cache(cachep))
2819 		percpu_ref_put(&cachep->memcg_params.refcnt);
2820 }
2821 
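/*
 * Hedged sketch of the expected call pattern for the get/put pair above
 * (the real callers are slab_pre_alloc_hook()/slab_post_alloc_hook() in
 * mm/slab.h at this revision):
 *
 *	cachep = memcg_kmem_get_cache(s);	// @s must be a root cache
 *	...allocate the object from cachep...
 *	memcg_kmem_put_cache(cachep);		// drop the reference
 */
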
2822 /**
2823  * __memcg_kmem_charge_memcg: charge a kmem page
2824  * @page: page to charge
2825  * @gfp: reclaim mode
2826  * @order: allocation order
2827  * @memcg: memory cgroup to charge
2828  *
2829  * Returns 0 on success, an error code on failure.
2830  */
2831 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2832 			    struct mem_cgroup *memcg)
2833 {
2834 	unsigned int nr_pages = 1 << order;
2835 	struct page_counter *counter;
2836 	int ret;
2837 
2838 	ret = try_charge(memcg, gfp, nr_pages);
2839 	if (ret)
2840 		return ret;
2841 
2842 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2843 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2844 
2845 		/*
2846 		 * Enforce __GFP_NOFAIL allocation because callers are not
2847 		 * prepared to see failures and likely do not have any failure
2848 		 * handling code.
2849 		 */
2850 		if (gfp & __GFP_NOFAIL) {
2851 			page_counter_charge(&memcg->kmem, nr_pages);
2852 			return 0;
2853 		}
2854 		cancel_charge(memcg, nr_pages);
2855 		return -ENOMEM;
2856 	}
2857 	return 0;
2858 }
2859 
2860 /**
2861  * __memcg_kmem_charge: charge a kmem page to the current memory cgroup
2862  * @page: page to charge
2863  * @gfp: reclaim mode
2864  * @order: allocation order
2865  *
2866  * Returns 0 on success, an error code on failure.
2867  */
2868 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2869 {
2870 	struct mem_cgroup *memcg;
2871 	int ret = 0;
2872 
2873 	if (memcg_kmem_bypass())
2874 		return 0;
2875 
2876 	memcg = get_mem_cgroup_from_current();
2877 	if (!mem_cgroup_is_root(memcg)) {
2878 		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2879 		if (!ret) {
2880 			page->mem_cgroup = memcg;
2881 			__SetPageKmemcg(page);
2882 		}
2883 	}
2884 	css_put(&memcg->css);
2885 	return ret;
2886 }
2887 
2888 /**
2889  * __memcg_kmem_uncharge_memcg: uncharge a kmem page
2890  * @memcg: memcg to uncharge
2891  * @nr_pages: number of pages to uncharge
2892  */
2893 void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
2894 				 unsigned int nr_pages)
2895 {
2896 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2897 		page_counter_uncharge(&memcg->kmem, nr_pages);
2898 
2899 	page_counter_uncharge(&memcg->memory, nr_pages);
2900 	if (do_memsw_account())
2901 		page_counter_uncharge(&memcg->memsw, nr_pages);
2902 }
2903 /**
2904  * __memcg_kmem_uncharge: uncharge a kmem page
2905  * @page: page to uncharge
2906  * @order: allocation order
2907  */
2908 void __memcg_kmem_uncharge(struct page *page, int order)
2909 {
2910 	struct mem_cgroup *memcg = page->mem_cgroup;
2911 	unsigned int nr_pages = 1 << order;
2912 
2913 	if (!memcg)
2914 		return;
2915 
2916 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2917 	__memcg_kmem_uncharge_memcg(memcg, nr_pages);
2918 	page->mem_cgroup = NULL;
2919 
2920 	/* slab pages do not have PageKmemcg flag set */
2921 	if (PageKmemcg(page))
2922 		__ClearPageKmemcg(page);
2923 
2924 	css_put_many(&memcg->css, nr_pages);
2925 }
2926 #endif /* CONFIG_MEMCG_KMEM */
2927 
2928 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2929 
2930 /*
2931  * Because tail pages are not marked as "used", mark them now. We're under
2932  * pgdat->lru_lock, and migration entries are set up in all page mappings.
2933  */
2934 void mem_cgroup_split_huge_fixup(struct page *head)
2935 {
2936 	int i;
2937 
2938 	if (mem_cgroup_disabled())
2939 		return;
2940 
2941 	for (i = 1; i < HPAGE_PMD_NR; i++)
2942 		head[i].mem_cgroup = head->mem_cgroup;
2943 
2944 	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
2945 }
2946 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2947 
2948 #ifdef CONFIG_MEMCG_SWAP
2949 /**
2950  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2951  * @entry: swap entry to be moved
2952  * @from:  mem_cgroup which the entry is moved from
2953  * @to:  mem_cgroup which the entry is moved to
2954  *
2955  * It succeeds only when the swap_cgroup's record for this entry is the same
2956  * as the mem_cgroup's id of @from.
2957  *
2958  * Returns 0 on success, -EINVAL on failure.
2959  *
2960  * The caller must have charged to @to, IOW, called page_counter_charge() on
2961  * both res and memsw, and called css_get().
2962  */
2963 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2964 				struct mem_cgroup *from, struct mem_cgroup *to)
2965 {
2966 	unsigned short old_id, new_id;
2967 
2968 	old_id = mem_cgroup_id(from);
2969 	new_id = mem_cgroup_id(to);
2970 
2971 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2972 		mod_memcg_state(from, MEMCG_SWAP, -1);
2973 		mod_memcg_state(to, MEMCG_SWAP, 1);
2974 		return 0;
2975 	}
2976 	return -EINVAL;
2977 }
2978 #else
2979 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2980 				struct mem_cgroup *from, struct mem_cgroup *to)
2981 {
2982 	return -EINVAL;
2983 }
2984 #endif
2985 
2986 static DEFINE_MUTEX(memcg_max_mutex);
2987 
2988 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
2989 				 unsigned long max, bool memsw)
2990 {
2991 	bool enlarge = false;
2992 	bool drained = false;
2993 	int ret;
2994 	bool limits_invariant;
2995 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
2996 
2997 	do {
2998 		if (signal_pending(current)) {
2999 			ret = -EINTR;
3000 			break;
3001 		}
3002 
3003 		mutex_lock(&memcg_max_mutex);
3004 		/*
3005 		 * Make sure that the new limit (memsw or memory limit) doesn't
3006 		 * break our basic invariant rule memory.max <= memsw.max.
3007 		 */
3008 		limits_invariant = memsw ? max >= memcg->memory.max :
3009 					   max <= memcg->memsw.max;
3010 		if (!limits_invariant) {
3011 			mutex_unlock(&memcg_max_mutex);
3012 			ret = -EINVAL;
3013 			break;
3014 		}
3015 		if (max > counter->max)
3016 			enlarge = true;
3017 		ret = page_counter_set_max(counter, max);
3018 		mutex_unlock(&memcg_max_mutex);
3019 
3020 		if (!ret)
3021 			break;
3022 
3023 		if (!drained) {
3024 			drain_all_stock(memcg);
3025 			drained = true;
3026 			continue;
3027 		}
3028 
3029 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3030 					GFP_KERNEL, !memsw)) {
3031 			ret = -EBUSY;
3032 			break;
3033 		}
3034 	} while (true);
3035 
3036 	if (!ret && enlarge)
3037 		memcg_oom_recover(memcg);
3038 
3039 	return ret;
3040 }
3041 
3042 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3043 					    gfp_t gfp_mask,
3044 					    unsigned long *total_scanned)
3045 {
3046 	unsigned long nr_reclaimed = 0;
3047 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3048 	unsigned long reclaimed;
3049 	int loop = 0;
3050 	struct mem_cgroup_tree_per_node *mctz;
3051 	unsigned long excess;
3052 	unsigned long nr_scanned;
3053 
3054 	if (order > 0)
3055 		return 0;
3056 
3057 	mctz = soft_limit_tree_node(pgdat->node_id);
3058 
3059 	/*
3060 	 * Do not even bother to check the largest node if the root
3061 	 * is empty. Do it lockless to prevent lock bouncing. Races
3062 	 * are acceptable as soft limit is best effort anyway.
3063 	 */
3064 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3065 		return 0;
3066 
3067 	/*
3068 	 * This loop can run for a while, especially if mem_cgroups
3069 	 * continuously keep exceeding their soft limit, putting the
3070 	 * system under pressure.
3071 	 */
3072 	do {
3073 		if (next_mz)
3074 			mz = next_mz;
3075 		else
3076 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3077 		if (!mz)
3078 			break;
3079 
3080 		nr_scanned = 0;
3081 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3082 						    gfp_mask, &nr_scanned);
3083 		nr_reclaimed += reclaimed;
3084 		*total_scanned += nr_scanned;
3085 		spin_lock_irq(&mctz->lock);
3086 		__mem_cgroup_remove_exceeded(mz, mctz);
3087 
3088 		/*
3089 		 * If we failed to reclaim anything from this memory cgroup,
3090 		 * it is time to move on to the next cgroup.
3091 		 */
3092 		next_mz = NULL;
3093 		if (!reclaimed)
3094 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3095 
3096 		excess = soft_limit_excess(mz->memcg);
3097 		/*
3098 		 * One school of thought says that we should not add
3099 		 * back the node to the tree if reclaim returns 0.
3100 		 * But our reclaim could return 0, simply because due
3101 		 * to priority we are exposing a smaller subset of
3102 		 * memory to reclaim from. Consider this as a longer
3103 		 * term TODO.
3104 		 */
3105 		/* If excess == 0, no tree ops */
3106 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3107 		spin_unlock_irq(&mctz->lock);
3108 		css_put(&mz->memcg->css);
3109 		loop++;
3110 		/*
3111 		 * Could not reclaim anything and there are no more
3112 		 * mem cgroups to try or we seem to be looping without
3113 		 * reclaiming anything.
3114 		 */
3115 		if (!nr_reclaimed &&
3116 			(next_mz == NULL ||
3117 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3118 			break;
3119 	} while (!nr_reclaimed);
3120 	if (next_mz)
3121 		css_put(&next_mz->memcg->css);
3122 	return nr_reclaimed;
3123 }
3124 
3125 /*
3126  * Test whether @memcg has children, dead or alive.  Note that this
3127  * function doesn't care whether @memcg has use_hierarchy enabled and
3128  * returns %true if there are child csses according to the cgroup
3129  * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3130  */
3131 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3132 {
3133 	bool ret;
3134 
3135 	rcu_read_lock();
3136 	ret = css_next_child(NULL, &memcg->css);
3137 	rcu_read_unlock();
3138 	return ret;
3139 }
3140 
3141 /*
3142  * Reclaims as many pages from the given memcg as possible.
3143  *
3144  * Caller is responsible for holding css reference for memcg.
3145  */
3146 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3147 {
3148 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3149 
3150 	/* we call try-to-free pages to make this cgroup empty */
3151 	lru_add_drain_all();
3152 
3153 	drain_all_stock(memcg);
3154 
3155 	/* try to free all pages in this cgroup */
3156 	while (nr_retries && page_counter_read(&memcg->memory)) {
3157 		int progress;
3158 
3159 		if (signal_pending(current))
3160 			return -EINTR;
3161 
3162 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3163 							GFP_KERNEL, true);
3164 		if (!progress) {
3165 			nr_retries--;
3166 			/* maybe some writeback is necessary */
3167 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3168 		}
3169 
3170 	}
3171 
3172 	return 0;
3173 }
3174 
3175 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3176 					    char *buf, size_t nbytes,
3177 					    loff_t off)
3178 {
3179 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3180 
3181 	if (mem_cgroup_is_root(memcg))
3182 		return -EINVAL;
3183 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3184 }
3185 
3186 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3187 				     struct cftype *cft)
3188 {
3189 	return mem_cgroup_from_css(css)->use_hierarchy;
3190 }
3191 
3192 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3193 				      struct cftype *cft, u64 val)
3194 {
3195 	int retval = 0;
3196 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3197 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3198 
3199 	if (memcg->use_hierarchy == val)
3200 		return 0;
3201 
3202 	/*
3203 	 * If parent's use_hierarchy is set, we can't make any modifications
3204 	 * in the child subtrees. If it is unset, then the change can
3205 	 * occur, provided the current cgroup has no children.
3206 	 *
3207 	 * For the root cgroup, parent_memcg is NULL; we allow the value
3208 	 * to be set if there are no children.
3209 	 */
3210 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3211 				(val == 1 || val == 0)) {
3212 		if (!memcg_has_children(memcg))
3213 			memcg->use_hierarchy = val;
3214 		else
3215 			retval = -EBUSY;
3216 	} else
3217 		retval = -EINVAL;
3218 
3219 	return retval;
3220 }
3221 
3222 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3223 {
3224 	unsigned long val;
3225 
3226 	if (mem_cgroup_is_root(memcg)) {
3227 		val = memcg_page_state(memcg, MEMCG_CACHE) +
3228 			memcg_page_state(memcg, MEMCG_RSS);
3229 		if (swap)
3230 			val += memcg_page_state(memcg, MEMCG_SWAP);
3231 	} else {
3232 		if (!swap)
3233 			val = page_counter_read(&memcg->memory);
3234 		else
3235 			val = page_counter_read(&memcg->memsw);
3236 	}
3237 	return val;
3238 }
3239 
3240 enum {
3241 	RES_USAGE,
3242 	RES_LIMIT,
3243 	RES_MAX_USAGE,
3244 	RES_FAILCNT,
3245 	RES_SOFT_LIMIT,
3246 };
3247 
3248 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3249 			       struct cftype *cft)
3250 {
3251 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3252 	struct page_counter *counter;
3253 
3254 	switch (MEMFILE_TYPE(cft->private)) {
3255 	case _MEM:
3256 		counter = &memcg->memory;
3257 		break;
3258 	case _MEMSWAP:
3259 		counter = &memcg->memsw;
3260 		break;
3261 	case _KMEM:
3262 		counter = &memcg->kmem;
3263 		break;
3264 	case _TCP:
3265 		counter = &memcg->tcpmem;
3266 		break;
3267 	default:
3268 		BUG();
3269 	}
3270 
3271 	switch (MEMFILE_ATTR(cft->private)) {
3272 	case RES_USAGE:
3273 		if (counter == &memcg->memory)
3274 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3275 		if (counter == &memcg->memsw)
3276 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3277 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3278 	case RES_LIMIT:
3279 		return (u64)counter->max * PAGE_SIZE;
3280 	case RES_MAX_USAGE:
3281 		return (u64)counter->watermark * PAGE_SIZE;
3282 	case RES_FAILCNT:
3283 		return counter->failcnt;
3284 	case RES_SOFT_LIMIT:
3285 		return (u64)memcg->soft_limit * PAGE_SIZE;
3286 	default:
3287 		BUG();
3288 	}
3289 }
3290 
3291 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
3292 {
3293 	unsigned long stat[MEMCG_NR_STAT];
3294 	struct mem_cgroup *mi;
3295 	int node, cpu, i;
3296 	int min_idx, max_idx;
3297 
3298 	if (slab_only) {
3299 		min_idx = NR_SLAB_RECLAIMABLE;
3300 		max_idx = NR_SLAB_UNRECLAIMABLE + 1; /* flush both slab counters */
3301 	} else {
3302 		min_idx = 0;
3303 		max_idx = MEMCG_NR_STAT;
3304 	}
3305 
3306 	for (i = min_idx; i < max_idx; i++)
3307 		stat[i] = 0;
3308 
3309 	for_each_online_cpu(cpu)
3310 		for (i = min_idx; i < max_idx; i++)
3311 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3312 
3313 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3314 		for (i = min_idx; i < max_idx; i++)
3315 			atomic_long_add(stat[i], &mi->vmstats[i]);
3316 
3317 	if (!slab_only)
3318 		max_idx = NR_VM_NODE_STAT_ITEMS;
3319 
3320 	for_each_node(node) {
3321 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3322 		struct mem_cgroup_per_node *pi;
3323 
3324 		for (i = min_idx; i < max_idx; i++)
3325 			stat[i] = 0;
3326 
3327 		for_each_online_cpu(cpu)
3328 			for (i = min_idx; i < max_idx; i++)
3329 				stat[i] += per_cpu(
3330 					pn->lruvec_stat_cpu->count[i], cpu);
3331 
3332 		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3333 			for (i = min_idx; i < max_idx; i++)
3334 				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3335 	}
3336 }
3337 
3338 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3339 {
3340 	unsigned long events[NR_VM_EVENT_ITEMS];
3341 	struct mem_cgroup *mi;
3342 	int cpu, i;
3343 
3344 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3345 		events[i] = 0;
3346 
3347 	for_each_online_cpu(cpu)
3348 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3349 			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3350 					     cpu);
3351 
3352 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3353 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3354 			atomic_long_add(events[i], &mi->vmevents[i]);
3355 }
3356 
3357 #ifdef CONFIG_MEMCG_KMEM
3358 static int memcg_online_kmem(struct mem_cgroup *memcg)
3359 {
3360 	int memcg_id;
3361 
3362 	if (cgroup_memory_nokmem)
3363 		return 0;
3364 
3365 	BUG_ON(memcg->kmemcg_id >= 0);
3366 	BUG_ON(memcg->kmem_state);
3367 
3368 	memcg_id = memcg_alloc_cache_id();
3369 	if (memcg_id < 0)
3370 		return memcg_id;
3371 
3372 	static_branch_inc(&memcg_kmem_enabled_key);
3373 	/*
3374 	 * A memory cgroup is considered kmem-online as soon as it gets
3375 	 * kmemcg_id. Setting the id after enabling static branching will
3376 	 * guarantee no one starts accounting before all call sites are
3377 	 * patched.
3378 	 */
3379 	memcg->kmemcg_id = memcg_id;
3380 	memcg->kmem_state = KMEM_ONLINE;
3381 	INIT_LIST_HEAD(&memcg->kmem_caches);
3382 
3383 	return 0;
3384 }
3385 
3386 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3387 {
3388 	struct cgroup_subsys_state *css;
3389 	struct mem_cgroup *parent, *child;
3390 	int kmemcg_id;
3391 
3392 	if (memcg->kmem_state != KMEM_ONLINE)
3393 		return;
3394 	/*
3395 	 * Clear the online state before clearing memcg_caches array
3396 	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
3397 	 * guarantees that no cache will be created for this cgroup
3398 	 * after we are done (see memcg_create_kmem_cache()).
3399 	 */
3400 	memcg->kmem_state = KMEM_ALLOCATED;
3401 
3402 	parent = parent_mem_cgroup(memcg);
3403 	if (!parent)
3404 		parent = root_mem_cgroup;
3405 
3406 	/*
3407 	 * Deactivate and reparent kmem_caches. Then flush percpu
3408 	 * slab statistics to have precise values at the parent and
3409 	 * all ancestor levels. It's required to keep slab stats
3410 	 * accurate after the reparenting of kmem_caches.
3411 	 */
3412 	memcg_deactivate_kmem_caches(memcg, parent);
3413 	memcg_flush_percpu_vmstats(memcg, true);
3414 
3415 	kmemcg_id = memcg->kmemcg_id;
3416 	BUG_ON(kmemcg_id < 0);
3417 
3418 	/*
3419 	 * Change kmemcg_id of this cgroup and all its descendants to the
3420 	 * parent's id, and then move all entries from this cgroup's list_lrus
3421 	 * to ones of the parent. After we have finished, all list_lrus
3422 	 * corresponding to this cgroup are guaranteed to remain empty. The
3423 	 * ordering is imposed by list_lru_node->lock taken by
3424 	 * memcg_drain_all_list_lrus().
3425 	 */
3426 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3427 	css_for_each_descendant_pre(css, &memcg->css) {
3428 		child = mem_cgroup_from_css(css);
3429 		BUG_ON(child->kmemcg_id != kmemcg_id);
3430 		child->kmemcg_id = parent->kmemcg_id;
3431 		if (!memcg->use_hierarchy)
3432 			break;
3433 	}
3434 	rcu_read_unlock();
3435 
3436 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3437 
3438 	memcg_free_cache_id(kmemcg_id);
3439 }
3440 
3441 static void memcg_free_kmem(struct mem_cgroup *memcg)
3442 {
3443 	/* css_alloc() failed, offlining didn't happen */
3444 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3445 		memcg_offline_kmem(memcg);
3446 
3447 	if (memcg->kmem_state == KMEM_ALLOCATED) {
3448 		WARN_ON(!list_empty(&memcg->kmem_caches));
3449 		static_branch_dec(&memcg_kmem_enabled_key);
3450 	}
3451 }
3452 #else
3453 static int memcg_online_kmem(struct mem_cgroup *memcg)
3454 {
3455 	return 0;
3456 }
3457 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3458 {
3459 }
3460 static void memcg_free_kmem(struct mem_cgroup *memcg)
3461 {
3462 }
3463 #endif /* CONFIG_MEMCG_KMEM */
3464 
3465 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3466 				 unsigned long max)
3467 {
3468 	int ret;
3469 
3470 	mutex_lock(&memcg_max_mutex);
3471 	ret = page_counter_set_max(&memcg->kmem, max);
3472 	mutex_unlock(&memcg_max_mutex);
3473 	return ret;
3474 }
3475 
3476 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3477 {
3478 	int ret;
3479 
3480 	mutex_lock(&memcg_max_mutex);
3481 
3482 	ret = page_counter_set_max(&memcg->tcpmem, max);
3483 	if (ret)
3484 		goto out;
3485 
3486 	if (!memcg->tcpmem_active) {
3487 		/*
3488 		 * The active flag needs to be written after the static_key
3489 		 * update. This is what guarantees that the socket activation
3490 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3491 		 * for details, and note that we don't mark any socket as
3492 		 * belonging to this memcg until that flag is up.
3493 		 *
3494 		 * We need to do this, because static_keys will span multiple
3495 		 * sites, but we can't control their order. If we mark a socket
3496 		 * as accounted, but the accounting functions are not patched in
3497 		 * yet, we'll lose accounting.
3498 		 *
3499 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3500 		 * because when this value change, the code to process it is not
3501 		 * patched in yet.
3502 		 */
3503 		static_branch_inc(&memcg_sockets_enabled_key);
3504 		memcg->tcpmem_active = true;
3505 	}
3506 out:
3507 	mutex_unlock(&memcg_max_mutex);
3508 	return ret;
3509 }
3510 
3511 /*
3512  * The user of this function is...
3513  * RES_LIMIT.
3514  */
3515 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3516 				char *buf, size_t nbytes, loff_t off)
3517 {
3518 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3519 	unsigned long nr_pages;
3520 	int ret;
3521 
3522 	buf = strstrip(buf);
3523 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3524 	if (ret)
3525 		return ret;
3526 
3527 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3528 	case RES_LIMIT:
3529 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3530 			ret = -EINVAL;
3531 			break;
3532 		}
3533 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3534 		case _MEM:
3535 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3536 			break;
3537 		case _MEMSWAP:
3538 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3539 			break;
3540 		case _KMEM:
3541 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3542 				     "Please report your usecase to linux-mm@kvack.org if you "
3543 				     "depend on this functionality.\n");
3544 			ret = memcg_update_kmem_max(memcg, nr_pages);
3545 			break;
3546 		case _TCP:
3547 			ret = memcg_update_tcp_max(memcg, nr_pages);
3548 			break;
3549 		}
3550 		break;
3551 	case RES_SOFT_LIMIT:
3552 		memcg->soft_limit = nr_pages;
3553 		ret = 0;
3554 		break;
3555 	}
3556 	return ret ?: nbytes;
3557 }
3558 
3559 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3560 				size_t nbytes, loff_t off)
3561 {
3562 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3563 	struct page_counter *counter;
3564 
3565 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3566 	case _MEM:
3567 		counter = &memcg->memory;
3568 		break;
3569 	case _MEMSWAP:
3570 		counter = &memcg->memsw;
3571 		break;
3572 	case _KMEM:
3573 		counter = &memcg->kmem;
3574 		break;
3575 	case _TCP:
3576 		counter = &memcg->tcpmem;
3577 		break;
3578 	default:
3579 		BUG();
3580 	}
3581 
3582 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3583 	case RES_MAX_USAGE:
3584 		page_counter_reset_watermark(counter);
3585 		break;
3586 	case RES_FAILCNT:
3587 		counter->failcnt = 0;
3588 		break;
3589 	default:
3590 		BUG();
3591 	}
3592 
3593 	return nbytes;
3594 }
3595 
3596 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3597 					struct cftype *cft)
3598 {
3599 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3600 }
3601 
3602 #ifdef CONFIG_MMU
3603 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3604 					struct cftype *cft, u64 val)
3605 {
3606 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3607 
3608 	if (val & ~MOVE_MASK)
3609 		return -EINVAL;
3610 
3611 	/*
3612 	 * No kind of locking is needed in here, because ->can_attach() will
3613 	 * check this value once in the beginning of the process, and then carry
3614 	 * on with stale data. This means that changes to this value will only
3615 	 * affect task migrations starting after the change.
3616 	 */
3617 	memcg->move_charge_at_immigrate = val;
3618 	return 0;
3619 }
3620 #else
3621 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3622 					struct cftype *cft, u64 val)
3623 {
3624 	return -ENOSYS;
3625 }
3626 #endif
3627 
3628 #ifdef CONFIG_NUMA
3629 
3630 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3631 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3632 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3633 
3634 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3635 					   int nid, unsigned int lru_mask)
3636 {
3637 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3638 	unsigned long nr = 0;
3639 	enum lru_list lru;
3640 
3641 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3642 
3643 	for_each_lru(lru) {
3644 		if (!(BIT(lru) & lru_mask))
3645 			continue;
3646 		nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3647 	}
3648 	return nr;
3649 }
3650 
3651 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3652 					     unsigned int lru_mask)
3653 {
3654 	unsigned long nr = 0;
3655 	enum lru_list lru;
3656 
3657 	for_each_lru(lru) {
3658 		if (!(BIT(lru) & lru_mask))
3659 			continue;
3660 		nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3661 	}
3662 	return nr;
3663 }
3664 
3665 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3666 {
3667 	struct numa_stat {
3668 		const char *name;
3669 		unsigned int lru_mask;
3670 	};
3671 
3672 	static const struct numa_stat stats[] = {
3673 		{ "total", LRU_ALL },
3674 		{ "file", LRU_ALL_FILE },
3675 		{ "anon", LRU_ALL_ANON },
3676 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3677 	};
3678 	const struct numa_stat *stat;
3679 	int nid;
3680 	unsigned long nr;
3681 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3682 
3683 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3684 		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3685 		seq_printf(m, "%s=%lu", stat->name, nr);
3686 		for_each_node_state(nid, N_MEMORY) {
3687 			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3688 							  stat->lru_mask);
3689 			seq_printf(m, " N%d=%lu", nid, nr);
3690 		}
3691 		seq_putc(m, '\n');
3692 	}
3693 
3694 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3695 		struct mem_cgroup *iter;
3696 
3697 		nr = 0;
3698 		for_each_mem_cgroup_tree(iter, memcg)
3699 			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3700 		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3701 		for_each_node_state(nid, N_MEMORY) {
3702 			nr = 0;
3703 			for_each_mem_cgroup_tree(iter, memcg)
3704 				nr += mem_cgroup_node_nr_lru_pages(
3705 					iter, nid, stat->lru_mask);
3706 			seq_printf(m, " N%d=%lu", nid, nr);
3707 		}
3708 		seq_putc(m, '\n');
3709 	}
3710 
3711 	return 0;
3712 }
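
/*
 * Illustrative memory.numa_stat output on a hypothetical two-node
 * machine (all values made up):
 *
 *	total=240522 N0=118741 N1=121781
 *	file=178947 N0=88316 N1=90631
 *	anon=61575 N0=30425 N1=31150
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=505540 N0=252371 N1=253169
 *	...
 */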
3713 #endif /* CONFIG_NUMA */
3714 
3715 static const unsigned int memcg1_stats[] = {
3716 	MEMCG_CACHE,
3717 	MEMCG_RSS,
3718 	MEMCG_RSS_HUGE,
3719 	NR_SHMEM,
3720 	NR_FILE_MAPPED,
3721 	NR_FILE_DIRTY,
3722 	NR_WRITEBACK,
3723 	MEMCG_SWAP,
3724 };
3725 
3726 static const char *const memcg1_stat_names[] = {
3727 	"cache",
3728 	"rss",
3729 	"rss_huge",
3730 	"shmem",
3731 	"mapped_file",
3732 	"dirty",
3733 	"writeback",
3734 	"swap",
3735 };
3736 
3737 /* Universal VM events cgroup1 shows, original sort order */
3738 static const unsigned int memcg1_events[] = {
3739 	PGPGIN,
3740 	PGPGOUT,
3741 	PGFAULT,
3742 	PGMAJFAULT,
3743 };
3744 
3745 static const char *const memcg1_event_names[] = {
3746 	"pgpgin",
3747 	"pgpgout",
3748 	"pgfault",
3749 	"pgmajfault",
3750 };
3751 
3752 static int memcg_stat_show(struct seq_file *m, void *v)
3753 {
3754 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3755 	unsigned long memory, memsw;
3756 	struct mem_cgroup *mi;
3757 	unsigned int i;
3758 
3759 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3760 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3761 
3762 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3763 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3764 			continue;
3765 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3766 			   memcg_page_state_local(memcg, memcg1_stats[i]) *
3767 			   PAGE_SIZE);
3768 	}
3769 
3770 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3771 		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
3772 			   memcg_events_local(memcg, memcg1_events[i]));
3773 
3774 	for (i = 0; i < NR_LRU_LISTS; i++)
3775 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3776 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3777 			   PAGE_SIZE);
3778 
3779 	/* Hierarchical information */
3780 	memory = memsw = PAGE_COUNTER_MAX;
3781 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3782 		memory = min(memory, mi->memory.max);
3783 		memsw = min(memsw, mi->memsw.max);
3784 	}
3785 	seq_printf(m, "hierarchical_memory_limit %llu\n",
3786 		   (u64)memory * PAGE_SIZE);
3787 	if (do_memsw_account())
3788 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3789 			   (u64)memsw * PAGE_SIZE);
3790 
3791 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3792 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3793 			continue;
3794 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3795 			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
3796 			   PAGE_SIZE);
3797 	}
3798 
3799 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3800 		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
3801 			   (u64)memcg_events(memcg, memcg1_events[i]));
3802 
3803 	for (i = 0; i < NR_LRU_LISTS; i++)
3804 		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
3805 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3806 			   PAGE_SIZE);
3807 
3808 #ifdef CONFIG_DEBUG_VM
3809 	{
3810 		pg_data_t *pgdat;
3811 		struct mem_cgroup_per_node *mz;
3812 		struct zone_reclaim_stat *rstat;
3813 		unsigned long recent_rotated[2] = {0, 0};
3814 		unsigned long recent_scanned[2] = {0, 0};
3815 
3816 		for_each_online_pgdat(pgdat) {
3817 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3818 			rstat = &mz->lruvec.reclaim_stat;
3819 
3820 			recent_rotated[0] += rstat->recent_rotated[0];
3821 			recent_rotated[1] += rstat->recent_rotated[1];
3822 			recent_scanned[0] += rstat->recent_scanned[0];
3823 			recent_scanned[1] += rstat->recent_scanned[1];
3824 		}
3825 		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3826 		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3827 		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3828 		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3829 	}
3830 #endif
3831 
3832 	return 0;
3833 }
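
/*
 * Illustrative (made-up) excerpt of the resulting memory.stat output:
 *
 *	cache 212992
 *	rss 1097728
 *	pgpgin 4821
 *	inactive_anon 0
 *	hierarchical_memory_limit 536870912
 *	total_cache 212992
 *
 * Local values come first, then the hierarchical limits, then the
 * subtree-wide "total_" counters; sizes are in bytes, events are raw
 * counts.
 */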
3834 
3835 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3836 				      struct cftype *cft)
3837 {
3838 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3839 
3840 	return mem_cgroup_swappiness(memcg);
3841 }
3842 
3843 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3844 				       struct cftype *cft, u64 val)
3845 {
3846 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3847 
3848 	if (val > 100)
3849 		return -EINVAL;
3850 
3851 	if (css->parent)
3852 		memcg->swappiness = val;
3853 	else
3854 		vm_swappiness = val;
3855 
3856 	return 0;
3857 }
3858 
3859 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3860 {
3861 	struct mem_cgroup_threshold_ary *t;
3862 	unsigned long usage;
3863 	int i;
3864 
3865 	rcu_read_lock();
3866 	if (!swap)
3867 		t = rcu_dereference(memcg->thresholds.primary);
3868 	else
3869 		t = rcu_dereference(memcg->memsw_thresholds.primary);
3870 
3871 	if (!t)
3872 		goto unlock;
3873 
3874 	usage = mem_cgroup_usage(memcg, swap);
3875 
3876 	/*
3877 	 * current_threshold points to the threshold just below or equal to
3878 	 * usage.  If that no longer holds, a threshold was crossed after the
3879 	 * last call of __mem_cgroup_threshold().
3880 	 */
3881 	i = t->current_threshold;
3882 
3883 	/*
3884 	 * Iterate backward over the array of thresholds starting from
3885 	 * current_threshold and check if a threshold is crossed.
3886 	 * If none of the thresholds below usage is crossed, we read
3887 	 * only one element of the array here.
3888 	 */
3889 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3890 		eventfd_signal(t->entries[i].eventfd, 1);
3891 
3892 	/* i = current_threshold + 1 */
3893 	i++;
3894 
3895 	/*
3896 	 * Iterate forward over the array of thresholds starting from
3897 	 * current_threshold+1 and check if a threshold is crossed.
3898 	 * If none of the thresholds above usage is crossed, we read
3899 	 * only one element of the array here.
3900 	 */
3901 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3902 		eventfd_signal(t->entries[i].eventfd, 1);
3903 
3904 	/* Update current_threshold */
3905 	t->current_threshold = i - 1;
3906 unlock:
3907 	rcu_read_unlock();
3908 }
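
/*
 * Worked example (hypothetical numbers): with thresholds at 4M, 8M and
 * 16M and a previous usage of 10M, current_threshold indexes the 8M
 * entry.  If usage has since dropped to 3M, the backward scan signals
 * the 8M and 4M eventfds and leaves current_threshold at -1; if usage
 * instead grew to 20M, the forward scan signals the 16M eventfd and
 * current_threshold ends up indexing the 16M entry.
 */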
3909 
3910 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3911 {
3912 	while (memcg) {
3913 		__mem_cgroup_threshold(memcg, false);
3914 		if (do_memsw_account())
3915 			__mem_cgroup_threshold(memcg, true);
3916 
3917 		memcg = parent_mem_cgroup(memcg);
3918 	}
3919 }
3920 
3921 static int compare_thresholds(const void *a, const void *b)
3922 {
3923 	const struct mem_cgroup_threshold *_a = a;
3924 	const struct mem_cgroup_threshold *_b = b;
3925 
3926 	if (_a->threshold > _b->threshold)
3927 		return 1;
3928 
3929 	if (_a->threshold < _b->threshold)
3930 		return -1;
3931 
3932 	return 0;
3933 }
3934 
3935 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3936 {
3937 	struct mem_cgroup_eventfd_list *ev;
3938 
3939 	spin_lock(&memcg_oom_lock);
3940 
3941 	list_for_each_entry(ev, &memcg->oom_notify, list)
3942 		eventfd_signal(ev->eventfd, 1);
3943 
3944 	spin_unlock(&memcg_oom_lock);
3945 	return 0;
3946 }
3947 
3948 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3949 {
3950 	struct mem_cgroup *iter;
3951 
3952 	for_each_mem_cgroup_tree(iter, memcg)
3953 		mem_cgroup_oom_notify_cb(iter);
3954 }
3955 
3956 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3957 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3958 {
3959 	struct mem_cgroup_thresholds *thresholds;
3960 	struct mem_cgroup_threshold_ary *new;
3961 	unsigned long threshold;
3962 	unsigned long usage;
3963 	int i, size, ret;
3964 
3965 	ret = page_counter_memparse(args, "-1", &threshold);
3966 	if (ret)
3967 		return ret;
3968 
3969 	mutex_lock(&memcg->thresholds_lock);
3970 
3971 	if (type == _MEM) {
3972 		thresholds = &memcg->thresholds;
3973 		usage = mem_cgroup_usage(memcg, false);
3974 	} else if (type == _MEMSWAP) {
3975 		thresholds = &memcg->memsw_thresholds;
3976 		usage = mem_cgroup_usage(memcg, true);
3977 	} else
3978 		BUG();
3979 
3980 	/* Check if a threshold crossed before adding a new one */
3981 	if (thresholds->primary)
3982 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3983 
3984 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3985 
3986 	/* Allocate memory for new array of thresholds */
3987 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
3988 	if (!new) {
3989 		ret = -ENOMEM;
3990 		goto unlock;
3991 	}
3992 	new->size = size;
3993 
3994 	/* Copy thresholds (if any) to new array */
3995 	if (thresholds->primary) {
3996 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3997 				sizeof(struct mem_cgroup_threshold));
3998 	}
3999 
4000 	/* Add new threshold */
4001 	new->entries[size - 1].eventfd = eventfd;
4002 	new->entries[size - 1].threshold = threshold;
4003 
4004 	/* Sort thresholds. Registering a new threshold isn't time-critical */
4005 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4006 			compare_thresholds, NULL);
4007 
4008 	/* Find current threshold */
4009 	new->current_threshold = -1;
4010 	for (i = 0; i < size; i++) {
4011 		if (new->entries[i].threshold <= usage) {
4012 			/*
4013 			 * new->current_threshold will not be used until
4014 			 * rcu_assign_pointer(), so it's safe to increment
4015 			 * it here.
4016 			 */
4017 			++new->current_threshold;
4018 		} else
4019 			break;
4020 	}
4021 
4022 	/* Free old spare buffer and save old primary buffer as spare */
4023 	kfree(thresholds->spare);
4024 	thresholds->spare = thresholds->primary;
4025 
4026 	rcu_assign_pointer(thresholds->primary, new);
4027 
4028 	/* To be sure that nobody uses thresholds */
4029 	synchronize_rcu();
4030 
4031 unlock:
4032 	mutex_unlock(&memcg->thresholds_lock);
4033 
4034 	return ret;
4035 }
4036 
4037 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4038 	struct eventfd_ctx *eventfd, const char *args)
4039 {
4040 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4041 }
4042 
4043 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4044 	struct eventfd_ctx *eventfd, const char *args)
4045 {
4046 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4047 }
4048 
4049 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4050 	struct eventfd_ctx *eventfd, enum res_type type)
4051 {
4052 	struct mem_cgroup_thresholds *thresholds;
4053 	struct mem_cgroup_threshold_ary *new;
4054 	unsigned long usage;
4055 	int i, j, size;
4056 
4057 	mutex_lock(&memcg->thresholds_lock);
4058 
4059 	if (type == _MEM) {
4060 		thresholds = &memcg->thresholds;
4061 		usage = mem_cgroup_usage(memcg, false);
4062 	} else if (type == _MEMSWAP) {
4063 		thresholds = &memcg->memsw_thresholds;
4064 		usage = mem_cgroup_usage(memcg, true);
4065 	} else
4066 		BUG();
4067 
4068 	if (!thresholds->primary)
4069 		goto unlock;
4070 
4071 	/* Check if a threshold crossed before removing */
4072 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4073 
4074 	/* Calculate new number of threshold */
4075 	size = 0;
4076 	for (i = 0; i < thresholds->primary->size; i++) {
4077 		if (thresholds->primary->entries[i].eventfd != eventfd)
4078 			size++;
4079 	}
4080 
4081 	new = thresholds->spare;
4082 
4083 	/* Set thresholds array to NULL if we don't have thresholds */
4084 	if (!size) {
4085 		kfree(new);
4086 		new = NULL;
4087 		goto swap_buffers;
4088 	}
4089 
4090 	new->size = size;
4091 
4092 	/* Copy thresholds and find current threshold */
4093 	new->current_threshold = -1;
4094 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4095 		if (thresholds->primary->entries[i].eventfd == eventfd)
4096 			continue;
4097 
4098 		new->entries[j] = thresholds->primary->entries[i];
4099 		if (new->entries[j].threshold <= usage) {
4100 			/*
4101 			 * new->current_threshold will not be used
4102 			 * until rcu_assign_pointer(), so it's safe to increment
4103 			 * it here.
4104 			 */
4105 			++new->current_threshold;
4106 		}
4107 		j++;
4108 	}
4109 
4110 swap_buffers:
4111 	/* Swap primary and spare array */
4112 	thresholds->spare = thresholds->primary;
4113 
4114 	rcu_assign_pointer(thresholds->primary, new);
4115 
4116 	/* To be sure that nobody uses thresholds */
4117 	synchronize_rcu();
4118 
4119 	/* If all events are unregistered, free the spare array */
4120 	if (!new) {
4121 		kfree(thresholds->spare);
4122 		thresholds->spare = NULL;
4123 	}
4124 unlock:
4125 	mutex_unlock(&memcg->thresholds_lock);
4126 }
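
/*
 * A note on the primary/spare double buffering above (a description of
 * the existing scheme, not new behavior): registration allocates a
 * fresh array and retires the old primary into ->spare, while
 * unregistration reuses ->spare and therefore never has to allocate.
 * Since the spare is always at least as large as the shrunken set,
 * removing an event cannot fail with -ENOMEM, keeping eventfd teardown
 * reliable.
 */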
4127 
4128 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4129 	struct eventfd_ctx *eventfd)
4130 {
4131 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4132 }
4133 
4134 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4135 	struct eventfd_ctx *eventfd)
4136 {
4137 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4138 }
4139 
4140 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4141 	struct eventfd_ctx *eventfd, const char *args)
4142 {
4143 	struct mem_cgroup_eventfd_list *event;
4144 
4145 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4146 	if (!event)
4147 		return -ENOMEM;
4148 
4149 	spin_lock(&memcg_oom_lock);
4150 
4151 	event->eventfd = eventfd;
4152 	list_add(&event->list, &memcg->oom_notify);
4153 
4154 	/* already in OOM ? */
4155 	if (memcg->under_oom)
4156 		eventfd_signal(eventfd, 1);
4157 	spin_unlock(&memcg_oom_lock);
4158 
4159 	return 0;
4160 }
4161 
4162 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4163 	struct eventfd_ctx *eventfd)
4164 {
4165 	struct mem_cgroup_eventfd_list *ev, *tmp;
4166 
4167 	spin_lock(&memcg_oom_lock);
4168 
4169 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4170 		if (ev->eventfd == eventfd) {
4171 			list_del(&ev->list);
4172 			kfree(ev);
4173 		}
4174 	}
4175 
4176 	spin_unlock(&memcg_oom_lock);
4177 }
4178 
4179 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4180 {
4181 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4182 
4183 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4184 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4185 	seq_printf(sf, "oom_kill %lu\n",
4186 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4187 	return 0;
4188 }
4189 
4190 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4191 	struct cftype *cft, u64 val)
4192 {
4193 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4194 
4195 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4196 	if (!css->parent || !((val == 0) || (val == 1)))
4197 		return -EINVAL;
4198 
4199 	memcg->oom_kill_disable = val;
4200 	if (!val)
4201 		memcg_oom_recover(memcg);
4202 
4203 	return 0;
4204 }
4205 
4206 #ifdef CONFIG_CGROUP_WRITEBACK
4207 
4208 #include <trace/events/writeback.h>
4209 
4210 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4211 {
4212 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4213 }
4214 
4215 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4216 {
4217 	wb_domain_exit(&memcg->cgwb_domain);
4218 }
4219 
4220 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4221 {
4222 	wb_domain_size_changed(&memcg->cgwb_domain);
4223 }
4224 
4225 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4226 {
4227 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4228 
4229 	if (!memcg->css.parent)
4230 		return NULL;
4231 
4232 	return &memcg->cgwb_domain;
4233 }
4234 
4235 /*
4236  * idx can be of type enum memcg_stat_item or node_stat_item.
4237  * Keep in sync with memcg_exact_page().
4238  */
4239 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4240 {
4241 	long x = atomic_long_read(&memcg->vmstats[idx]);
4242 	int cpu;
4243 
4244 	for_each_online_cpu(cpu)
4245 		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4246 	if (x < 0)
4247 		x = 0;
4248 	return x;
4249 }
4250 
4251 /**
4252  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4253  * @wb: bdi_writeback in question
4254  * @pfilepages: out parameter for number of file pages
4255  * @pheadroom: out parameter for number of allocatable pages according to memcg
4256  * @pdirty: out parameter for number of dirty pages
4257  * @pwriteback: out parameter for number of pages under writeback
4258  *
4259  * Determine the numbers of file, headroom, dirty, and writeback pages in
4260  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4261  * is a bit more involved.
4262  *
4263  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4264  * headroom is calculated as the lowest headroom of itself and the
4265  * ancestors.  Note that this doesn't consider the actual amount of
4266  * available memory in the system.  The caller should further cap
4267  * *@pheadroom accordingly.
4268  */
4269 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4270 			 unsigned long *pheadroom, unsigned long *pdirty,
4271 			 unsigned long *pwriteback)
4272 {
4273 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4274 	struct mem_cgroup *parent;
4275 
4276 	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4277 
4278 	/* this should eventually include NR_UNSTABLE_NFS */
4279 	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4280 	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4281 			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4282 	*pheadroom = PAGE_COUNTER_MAX;
4283 
4284 	while ((parent = parent_mem_cgroup(memcg))) {
4285 		unsigned long ceiling = min(memcg->memory.max, memcg->high);
4286 		unsigned long used = page_counter_read(&memcg->memory);
4287 
4288 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4289 		memcg = parent;
4290 	}
4291 }
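
/*
 * Worked example (made-up numbers, expressed in bytes for clarity): a
 * memcg with max = 1G, high = 512M and 300M used contributes a ceiling
 * of 512M and a headroom of 212M; if an ancestor only has 100M of
 * headroom left, *pheadroom ends up as the tighter 100M.  The
 * "- min(ceiling, used)" form clamps each level's contribution to zero
 * instead of underflowing when usage already exceeds its ceiling.
 */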
4292 
4293 /*
4294  * Foreign dirty flushing
4295  *
4296  * There's an inherent mismatch between memcg and writeback.  The former
4297  * trackes ownership per-page while the latter per-inode.  This was a
4298  * deliberate design decision because honoring per-page ownership in the
4299  * writeback path is complicated, may lead to higher CPU and IO overheads
4300  * and deemed unnecessary given that write-sharing an inode across
4301  * different cgroups isn't a common use-case.
4302  *
4303  * Combined with inode majority-writer ownership switching, this works well
4304  * enough in most cases but there are some pathological cases.  For
4305  * example, let's say there are two cgroups A and B which keep writing to
4306  * different but confined parts of the same inode.  B owns the inode and
4307  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4308  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4309  * triggering background writeback.  A will be slowed down without a way to
4310  * make writeback of the dirty pages happen.
4311  *
4312  * Conditions like the above can lead to a cgroup getting repeatedly and
4313  * severely throttled after making some progress after each
4314  * dirty_expire_interval while the underlying IO device is almost
4315  * completely idle.
4316  *
4317  * Solving this problem completely requires matching the ownership tracking
4318  * granularities between memcg and writeback in either direction.  However,
4319  * the more egregious behaviors can be avoided by simply remembering the
4320  * most recent foreign dirtying events and initiating remote flushes on
4321  * them when local writeback isn't enough to keep the memory clean enough.
4322  *
4323  * The following two functions implement such mechanism.  When a foreign
4324  * page - a page whose memcg and writeback ownerships don't match - is
4325  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4326  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4327  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4328  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4329  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4330  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4331  * limited to MEMCG_CGWB_FRN_CNT.
4332  *
4333  * The mechanism only remembers IDs and doesn't hold any object references.
4334  * As being wrong occasionally doesn't matter, updates and accesses to the
4335  * records are lockless and racy.
4336  */
4337 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4338 					     struct bdi_writeback *wb)
4339 {
4340 	struct mem_cgroup *memcg = page->mem_cgroup;
4341 	struct memcg_cgwb_frn *frn;
4342 	u64 now = get_jiffies_64();
4343 	u64 oldest_at = now;
4344 	int oldest = -1;
4345 	int i;
4346 
4347 	trace_track_foreign_dirty(page, wb);
4348 
4349 	/*
4350 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4351 	 * using it.  If not, replace the oldest one which isn't being
4352 	 * written out.
4353 	 */
4354 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4355 		frn = &memcg->cgwb_frn[i];
4356 		if (frn->bdi_id == wb->bdi->id &&
4357 		    frn->memcg_id == wb->memcg_css->id)
4358 			break;
4359 		if (time_before64(frn->at, oldest_at) &&
4360 		    atomic_read(&frn->done.cnt) == 1) {
4361 			oldest = i;
4362 			oldest_at = frn->at;
4363 		}
4364 	}
4365 
4366 	if (i < MEMCG_CGWB_FRN_CNT) {
4367 		/*
4368 		 * Re-using an existing one.  Update timestamp lazily to
4369 		 * avoid making the cacheline hot.  We want them to be
4370 		 * reasonably up-to-date and significantly shorter than
4371 		 * dirty_expire_interval as that's what expires the record.
4372 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4373 		 */
4374 		unsigned long update_intv =
4375 			min_t(unsigned long, HZ,
4376 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4377 
4378 		if (time_before64(frn->at, now - update_intv))
4379 			frn->at = now;
4380 	} else if (oldest >= 0) {
4381 		/* replace the oldest free one */
4382 		frn = &memcg->cgwb_frn[oldest];
4383 		frn->bdi_id = wb->bdi->id;
4384 		frn->memcg_id = wb->memcg_css->id;
4385 		frn->at = now;
4386 	}
4387 }
4388 
4389 /* issue foreign writeback flushes for recorded foreign dirtying events */
4390 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4391 {
4392 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4393 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4394 	u64 now = jiffies_64;
4395 	int i;
4396 
4397 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4398 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4399 
4400 		/*
4401 		 * If the record is older than dirty_expire_interval,
4402 		 * writeback on it has already started.  No need to kick it
4403 		 * off again.  Also, don't start a new one if there's
4404 		 * already one in flight.
4405 		 */
4406 		if (time_after64(frn->at, now - intv) &&
4407 		    atomic_read(&frn->done.cnt) == 1) {
4408 			frn->at = 0;
4409 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4410 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4411 					       WB_REASON_FOREIGN_FLUSH,
4412 					       &frn->done);
4413 		}
4414 	}
4415 }
4416 
4417 #else	/* CONFIG_CGROUP_WRITEBACK */
4418 
4419 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4420 {
4421 	return 0;
4422 }
4423 
4424 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4425 {
4426 }
4427 
4428 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4429 {
4430 }
4431 
4432 #endif	/* CONFIG_CGROUP_WRITEBACK */
4433 
4434 /*
4435  * DO NOT USE IN NEW FILES.
4436  *
4437  * "cgroup.event_control" implementation.
4438  *
4439  * This is way over-engineered.  It tries to support fully configurable
4440  * events for each user.  Such a level of flexibility is completely
4441  * unnecessary, especially in light of the planned unified hierarchy.
4442  *
4443  * Please deprecate this and replace with something simpler if at all
4444  * possible.
4445  */
4446 
4447 /*
4448  * Unregister event and free resources.
4449  *
4450  * Gets called from workqueue.
4451  */
4452 static void memcg_event_remove(struct work_struct *work)
4453 {
4454 	struct mem_cgroup_event *event =
4455 		container_of(work, struct mem_cgroup_event, remove);
4456 	struct mem_cgroup *memcg = event->memcg;
4457 
4458 	remove_wait_queue(event->wqh, &event->wait);
4459 
4460 	event->unregister_event(memcg, event->eventfd);
4461 
4462 	/* Notify userspace the event is going away. */
4463 	eventfd_signal(event->eventfd, 1);
4464 
4465 	eventfd_ctx_put(event->eventfd);
4466 	kfree(event);
4467 	css_put(&memcg->css);
4468 }
4469 
4470 /*
4471  * Gets called on EPOLLHUP on eventfd when user closes it.
4472  *
4473  * Called with wqh->lock held and interrupts disabled.
4474  */
4475 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4476 			    int sync, void *key)
4477 {
4478 	struct mem_cgroup_event *event =
4479 		container_of(wait, struct mem_cgroup_event, wait);
4480 	struct mem_cgroup *memcg = event->memcg;
4481 	__poll_t flags = key_to_poll(key);
4482 
4483 	if (flags & EPOLLHUP) {
4484 		/*
4485 		 * If the event has been detached at cgroup removal, we
4486 		 * can simply return knowing the other side will cleanup
4487 		 * for us.
4488 		 *
4489 		 * We can't race against event freeing since the other
4490 		 * side will require wqh->lock via remove_wait_queue(),
4491 		 * which we hold.
4492 		 */
4493 		spin_lock(&memcg->event_list_lock);
4494 		if (!list_empty(&event->list)) {
4495 			list_del_init(&event->list);
4496 			/*
4497 			 * We are in atomic context, but memcg_event_remove()
4498 			 * may sleep, so we have to call it via a workqueue.
4499 			 */
4500 			schedule_work(&event->remove);
4501 		}
4502 		spin_unlock(&memcg->event_list_lock);
4503 	}
4504 
4505 	return 0;
4506 }
4507 
4508 static void memcg_event_ptable_queue_proc(struct file *file,
4509 		wait_queue_head_t *wqh, poll_table *pt)
4510 {
4511 	struct mem_cgroup_event *event =
4512 		container_of(pt, struct mem_cgroup_event, pt);
4513 
4514 	event->wqh = wqh;
4515 	add_wait_queue(wqh, &event->wait);
4516 }
4517 
4518 /*
4519  * DO NOT USE IN NEW FILES.
4520  *
4521  * Parse input and register new cgroup event handler.
4522  *
4523  * Input must be in format '<event_fd> <control_fd> <args>'.
4524  * Interpretation of args is defined by control file implementation.
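 *
 * Hedged userspace sketch (not kernel code; the cgroup path and the 50M
 * threshold are made up) of how this interface is typically driven:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ctl = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *		       O_WRONLY);
 *	char line[64];
 *	uint64_t cnt;
 *
 *	snprintf(line, sizeof(line), "%d %d 50M", efd, cfd);
 *	write(ctl, line, strlen(line));	// register the event
 *	read(efd, &cnt, sizeof(cnt));	// blocks until the threshold fires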
4525  */
4526 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4527 					 char *buf, size_t nbytes, loff_t off)
4528 {
4529 	struct cgroup_subsys_state *css = of_css(of);
4530 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4531 	struct mem_cgroup_event *event;
4532 	struct cgroup_subsys_state *cfile_css;
4533 	unsigned int efd, cfd;
4534 	struct fd efile;
4535 	struct fd cfile;
4536 	const char *name;
4537 	char *endp;
4538 	int ret;
4539 
4540 	buf = strstrip(buf);
4541 
4542 	efd = simple_strtoul(buf, &endp, 10);
4543 	if (*endp != ' ')
4544 		return -EINVAL;
4545 	buf = endp + 1;
4546 
4547 	cfd = simple_strtoul(buf, &endp, 10);
4548 	if ((*endp != ' ') && (*endp != '\0'))
4549 		return -EINVAL;
4550 	buf = endp + 1;
4551 
4552 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4553 	if (!event)
4554 		return -ENOMEM;
4555 
4556 	event->memcg = memcg;
4557 	INIT_LIST_HEAD(&event->list);
4558 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4559 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4560 	INIT_WORK(&event->remove, memcg_event_remove);
4561 
4562 	efile = fdget(efd);
4563 	if (!efile.file) {
4564 		ret = -EBADF;
4565 		goto out_kfree;
4566 	}
4567 
4568 	event->eventfd = eventfd_ctx_fileget(efile.file);
4569 	if (IS_ERR(event->eventfd)) {
4570 		ret = PTR_ERR(event->eventfd);
4571 		goto out_put_efile;
4572 	}
4573 
4574 	cfile = fdget(cfd);
4575 	if (!cfile.file) {
4576 		ret = -EBADF;
4577 		goto out_put_eventfd;
4578 	}
4579 
4580 	/* the process need read permission on control file */
4581 	/* AV: shouldn't we check that it's been opened for read instead? */
4582 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4583 	if (ret < 0)
4584 		goto out_put_cfile;
4585 
4586 	/*
4587 	 * Determine the event callbacks and set them in @event.  This used
4588 	 * to be done via struct cftype but cgroup core no longer knows
4589 	 * about these events.  The following is crude but the whole thing
4590 	 * is for compatibility anyway.
4591 	 *
4592 	 * DO NOT ADD NEW FILES.
4593 	 */
4594 	name = cfile.file->f_path.dentry->d_name.name;
4595 
4596 	if (!strcmp(name, "memory.usage_in_bytes")) {
4597 		event->register_event = mem_cgroup_usage_register_event;
4598 		event->unregister_event = mem_cgroup_usage_unregister_event;
4599 	} else if (!strcmp(name, "memory.oom_control")) {
4600 		event->register_event = mem_cgroup_oom_register_event;
4601 		event->unregister_event = mem_cgroup_oom_unregister_event;
4602 	} else if (!strcmp(name, "memory.pressure_level")) {
4603 		event->register_event = vmpressure_register_event;
4604 		event->unregister_event = vmpressure_unregister_event;
4605 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4606 		event->register_event = memsw_cgroup_usage_register_event;
4607 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4608 	} else {
4609 		ret = -EINVAL;
4610 		goto out_put_cfile;
4611 	}
4612 
4613 	/*
4614 	 * Verify that @cfile belongs to @css.  Also, remaining events are
4615 	 * automatically removed on cgroup destruction but the removal is
4616 	 * asynchronous, so take an extra ref on @css.
4617 	 */
4618 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4619 					       &memory_cgrp_subsys);
4620 	ret = -EINVAL;
4621 	if (IS_ERR(cfile_css))
4622 		goto out_put_cfile;
4623 	if (cfile_css != css) {
4624 		css_put(cfile_css);
4625 		goto out_put_cfile;
4626 	}
4627 
4628 	ret = event->register_event(memcg, event->eventfd, buf);
4629 	if (ret)
4630 		goto out_put_css;
4631 
4632 	vfs_poll(efile.file, &event->pt);
4633 
4634 	spin_lock(&memcg->event_list_lock);
4635 	list_add(&event->list, &memcg->event_list);
4636 	spin_unlock(&memcg->event_list_lock);
4637 
4638 	fdput(cfile);
4639 	fdput(efile);
4640 
4641 	return nbytes;
4642 
4643 out_put_css:
4644 	css_put(css);
4645 out_put_cfile:
4646 	fdput(cfile);
4647 out_put_eventfd:
4648 	eventfd_ctx_put(event->eventfd);
4649 out_put_efile:
4650 	fdput(efile);
4651 out_kfree:
4652 	kfree(event);
4653 
4654 	return ret;
4655 }
4656 
4657 static struct cftype mem_cgroup_legacy_files[] = {
4658 	{
4659 		.name = "usage_in_bytes",
4660 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4661 		.read_u64 = mem_cgroup_read_u64,
4662 	},
4663 	{
4664 		.name = "max_usage_in_bytes",
4665 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4666 		.write = mem_cgroup_reset,
4667 		.read_u64 = mem_cgroup_read_u64,
4668 	},
4669 	{
4670 		.name = "limit_in_bytes",
4671 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4672 		.write = mem_cgroup_write,
4673 		.read_u64 = mem_cgroup_read_u64,
4674 	},
4675 	{
4676 		.name = "soft_limit_in_bytes",
4677 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4678 		.write = mem_cgroup_write,
4679 		.read_u64 = mem_cgroup_read_u64,
4680 	},
4681 	{
4682 		.name = "failcnt",
4683 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4684 		.write = mem_cgroup_reset,
4685 		.read_u64 = mem_cgroup_read_u64,
4686 	},
4687 	{
4688 		.name = "stat",
4689 		.seq_show = memcg_stat_show,
4690 	},
4691 	{
4692 		.name = "force_empty",
4693 		.write = mem_cgroup_force_empty_write,
4694 	},
4695 	{
4696 		.name = "use_hierarchy",
4697 		.write_u64 = mem_cgroup_hierarchy_write,
4698 		.read_u64 = mem_cgroup_hierarchy_read,
4699 	},
4700 	{
4701 		.name = "cgroup.event_control",		/* XXX: for compat */
4702 		.write = memcg_write_event_control,
4703 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4704 	},
4705 	{
4706 		.name = "swappiness",
4707 		.read_u64 = mem_cgroup_swappiness_read,
4708 		.write_u64 = mem_cgroup_swappiness_write,
4709 	},
4710 	{
4711 		.name = "move_charge_at_immigrate",
4712 		.read_u64 = mem_cgroup_move_charge_read,
4713 		.write_u64 = mem_cgroup_move_charge_write,
4714 	},
4715 	{
4716 		.name = "oom_control",
4717 		.seq_show = mem_cgroup_oom_control_read,
4718 		.write_u64 = mem_cgroup_oom_control_write,
4719 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4720 	},
4721 	{
4722 		.name = "pressure_level",
4723 	},
4724 #ifdef CONFIG_NUMA
4725 	{
4726 		.name = "numa_stat",
4727 		.seq_show = memcg_numa_stat_show,
4728 	},
4729 #endif
4730 	{
4731 		.name = "kmem.limit_in_bytes",
4732 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4733 		.write = mem_cgroup_write,
4734 		.read_u64 = mem_cgroup_read_u64,
4735 	},
4736 	{
4737 		.name = "kmem.usage_in_bytes",
4738 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4739 		.read_u64 = mem_cgroup_read_u64,
4740 	},
4741 	{
4742 		.name = "kmem.failcnt",
4743 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4744 		.write = mem_cgroup_reset,
4745 		.read_u64 = mem_cgroup_read_u64,
4746 	},
4747 	{
4748 		.name = "kmem.max_usage_in_bytes",
4749 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4750 		.write = mem_cgroup_reset,
4751 		.read_u64 = mem_cgroup_read_u64,
4752 	},
4753 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
4754 	{
4755 		.name = "kmem.slabinfo",
4756 		.seq_start = memcg_slab_start,
4757 		.seq_next = memcg_slab_next,
4758 		.seq_stop = memcg_slab_stop,
4759 		.seq_show = memcg_slab_show,
4760 	},
4761 #endif
4762 	{
4763 		.name = "kmem.tcp.limit_in_bytes",
4764 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4765 		.write = mem_cgroup_write,
4766 		.read_u64 = mem_cgroup_read_u64,
4767 	},
4768 	{
4769 		.name = "kmem.tcp.usage_in_bytes",
4770 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4771 		.read_u64 = mem_cgroup_read_u64,
4772 	},
4773 	{
4774 		.name = "kmem.tcp.failcnt",
4775 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4776 		.write = mem_cgroup_reset,
4777 		.read_u64 = mem_cgroup_read_u64,
4778 	},
4779 	{
4780 		.name = "kmem.tcp.max_usage_in_bytes",
4781 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4782 		.write = mem_cgroup_reset,
4783 		.read_u64 = mem_cgroup_read_u64,
4784 	},
4785 	{ },	/* terminate */
4786 };
4787 
4788 /*
4789  * Private memory cgroup IDR
4790  *
4791  * Swap-out records and page cache shadow entries need to store memcg
4792  * references in constrained space, so we maintain an ID space that is
4793  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4794  * memory-controlled cgroups to 64k.
4795  *
4796  * However, there usually are many references to the offline CSS after
4797  * the cgroup has been destroyed, such as page cache or reclaimable
4798  * slab objects, that don't need to hang on to the ID. We want to keep
4799  * those dead CSS from occupying IDs, or we might quickly exhaust the
4800  * relatively small ID space and prevent the creation of new cgroups
4801  * even when there are much fewer than 64k cgroups - possibly none.
4802  *
4803  * Maintain a private 16-bit ID space for memcg, and allow the ID to
4804  * be freed and recycled when it's no longer needed, which is usually
4805  * when the CSS is offlined.
4806  *
4807  * The only exception to that are records of swapped out tmpfs/shmem
4808  * pages that need to be attributed to live ancestors on swapin. But
4809  * those references are manageable from userspace.
4810  */
4811 
4812 static DEFINE_IDR(mem_cgroup_idr);
4813 
4814 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4815 {
4816 	if (memcg->id.id > 0) {
4817 		idr_remove(&mem_cgroup_idr, memcg->id.id);
4818 		memcg->id.id = 0;
4819 	}
4820 }
4821 
4822 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4823 {
4824 	refcount_add(n, &memcg->id.ref);
4825 }
4826 
4827 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4828 {
4829 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
4830 		mem_cgroup_id_remove(memcg);
4831 
4832 		/* Memcg ID pins CSS */
4833 		css_put(&memcg->css);
4834 	}
4835 }
4836 
4837 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4838 {
4839 	mem_cgroup_id_put_many(memcg, 1);
4840 }
4841 
4842 /**
4843  * mem_cgroup_from_id - look up a memcg from a memcg id
4844  * @id: the memcg id to look up
4845  *
4846  * Caller must hold rcu_read_lock().
4847  */
4848 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4849 {
4850 	WARN_ON_ONCE(!rcu_read_lock_held());
4851 	return idr_find(&mem_cgroup_idr, id);
4852 }
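
/*
 * Illustrative lookup pattern (a sketch, not a new API): the returned
 * pointer is only stable under RCU, so callers that need the memcg
 * beyond the read-side section must take their own reference, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */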
4853 
4854 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4855 {
4856 	struct mem_cgroup_per_node *pn;
4857 	int tmp = node;
4858 	/*
4859 	 * This routine is called against possible nodes.
4860 	 * But it's a BUG to call kmalloc() against an offline node.
4861 	 *
4862 	 * TODO: this routine can waste a lot of memory for nodes which will
4863 	 *       never be onlined. It's better to use a memory hotplug callback
4864 	 *       function.
4865 	 */
4866 	if (!node_state(node, N_NORMAL_MEMORY))
4867 		tmp = -1;
4868 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4869 	if (!pn)
4870 		return 1;
4871 
4872 	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
4873 	if (!pn->lruvec_stat_local) {
4874 		kfree(pn);
4875 		return 1;
4876 	}
4877 
4878 	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4879 	if (!pn->lruvec_stat_cpu) {
4880 		free_percpu(pn->lruvec_stat_local);
4881 		kfree(pn);
4882 		return 1;
4883 	}
4884 
4885 	lruvec_init(&pn->lruvec);
4886 	pn->usage_in_excess = 0;
4887 	pn->on_tree = false;
4888 	pn->memcg = memcg;
4889 
4890 	memcg->nodeinfo[node] = pn;
4891 	return 0;
4892 }
4893 
4894 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4895 {
4896 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4897 
4898 	if (!pn)
4899 		return;
4900 
4901 	free_percpu(pn->lruvec_stat_cpu);
4902 	free_percpu(pn->lruvec_stat_local);
4903 	kfree(pn);
4904 }
4905 
4906 static void __mem_cgroup_free(struct mem_cgroup *memcg)
4907 {
4908 	int node;
4909 
4910 	for_each_node(node)
4911 		free_mem_cgroup_per_node_info(memcg, node);
4912 	free_percpu(memcg->vmstats_percpu);
4913 	free_percpu(memcg->vmstats_local);
4914 	kfree(memcg);
4915 }
4916 
4917 static void mem_cgroup_free(struct mem_cgroup *memcg)
4918 {
4919 	memcg_wb_domain_exit(memcg);
4920 	/*
4921 	 * Flush percpu vmstats and vmevents to guarantee the value correctness
4922 	 * on parent's and all ancestor levels.
4923 	 */
4924 	memcg_flush_percpu_vmstats(memcg, false);
4925 	memcg_flush_percpu_vmevents(memcg);
4926 	__mem_cgroup_free(memcg);
4927 }
4928 
4929 static struct mem_cgroup *mem_cgroup_alloc(void)
4930 {
4931 	struct mem_cgroup *memcg;
4932 	unsigned int size;
4933 	int node;
4934 	int __maybe_unused i;
4935 
4936 	size = sizeof(struct mem_cgroup);
4937 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4938 
4939 	memcg = kzalloc(size, GFP_KERNEL);
4940 	if (!memcg)
4941 		return NULL;
4942 
4943 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4944 				 1, MEM_CGROUP_ID_MAX,
4945 				 GFP_KERNEL);
4946 	if (memcg->id.id < 0)
4947 		goto fail;
4948 
4949 	memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
4950 	if (!memcg->vmstats_local)
4951 		goto fail;
4952 
4953 	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
4954 	if (!memcg->vmstats_percpu)
4955 		goto fail;
4956 
4957 	for_each_node(node)
4958 		if (alloc_mem_cgroup_per_node_info(memcg, node))
4959 			goto fail;
4960 
4961 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4962 		goto fail;
4963 
4964 	INIT_WORK(&memcg->high_work, high_work_func);
4965 	INIT_LIST_HEAD(&memcg->oom_notify);
4966 	mutex_init(&memcg->thresholds_lock);
4967 	spin_lock_init(&memcg->move_lock);
4968 	vmpressure_init(&memcg->vmpressure);
4969 	INIT_LIST_HEAD(&memcg->event_list);
4970 	spin_lock_init(&memcg->event_list_lock);
4971 	memcg->socket_pressure = jiffies;
4972 #ifdef CONFIG_MEMCG_KMEM
4973 	memcg->kmemcg_id = -1;
4974 #endif
4975 #ifdef CONFIG_CGROUP_WRITEBACK
4976 	INIT_LIST_HEAD(&memcg->cgwb_list);
4977 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
4978 		memcg->cgwb_frn[i].done =
4979 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
4980 #endif
4981 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4982 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
4983 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
4984 	memcg->deferred_split_queue.split_queue_len = 0;
4985 #endif
4986 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4987 	return memcg;
4988 fail:
4989 	mem_cgroup_id_remove(memcg);
4990 	__mem_cgroup_free(memcg);
4991 	return NULL;
4992 }
4993 
4994 static struct cgroup_subsys_state * __ref
4995 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4996 {
4997 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4998 	struct mem_cgroup *memcg;
4999 	long error = -ENOMEM;
5000 
5001 	memcg = mem_cgroup_alloc();
5002 	if (!memcg)
5003 		return ERR_PTR(error);
5004 
5005 	memcg->high = PAGE_COUNTER_MAX;
5006 	memcg->soft_limit = PAGE_COUNTER_MAX;
5007 	if (parent) {
5008 		memcg->swappiness = mem_cgroup_swappiness(parent);
5009 		memcg->oom_kill_disable = parent->oom_kill_disable;
5010 	}
5011 	if (parent && parent->use_hierarchy) {
5012 		memcg->use_hierarchy = true;
5013 		page_counter_init(&memcg->memory, &parent->memory);
5014 		page_counter_init(&memcg->swap, &parent->swap);
5015 		page_counter_init(&memcg->memsw, &parent->memsw);
5016 		page_counter_init(&memcg->kmem, &parent->kmem);
5017 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5018 	} else {
5019 		page_counter_init(&memcg->memory, NULL);
5020 		page_counter_init(&memcg->swap, NULL);
5021 		page_counter_init(&memcg->memsw, NULL);
5022 		page_counter_init(&memcg->kmem, NULL);
5023 		page_counter_init(&memcg->tcpmem, NULL);
5024 		/*
5025 		 * Deeper hierarchy with use_hierarchy == false doesn't make
5026 		 * much sense so let cgroup subsystem know about this
5027 		 * unfortunate state in our controller.
5028 		 */
5029 		if (parent != root_mem_cgroup)
5030 			memory_cgrp_subsys.broken_hierarchy = true;
5031 	}
5032 
5033 	/* The following stuff does not apply to the root */
5034 	if (!parent) {
5035 #ifdef CONFIG_MEMCG_KMEM
5036 		INIT_LIST_HEAD(&memcg->kmem_caches);
5037 #endif
5038 		root_mem_cgroup = memcg;
5039 		return &memcg->css;
5040 	}
5041 
5042 	error = memcg_online_kmem(memcg);
5043 	if (error)
5044 		goto fail;
5045 
5046 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5047 		static_branch_inc(&memcg_sockets_enabled_key);
5048 
5049 	return &memcg->css;
5050 fail:
5051 	mem_cgroup_id_remove(memcg);
5052 	mem_cgroup_free(memcg);
5053 	return ERR_PTR(-ENOMEM);
5054 }
5055 
5056 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5057 {
5058 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5059 
5060 	/*
5061 	 * A memcg must be visible for memcg_expand_shrinker_maps()
5062 	 * by the time the maps are allocated. So we allocate the maps
5063 	 * here, once for_each_mem_cgroup() can no longer skip it.
5064 	 */
5065 	if (memcg_alloc_shrinker_maps(memcg)) {
5066 		mem_cgroup_id_remove(memcg);
5067 		return -ENOMEM;
5068 	}
5069 
5070 	/* Online state pins memcg ID, memcg ID pins CSS */
5071 	refcount_set(&memcg->id.ref, 1);
5072 	css_get(css);
5073 	return 0;
5074 }
5075 
5076 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5077 {
5078 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5079 	struct mem_cgroup_event *event, *tmp;
5080 
5081 	/*
5082 	 * Unregister events and notify userspace.
5083 	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5084 	 * directory, to avoid races between userspace and kernelspace.
5085 	 */
5086 	spin_lock(&memcg->event_list_lock);
5087 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5088 		list_del_init(&event->list);
5089 		schedule_work(&event->remove);
5090 	}
5091 	spin_unlock(&memcg->event_list_lock);
5092 
5093 	page_counter_set_min(&memcg->memory, 0);
5094 	page_counter_set_low(&memcg->memory, 0);
5095 
5096 	memcg_offline_kmem(memcg);
5097 	wb_memcg_offline(memcg);
5098 
5099 	drain_all_stock(memcg);
5100 
5101 	mem_cgroup_id_put(memcg);
5102 }
5103 
5104 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5105 {
5106 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5107 
5108 	invalidate_reclaim_iterators(memcg);
5109 }
5110 
5111 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5112 {
5113 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5114 	int __maybe_unused i;
5115 
5116 #ifdef CONFIG_CGROUP_WRITEBACK
5117 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5118 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5119 #endif
5120 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5121 		static_branch_dec(&memcg_sockets_enabled_key);
5122 
5123 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5124 		static_branch_dec(&memcg_sockets_enabled_key);
5125 
5126 	vmpressure_cleanup(&memcg->vmpressure);
5127 	cancel_work_sync(&memcg->high_work);
5128 	mem_cgroup_remove_from_trees(memcg);
5129 	memcg_free_shrinker_maps(memcg);
5130 	memcg_free_kmem(memcg);
5131 	mem_cgroup_free(memcg);
5132 }
5133 
5134 /**
5135  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5136  * @css: the target css
5137  *
5138  * Reset the states of the mem_cgroup associated with @css.  This is
5139  * invoked when the userland requests disabling on the default hierarchy
5140  * but the memcg is pinned through dependency.  The memcg should stop
5141  * applying policies and should revert to the vanilla state as it may be
5142  * made visible again.
5143  *
5144  * The current implementation only resets the essential configurations.
5145  * This needs to be expanded to cover all the visible parts.
5146  */
5147 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5148 {
5149 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5150 
5151 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5152 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5153 	page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
5154 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5155 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5156 	page_counter_set_min(&memcg->memory, 0);
5157 	page_counter_set_low(&memcg->memory, 0);
5158 	memcg->high = PAGE_COUNTER_MAX;
5159 	memcg->soft_limit = PAGE_COUNTER_MAX;
5160 	memcg_wb_domain_size_changed(memcg);
5161 }
5162 
5163 #ifdef CONFIG_MMU
5164 /* Handlers for move charge at task migration. */
5165 static int mem_cgroup_do_precharge(unsigned long count)
5166 {
5167 	int ret;
5168 
5169 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5170 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5171 	if (!ret) {
5172 		mc.precharge += count;
5173 		return ret;
5174 	}
5175 
5176 	/* Try charges one by one with reclaim, but do not retry */
5177 	while (count--) {
5178 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5179 		if (ret)
5180 			return ret;
5181 		mc.precharge++;
5182 		cond_resched();
5183 	}
5184 	return 0;
5185 }
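
/*
 * Illustrative flow (hypothetical count): precharging 512 pages first
 * attempts a single try_charge(mc.to, ..., 512) with direct reclaim
 * masked off; only if that bulk charge fails does it fall back to
 * charging page by page with reclaim allowed but __GFP_NORETRY set, so
 * one hopeless charge aborts the whole precharge instead of hammering
 * reclaim repeatedly.
 */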
5186 
5187 union mc_target {
5188 	struct page	*page;
5189 	swp_entry_t	ent;
5190 };
5191 
5192 enum mc_target_type {
5193 	MC_TARGET_NONE = 0,
5194 	MC_TARGET_PAGE,
5195 	MC_TARGET_SWAP,
5196 	MC_TARGET_DEVICE,
5197 };
5198 
5199 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5200 						unsigned long addr, pte_t ptent)
5201 {
5202 	struct page *page = vm_normal_page(vma, addr, ptent);
5203 
5204 	if (!page || !page_mapped(page))
5205 		return NULL;
5206 	if (PageAnon(page)) {
5207 		if (!(mc.flags & MOVE_ANON))
5208 			return NULL;
5209 	} else {
5210 		if (!(mc.flags & MOVE_FILE))
5211 			return NULL;
5212 	}
5213 	if (!get_page_unless_zero(page))
5214 		return NULL;
5215 
5216 	return page;
5217 }
5218 
5219 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5220 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5221 			pte_t ptent, swp_entry_t *entry)
5222 {
5223 	struct page *page = NULL;
5224 	swp_entry_t ent = pte_to_swp_entry(ptent);
5225 
5226 	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
5227 		return NULL;
5228 
5229 	/*
5230 	 * Handle MEMORY_DEVICE_PRIVATE, i.e. ZONE_DEVICE pages belonging to
5231 	 * a device; because they are not accessible by the CPU they are stored
5232 	 * as special swap entries in the CPU page table.
5233 	 */
5234 	if (is_device_private_entry(ent)) {
5235 		page = device_private_entry_to_page(ent);
5236 		/*
5237 		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5238 		 * a refcount of 1 when free (unlike a normal page).
5239 		 */
5240 		if (!page_ref_add_unless(page, 1, 1))
5241 			return NULL;
5242 		return page;
5243 	}
5244 
5245 	/*
5246 	 * Because lookup_swap_cache() updates some statistics counter,
5247 	 * we call find_get_page() with swapper_space directly.
5248 	 */
5249 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5250 	if (do_memsw_account())
5251 		entry->val = ent.val;
5252 
5253 	return page;
5254 }
5255 #else
5256 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5257 			pte_t ptent, swp_entry_t *entry)
5258 {
5259 	return NULL;
5260 }
5261 #endif
5262 
5263 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5264 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5265 {
5266 	struct page *page = NULL;
5267 	struct address_space *mapping;
5268 	pgoff_t pgoff;
5269 
5270 	if (!vma->vm_file) /* anonymous vma */
5271 		return NULL;
5272 	if (!(mc.flags & MOVE_FILE))
5273 		return NULL;
5274 
5275 	mapping = vma->vm_file->f_mapping;
5276 	pgoff = linear_page_index(vma, addr);
5277 
5278 	/* The page is moved even if it's not RSS of this task (page-faulted). */
5279 #ifdef CONFIG_SWAP
5280 	/* shmem/tmpfs may report page out on swap: account for that too. */
5281 	if (shmem_mapping(mapping)) {
5282 		page = find_get_entry(mapping, pgoff);
5283 		if (xa_is_value(page)) {
5284 			swp_entry_t swp = radix_to_swp_entry(page);
5285 			if (do_memsw_account())
5286 				*entry = swp;
5287 			page = find_get_page(swap_address_space(swp),
5288 					     swp_offset(swp));
5289 		}
5290 	} else
5291 		page = find_get_page(mapping, pgoff);
5292 #else
5293 	page = find_get_page(mapping, pgoff);
5294 #endif
5295 	return page;
5296 }
5297 
5298 /**
5299  * mem_cgroup_move_account - move account of the page
5300  * @page: the page
5301  * @compound: charge the page as compound or small page
5302  * @from: mem_cgroup which the page is moved from.
5303  * @to:	mem_cgroup which the page is moved to. @from != @to.
5304  *
5305  * The caller must make sure the page is not on the LRU (isolate_page() is
5306  * useful).
5307  *
5308  * This function doesn't "charge" the new cgroup or "uncharge" the old one.
5309  */
5310 static int mem_cgroup_move_account(struct page *page,
5311 				   bool compound,
5312 				   struct mem_cgroup *from,
5313 				   struct mem_cgroup *to)
5314 {
5315 	struct lruvec *from_vec, *to_vec;
5316 	struct pglist_data *pgdat;
5317 	unsigned long flags;
5318 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5319 	int ret;
5320 	bool anon;
5321 
5322 	VM_BUG_ON(from == to);
5323 	VM_BUG_ON_PAGE(PageLRU(page), page);
5324 	VM_BUG_ON(compound && !PageTransHuge(page));
5325 
5326 	/*
5327 	 * Prevent mem_cgroup_migrate() from looking at
5328 	 * page->mem_cgroup of its source page while we change it.
5329 	 */
5330 	ret = -EBUSY;
5331 	if (!trylock_page(page))
5332 		goto out;
5333 
5334 	ret = -EINVAL;
5335 	if (page->mem_cgroup != from)
5336 		goto out_unlock;
5337 
5338 	anon = PageAnon(page);
5339 
5340 	pgdat = page_pgdat(page);
5341 	from_vec = mem_cgroup_lruvec(from, pgdat);
5342 	to_vec = mem_cgroup_lruvec(to, pgdat);
5343 
5344 	spin_lock_irqsave(&from->move_lock, flags);
5345 
5346 	if (!anon && page_mapped(page)) {
5347 		__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5348 		__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5349 	}
5350 
5351 	/*
5352 	 * move_lock is grabbed above and the caller set from->moving_account,
5353 	 * so mod_memcg_page_state() will serialize updates to PageDirty.
5354 	 * The mapping should therefore be stable for dirty pages.
5355 	 */
5356 	if (!anon && PageDirty(page)) {
5357 		struct address_space *mapping = page_mapping(page);
5358 
5359 		if (mapping_cap_account_dirty(mapping)) {
5360 			__mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
5361 			__mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
5362 		}
5363 	}
5364 
5365 	if (PageWriteback(page)) {
5366 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5367 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5368 	}
5369 
5370 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5371 	if (compound && !list_empty(page_deferred_list(page))) {
5372 		spin_lock(&from->deferred_split_queue.split_queue_lock);
5373 		list_del_init(page_deferred_list(page));
5374 		from->deferred_split_queue.split_queue_len--;
5375 		spin_unlock(&from->deferred_split_queue.split_queue_lock);
5376 	}
5377 #endif
5378 	/*
5379 	 * It is safe to change page->mem_cgroup here because the page
5380 	 * is referenced, charged, and isolated - we can't race with
5381 	 * uncharging, charging, migration, or LRU putback.
5382 	 */
5383 
5384 	/* caller should have done css_get */
5385 	page->mem_cgroup = to;
5386 
5387 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5388 	if (compound && list_empty(page_deferred_list(page))) {
5389 		spin_lock(&to->deferred_split_queue.split_queue_lock);
5390 		list_add_tail(page_deferred_list(page),
5391 			      &to->deferred_split_queue.split_queue);
5392 		to->deferred_split_queue.split_queue_len++;
5393 		spin_unlock(&to->deferred_split_queue.split_queue_lock);
5394 	}
5395 #endif
5396 
5397 	spin_unlock_irqrestore(&from->move_lock, flags);
5398 
5399 	ret = 0;
5400 
5401 	local_irq_disable();
5402 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
5403 	memcg_check_events(to, page);
5404 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
5405 	memcg_check_events(from, page);
5406 	local_irq_enable();
5407 out_unlock:
5408 	unlock_page(page);
5409 out:
5410 	return ret;
5411 }
5412 
5413 /**
5414  * get_mctgt_type - get target type of moving charge
5415  * @vma: the vma the pte to be checked belongs
5416  * @addr: the address corresponding to the pte to be checked
5417  * @ptent: the pte to be checked
5418  * @target: the pointer the target page or swap ent will be stored(can be NULL)
5419  *
5420  * Returns
5421  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5422  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target
5423  *     for move charge. If @target is not NULL, the page is stored in
5424  *     target->page with an extra refcount taken (callers should handle it).
5425  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5426  *     target for charge migration. If @target is not NULL, the entry is
5427  *     stored in target->ent.
5428  *   3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is
5429  *     MEMORY_DEVICE_PRIVATE (a ZONE_DEVICE page, thus not on the lru).
5430  *     For now such a page is charged like a regular page would be, as for
5431  *     all intents and purposes it is just special memory taking the place
5432  *     of a regular page.
5433  *
5434  *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5435  *
5436  * Called with pte lock held.
5437  */
5438 
5439 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5440 		unsigned long addr, pte_t ptent, union mc_target *target)
5441 {
5442 	struct page *page = NULL;
5443 	enum mc_target_type ret = MC_TARGET_NONE;
5444 	swp_entry_t ent = { .val = 0 };
5445 
5446 	if (pte_present(ptent))
5447 		page = mc_handle_present_pte(vma, addr, ptent);
5448 	else if (is_swap_pte(ptent))
5449 		page = mc_handle_swap_pte(vma, ptent, &ent);
5450 	else if (pte_none(ptent))
5451 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5452 
5453 	if (!page && !ent.val)
5454 		return ret;
5455 	if (page) {
5456 		/*
5457 		 * Do only a loose check without serialization.
5458 		 * mem_cgroup_move_account() checks whether the page is
5459 		 * valid under LRU exclusion.
5460 		 */
5461 		if (page->mem_cgroup == mc.from) {
5462 			ret = MC_TARGET_PAGE;
5463 			if (is_device_private_page(page))
5464 				ret = MC_TARGET_DEVICE;
5465 			if (target)
5466 				target->page = page;
5467 		}
5468 		if (!ret || !target)
5469 			put_page(page);
5470 	}
5471 	/*
5472 	 * There is a swap entry and the page doesn't exist or isn't charged.
5473 	 * But we cannot move a tail page of a THP.
5474 	 */
5475 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5476 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5477 		ret = MC_TARGET_SWAP;
5478 		if (target)
5479 			target->ent = ent;
5480 	}
5481 	return ret;
5482 }
5483 
5484 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5485 /*
5486  * We don't consider PMD-mapped swapping or file-mapped pages because THP does
5487  * not support them for now.
5488  * The caller should make sure that pmd_trans_huge(pmd) is true.
5489  */
5490 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5491 		unsigned long addr, pmd_t pmd, union mc_target *target)
5492 {
5493 	struct page *page = NULL;
5494 	enum mc_target_type ret = MC_TARGET_NONE;
5495 
5496 	if (unlikely(is_swap_pmd(pmd))) {
5497 		VM_BUG_ON(thp_migration_supported() &&
5498 				  !is_pmd_migration_entry(pmd));
5499 		return ret;
5500 	}
5501 	page = pmd_page(pmd);
5502 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5503 	if (!(mc.flags & MOVE_ANON))
5504 		return ret;
5505 	if (page->mem_cgroup == mc.from) {
5506 		ret = MC_TARGET_PAGE;
5507 		if (target) {
5508 			get_page(page);
5509 			target->page = page;
5510 		}
5511 	}
5512 	return ret;
5513 }
5514 #else
5515 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5516 		unsigned long addr, pmd_t pmd, union mc_target *target)
5517 {
5518 	return MC_TARGET_NONE;
5519 }
5520 #endif
5521 
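/*
 * Precharge-pass walk callback: count each pte (or whole PMD-mapped
 * THP) that get_mctgt_type() considers a move target, accumulating the
 * total in mc.precharge.  Nothing is moved here; the count is used by
 * mem_cgroup_precharge_mc() to reserve charges up front.
 */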
5522 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5523 					unsigned long addr, unsigned long end,
5524 					struct mm_walk *walk)
5525 {
5526 	struct vm_area_struct *vma = walk->vma;
5527 	pte_t *pte;
5528 	spinlock_t *ptl;
5529 
5530 	ptl = pmd_trans_huge_lock(pmd, vma);
5531 	if (ptl) {
5532 		/*
5533 		 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
5534 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
5535 		 * but this might change.
5536 		 */
5537 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5538 			mc.precharge += HPAGE_PMD_NR;
5539 		spin_unlock(ptl);
5540 		return 0;
5541 	}
5542 
5543 	if (pmd_trans_unstable(pmd))
5544 		return 0;
5545 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5546 	for (; addr != end; pte++, addr += PAGE_SIZE)
5547 		if (get_mctgt_type(vma, addr, *pte, NULL))
5548 			mc.precharge++;	/* increment precharge temporarily */
5549 	pte_unmap_unlock(pte - 1, ptl);
5550 	cond_resched();
5551 
5552 	return 0;
5553 }
5554 
5555 static const struct mm_walk_ops precharge_walk_ops = {
5556 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5557 };
5558 
5559 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5560 {
5561 	unsigned long precharge;
5562 
5563 	down_read(&mm->mmap_sem);
5564 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5565 	up_read(&mm->mmap_sem);
5566 
5567 	precharge = mc.precharge;
5568 	mc.precharge = 0;
5569 
5570 	return precharge;
5571 }
5572 
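/*
 * Count how many charges the walk will move and reserve that many on
 * mc.to in one go, so that the later move pass only has to fall back
 * to charging one page at a time once this reservation is exhausted.
 */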
5573 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5574 {
5575 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5576 
5577 	VM_BUG_ON(mc.moving_task);
5578 	mc.moving_task = current;
5579 	return mem_cgroup_do_precharge(precharge);
5580 }
5581 
5582 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5583 static void __mem_cgroup_clear_mc(void)
5584 {
5585 	struct mem_cgroup *from = mc.from;
5586 	struct mem_cgroup *to = mc.to;
5587 
5588 	/* we must uncharge all the leftover precharges from mc.to */
5589 	if (mc.precharge) {
5590 		cancel_charge(mc.to, mc.precharge);
5591 		mc.precharge = 0;
5592 	}
5593 	/*
5594 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5595 	 * we must uncharge here.
5596 	 */
5597 	if (mc.moved_charge) {
5598 		cancel_charge(mc.from, mc.moved_charge);
5599 		mc.moved_charge = 0;
5600 	}
5601 	/* we must fixup refcnts and charges */
5602 	if (mc.moved_swap) {
5603 		/* uncharge swap account from the old cgroup */
5604 		if (!mem_cgroup_is_root(mc.from))
5605 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5606 
5607 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5608 
5609 		/*
5610 		 * we charged both to->memory and to->memsw, so we
5611 		 * should uncharge to->memory.
5612 		 */
5613 		if (!mem_cgroup_is_root(mc.to))
5614 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5615 
5616 		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
5617 		css_put_many(&mc.to->css, mc.moved_swap);
5618 
5619 		mc.moved_swap = 0;
5620 	}
5621 	memcg_oom_recover(from);
5622 	memcg_oom_recover(to);
5623 	wake_up_all(&mc.waitq);
5624 }
5625 
5626 static void mem_cgroup_clear_mc(void)
5627 {
5628 	struct mm_struct *mm = mc.mm;
5629 
5630 	/*
5631 	 * we must clear moving_task before waking up waiters at the end of
5632 	 * task migration.
5633 	 */
5634 	mc.moving_task = NULL;
5635 	__mem_cgroup_clear_mc();
5636 	spin_lock(&mc.lock);
5637 	mc.from = NULL;
5638 	mc.to = NULL;
5639 	mc.mm = NULL;
5640 	spin_unlock(&mc.lock);
5641 
5642 	mmput(mm);
5643 }
5644 
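/*
 * cgroup ->can_attach() callback: when the legacy
 * move_charge_at_immigrate tunable is set on the destination memcg,
 * record the move context in mc (mm, source, destination, flags) and
 * precharge the destination.  A non-zero return vetoes the migration.
 */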
5645 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5646 {
5647 	struct cgroup_subsys_state *css;
5648 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5649 	struct mem_cgroup *from;
5650 	struct task_struct *leader, *p;
5651 	struct mm_struct *mm;
5652 	unsigned long move_flags;
5653 	int ret = 0;
5654 
5655 	/* charge immigration isn't supported on the default hierarchy */
5656 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5657 		return 0;
5658 
5659 	/*
5660 	 * Multi-process migrations only happen on the default hierarchy
5661 	 * where charge immigration is not used.  Perform charge
5662 	 * immigration if @tset contains a leader and whine if there are
5663 	 * multiple.
5664 	 */
5665 	p = NULL;
5666 	cgroup_taskset_for_each_leader(leader, css, tset) {
5667 		WARN_ON_ONCE(p);
5668 		p = leader;
5669 		memcg = mem_cgroup_from_css(css);
5670 	}
5671 	if (!p)
5672 		return 0;
5673 
5674 	/*
5675 	 * We are now committed to this value, whatever it is. Changes to this
5676 	 * tunable will only affect upcoming migrations, not the current one.
5677 	 * So we need to save it, and keep it going.
5678 	 */
5679 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5680 	if (!move_flags)
5681 		return 0;
5682 
5683 	from = mem_cgroup_from_task(p);
5684 
5685 	VM_BUG_ON(from == memcg);
5686 
5687 	mm = get_task_mm(p);
5688 	if (!mm)
5689 		return 0;
5690 	/* We move charges only when we move an owner of the mm */
5691 	if (mm->owner == p) {
5692 		VM_BUG_ON(mc.from);
5693 		VM_BUG_ON(mc.to);
5694 		VM_BUG_ON(mc.precharge);
5695 		VM_BUG_ON(mc.moved_charge);
5696 		VM_BUG_ON(mc.moved_swap);
5697 
5698 		spin_lock(&mc.lock);
5699 		mc.mm = mm;
5700 		mc.from = from;
5701 		mc.to = memcg;
5702 		mc.flags = move_flags;
5703 		spin_unlock(&mc.lock);
5704 		/* We set mc.moving_task later */
5705 
5706 		ret = mem_cgroup_precharge_mc(mm);
5707 		if (ret)
5708 			mem_cgroup_clear_mc();
5709 	} else {
5710 		mmput(mm);
5711 	}
5712 	return ret;
5713 }
5714 
5715 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5716 {
5717 	if (mc.to)
5718 		mem_cgroup_clear_mc();
5719 }
5720 
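/*
 * Move-pass walk callback: for each target found by get_mctgt_type(),
 * consume one unit of mc.precharge and move the page or swap-entry
 * charge from mc.from to mc.to.  A PMD-mapped THP is moved as a whole,
 * consuming HPAGE_PMD_NR units at once.
 */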
5721 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5722 				unsigned long addr, unsigned long end,
5723 				struct mm_walk *walk)
5724 {
5725 	int ret = 0;
5726 	struct vm_area_struct *vma = walk->vma;
5727 	pte_t *pte;
5728 	spinlock_t *ptl;
5729 	enum mc_target_type target_type;
5730 	union mc_target target;
5731 	struct page *page;
5732 
5733 	ptl = pmd_trans_huge_lock(pmd, vma);
5734 	if (ptl) {
5735 		if (mc.precharge < HPAGE_PMD_NR) {
5736 			spin_unlock(ptl);
5737 			return 0;
5738 		}
5739 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5740 		if (target_type == MC_TARGET_PAGE) {
5741 			page = target.page;
5742 			if (!isolate_lru_page(page)) {
5743 				if (!mem_cgroup_move_account(page, true,
5744 							     mc.from, mc.to)) {
5745 					mc.precharge -= HPAGE_PMD_NR;
5746 					mc.moved_charge += HPAGE_PMD_NR;
5747 				}
5748 				putback_lru_page(page);
5749 			}
5750 			put_page(page);
5751 		} else if (target_type == MC_TARGET_DEVICE) {
5752 			page = target.page;
5753 			if (!mem_cgroup_move_account(page, true,
5754 						     mc.from, mc.to)) {
5755 				mc.precharge -= HPAGE_PMD_NR;
5756 				mc.moved_charge += HPAGE_PMD_NR;
5757 			}
5758 			put_page(page);
5759 		}
5760 		spin_unlock(ptl);
5761 		return 0;
5762 	}
5763 
5764 	if (pmd_trans_unstable(pmd))
5765 		return 0;
5766 retry:
5767 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5768 	for (; addr != end; addr += PAGE_SIZE) {
5769 		pte_t ptent = *(pte++);
5770 		bool device = false;
5771 		swp_entry_t ent;
5772 
5773 		if (!mc.precharge)
5774 			break;
5775 
5776 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5777 		case MC_TARGET_DEVICE:
5778 			device = true;
5779 			/* fall through */
5780 		case MC_TARGET_PAGE:
5781 			page = target.page;
5782 			/*
5783 			 * We can have a part of a split pmd here. Moving it
5784 			 * could be done, but it would be too convoluted, so
5785 			 * simply ignore such a partial THP and keep it in the
5786 			 * original memcg. There should be somebody mapping the head.
5787 			 */
5788 			if (PageTransCompound(page))
5789 				goto put;
5790 			if (!device && isolate_lru_page(page))
5791 				goto put;
5792 			if (!mem_cgroup_move_account(page, false,
5793 						mc.from, mc.to)) {
5794 				mc.precharge--;
5795 				/* we uncharge from mc.from later. */
5796 				mc.moved_charge++;
5797 			}
5798 			if (!device)
5799 				putback_lru_page(page);
5800 put:			/* get_mctgt_type() gets the page */
5801 			put_page(page);
5802 			break;
5803 		case MC_TARGET_SWAP:
5804 			ent = target.ent;
5805 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5806 				mc.precharge--;
5807 				/* we fixup refcnts and charges later. */
5808 				mc.moved_swap++;
5809 			}
5810 			break;
5811 		default:
5812 			break;
5813 		}
5814 	}
5815 	pte_unmap_unlock(pte - 1, ptl);
5816 	cond_resched();
5817 
5818 	if (addr != end) {
5819 		/*
5820 		 * We have consumed all precharges we got in can_attach().
5821 		 * We try to charge one by one, but don't do any additional
5822 		 * charges to mc.to if we have already failed to charge once
5823 		 * in the attach() phase.
5824 		 */
5825 		ret = mem_cgroup_do_precharge(1);
5826 		if (!ret)
5827 			goto retry;
5828 	}
5829 
5830 	return ret;
5831 }
5832 
5833 static const struct mm_walk_ops charge_walk_ops = {
5834 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
5835 };
5836 
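/*
 * Drive the actual charge move: drain the LRU pagevecs so isolation
 * can succeed, make lock_page_memcg() take from->move_lock for the
 * duration, and walk the whole address space with charge_walk_ops.
 */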
5837 static void mem_cgroup_move_charge(void)
5838 {
5839 	lru_add_drain_all();
5840 	/*
5841 	 * Signal lock_page_memcg() to take the memcg's move_lock
5842 	 * while we're moving its pages to another memcg. Then wait
5843 	 * for already started RCU-only updates to finish.
5844 	 */
5845 	atomic_inc(&mc.from->moving_account);
5846 	synchronize_rcu();
5847 retry:
5848 	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5849 		/*
5850 		 * Someone holding the mmap_sem might be waiting on our
5851 		 * waitq. So we cancel all extra charges, wake up all waiters,
5852 		 * and retry. Because we cancel precharges, we might not be able
5853 		 * to move enough charges, but moving charge is a best-effort
5854 		 * feature anyway, so it wouldn't be a big problem.
5855 		 */
5856 		__mem_cgroup_clear_mc();
5857 		cond_resched();
5858 		goto retry;
5859 	}
5860 	/*
5861 	 * When we have consumed all precharges and failed to do an
5862 	 * additional charge, the page walk just aborts.
5863 	 */
5864 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
5865 			NULL);
5866 
5867 	up_read(&mc.mm->mmap_sem);
5868 	atomic_dec(&mc.from->moving_account);
5869 }
5870 
5871 static void mem_cgroup_move_task(void)
5872 {
5873 	if (mc.to) {
5874 		mem_cgroup_move_charge();
5875 		mem_cgroup_clear_mc();
5876 	}
5877 }
5878 #else	/* !CONFIG_MMU */
5879 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5880 {
5881 	return 0;
5882 }
5883 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5884 {
5885 }
5886 static void mem_cgroup_move_task(void)
5887 {
5888 }
5889 #endif
5890 
5891 /*
5892  * Cgroup retains root cgroups across [un]mount cycles, making it necessary
5893  * to verify whether we're attached to the default hierarchy on each mount
5894  * attempt.
5895  */
5896 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5897 {
5898 	/*
5899 	 * use_hierarchy is forced on the default hierarchy.  cgroup core
5900 	 * guarantees that @root doesn't have any children, so turning it
5901 	 * on for the root memcg is enough.
5902 	 */
5903 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5904 		root_mem_cgroup->use_hierarchy = true;
5905 	else
5906 		root_mem_cgroup->use_hierarchy = false;
5907 }
5908 
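/* Print a page counter tunable in bytes, or "max" for PAGE_COUNTER_MAX. */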
5909 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
5910 {
5911 	if (value == PAGE_COUNTER_MAX)
5912 		seq_puts(m, "max\n");
5913 	else
5914 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
5915 
5916 	return 0;
5917 }
5918 
5919 static u64 memory_current_read(struct cgroup_subsys_state *css,
5920 			       struct cftype *cft)
5921 {
5922 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5923 
5924 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5925 }
5926 
5927 static int memory_min_show(struct seq_file *m, void *v)
5928 {
5929 	return seq_puts_memcg_tunable(m,
5930 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
5931 }
5932 
5933 static ssize_t memory_min_write(struct kernfs_open_file *of,
5934 				char *buf, size_t nbytes, loff_t off)
5935 {
5936 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5937 	unsigned long min;
5938 	int err;
5939 
5940 	buf = strstrip(buf);
5941 	err = page_counter_memparse(buf, "max", &min);
5942 	if (err)
5943 		return err;
5944 
5945 	page_counter_set_min(&memcg->memory, min);
5946 
5947 	return nbytes;
5948 }
5949 
5950 static int memory_low_show(struct seq_file *m, void *v)
5951 {
5952 	return seq_puts_memcg_tunable(m,
5953 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
5954 }
5955 
5956 static ssize_t memory_low_write(struct kernfs_open_file *of,
5957 				char *buf, size_t nbytes, loff_t off)
5958 {
5959 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5960 	unsigned long low;
5961 	int err;
5962 
5963 	buf = strstrip(buf);
5964 	err = page_counter_memparse(buf, "max", &low);
5965 	if (err)
5966 		return err;
5967 
5968 	page_counter_set_low(&memcg->memory, low);
5969 
5970 	return nbytes;
5971 }
5972 
5973 static int memory_high_show(struct seq_file *m, void *v)
5974 {
5975 	return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
5976 }
5977 
5978 static ssize_t memory_high_write(struct kernfs_open_file *of,
5979 				 char *buf, size_t nbytes, loff_t off)
5980 {
5981 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5982 	unsigned int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
5983 	bool drained = false;
5984 	unsigned long high;
5985 	int err;
5986 
5987 	buf = strstrip(buf);
5988 	err = page_counter_memparse(buf, "max", &high);
5989 	if (err)
5990 		return err;
5991 
5992 	memcg->high = high;
5993 
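	/*
	 * Writing the limit does not itself guarantee anything; try to
	 * push usage down below the new high mark: drain the percpu
	 * stock once, then reclaim, giving up after a pending signal or
	 * MEM_CGROUP_RECLAIM_RETRIES consecutive failed reclaim passes.
	 */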
5994 	for (;;) {
5995 		unsigned long nr_pages = page_counter_read(&memcg->memory);
5996 		unsigned long reclaimed;
5997 
5998 		if (nr_pages <= high)
5999 			break;
6000 
6001 		if (signal_pending(current))
6002 			break;
6003 
6004 		if (!drained) {
6005 			drain_all_stock(memcg);
6006 			drained = true;
6007 			continue;
6008 		}
6009 
6010 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6011 							 GFP_KERNEL, true);
6012 
6013 		if (!reclaimed && !nr_retries--)
6014 			break;
6015 	}
6016 
6017 	return nbytes;
6018 }
6019 
6020 static int memory_max_show(struct seq_file *m, void *v)
6021 {
6022 	return seq_puts_memcg_tunable(m,
6023 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6024 }
6025 
6026 static ssize_t memory_max_write(struct kernfs_open_file *of,
6027 				char *buf, size_t nbytes, loff_t off)
6028 {
6029 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6030 	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
6031 	bool drained = false;
6032 	unsigned long max;
6033 	int err;
6034 
6035 	buf = strstrip(buf);
6036 	err = page_counter_memparse(buf, "max", &max);
6037 	if (err)
6038 		return err;
6039 
6040 	xchg(&memcg->memory.max, max);
6041 
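	/*
	 * Unlike memory.high, memory.max is enforced even when reclaim
	 * makes no progress: once the reclaim retries are exhausted,
	 * invoke the OOM killer until usage fits under the new limit or
	 * there is nothing left to kill.
	 */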
6042 	for (;;) {
6043 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6044 
6045 		if (nr_pages <= max)
6046 			break;
6047 
6048 		if (signal_pending(current))
6049 			break;
6050 
6051 		if (!drained) {
6052 			drain_all_stock(memcg);
6053 			drained = true;
6054 			continue;
6055 		}
6056 
6057 		if (nr_reclaims) {
6058 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6059 							  GFP_KERNEL, true))
6060 				nr_reclaims--;
6061 			continue;
6062 		}
6063 
6064 		memcg_memory_event(memcg, MEMCG_OOM);
6065 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6066 			break;
6067 	}
6068 
6069 	memcg_wb_domain_size_changed(memcg);
6070 	return nbytes;
6071 }
6072 
6073 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6074 {
6075 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6076 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6077 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6078 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6079 	seq_printf(m, "oom_kill %lu\n",
6080 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6081 }
6082 
6083 static int memory_events_show(struct seq_file *m, void *v)
6084 {
6085 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6086 
6087 	__memory_events_show(m, memcg->memory_events);
6088 	return 0;
6089 }
6090 
6091 static int memory_events_local_show(struct seq_file *m, void *v)
6092 {
6093 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6094 
6095 	__memory_events_show(m, memcg->memory_events_local);
6096 	return 0;
6097 }
6098 
6099 static int memory_stat_show(struct seq_file *m, void *v)
6100 {
6101 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6102 	char *buf;
6103 
6104 	buf = memory_stat_format(memcg);
6105 	if (!buf)
6106 		return -ENOMEM;
6107 	seq_puts(m, buf);
6108 	kfree(buf);
6109 	return 0;
6110 }
6111 
6112 static int memory_oom_group_show(struct seq_file *m, void *v)
6113 {
6114 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6115 
6116 	seq_printf(m, "%d\n", memcg->oom_group);
6117 
6118 	return 0;
6119 }
6120 
6121 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6122 				      char *buf, size_t nbytes, loff_t off)
6123 {
6124 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6125 	int ret, oom_group;
6126 
6127 	buf = strstrip(buf);
6128 	if (!buf)
6129 		return -EINVAL;
6130 
6131 	ret = kstrtoint(buf, 0, &oom_group);
6132 	if (ret)
6133 		return ret;
6134 
6135 	if (oom_group != 0 && oom_group != 1)
6136 		return -EINVAL;
6137 
6138 	memcg->oom_group = oom_group;
6139 
6140 	return nbytes;
6141 }
6142 
6143 static struct cftype memory_files[] = {
6144 	{
6145 		.name = "current",
6146 		.flags = CFTYPE_NOT_ON_ROOT,
6147 		.read_u64 = memory_current_read,
6148 	},
6149 	{
6150 		.name = "min",
6151 		.flags = CFTYPE_NOT_ON_ROOT,
6152 		.seq_show = memory_min_show,
6153 		.write = memory_min_write,
6154 	},
6155 	{
6156 		.name = "low",
6157 		.flags = CFTYPE_NOT_ON_ROOT,
6158 		.seq_show = memory_low_show,
6159 		.write = memory_low_write,
6160 	},
6161 	{
6162 		.name = "high",
6163 		.flags = CFTYPE_NOT_ON_ROOT,
6164 		.seq_show = memory_high_show,
6165 		.write = memory_high_write,
6166 	},
6167 	{
6168 		.name = "max",
6169 		.flags = CFTYPE_NOT_ON_ROOT,
6170 		.seq_show = memory_max_show,
6171 		.write = memory_max_write,
6172 	},
6173 	{
6174 		.name = "events",
6175 		.flags = CFTYPE_NOT_ON_ROOT,
6176 		.file_offset = offsetof(struct mem_cgroup, events_file),
6177 		.seq_show = memory_events_show,
6178 	},
6179 	{
6180 		.name = "events.local",
6181 		.flags = CFTYPE_NOT_ON_ROOT,
6182 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6183 		.seq_show = memory_events_local_show,
6184 	},
6185 	{
6186 		.name = "stat",
6187 		.flags = CFTYPE_NOT_ON_ROOT,
6188 		.seq_show = memory_stat_show,
6189 	},
6190 	{
6191 		.name = "oom.group",
6192 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6193 		.seq_show = memory_oom_group_show,
6194 		.write = memory_oom_group_write,
6195 	},
6196 	{ }	/* terminate */
6197 };
6198 
6199 struct cgroup_subsys memory_cgrp_subsys = {
6200 	.css_alloc = mem_cgroup_css_alloc,
6201 	.css_online = mem_cgroup_css_online,
6202 	.css_offline = mem_cgroup_css_offline,
6203 	.css_released = mem_cgroup_css_released,
6204 	.css_free = mem_cgroup_css_free,
6205 	.css_reset = mem_cgroup_css_reset,
6206 	.can_attach = mem_cgroup_can_attach,
6207 	.cancel_attach = mem_cgroup_cancel_attach,
6208 	.post_attach = mem_cgroup_move_task,
6209 	.bind = mem_cgroup_bind,
6210 	.dfl_cftypes = memory_files,
6211 	.legacy_cftypes = mem_cgroup_legacy_files,
6212 	.early_init = 0,
6213 };
6214 
6215 /**
6216  * mem_cgroup_protected - check if memory consumption is in the normal range
6217  * @root: the top ancestor of the sub-tree being checked
6218  * @memcg: the memory cgroup to check
6219  *
6220  * WARNING: This function is not stateless! It can only be used as part
6221  *          of a top-down tree iteration, not for isolated queries.
6222  *
6223  * Returns one of the following:
6224  *   MEMCG_PROT_NONE: cgroup memory is not protected
6225  *   MEMCG_PROT_LOW: cgroup memory is protected as long as there is
6226  *     an unprotected supply of reclaimable memory from other cgroups.
6227  *   MEMCG_PROT_MIN: cgroup memory is protected
6228  *
6229  * @root is exclusive; it is never protected when looked at directly
6230  *
6231  * To provide a proper hierarchical behavior, effective memory.min/low values
6232  * are used. Below is a description of how effective memory.low is calculated.
6233  * The effective memory.min value is calculated in the same way.
6234  *
6235  * Effective memory.low is always equal to or less than the original memory.low.
6236  * If there is no memory.low overcommitment (which is always true for
6237  * top-level memory cgroups), these two values are equal.
6238  * Otherwise, it's a part of the parent's effective memory.low,
6239  * calculated as the cgroup's memory.low usage divided by the sum of the
6240  * siblings' memory.low usages, where memory.low usage is the size of actually
6241  * protected memory.
6242  *
6243  *                                             low_usage
6244  * elow = min( memory.low, parent->elow * ------------------ ),
6245  *                                        siblings_low_usage
6246  *
6247  *             | memory.current, if memory.current < memory.low
6248  * low_usage = |
6249  *	       | 0, otherwise.
6250  *
6251  *
6252  * Such a definition of effective memory.low provides the expected
6253  * hierarchical behavior: the parent's memory.low value limits its
6254  * children, unprotected memory is reclaimed first, and cgroups
6255  * that are not using their guarantee do not affect the actual memory
6256  * distribution.
6257  *
6258  * For example, if there are memcgs A, A/B, A/C, A/D and A/E:
6259  *
6260  *     A      A/memory.low = 2G, A/memory.current = 6G
6261  *    //\\
6262  *   BC  DE   B/memory.low = 3G  B/memory.current = 2G
6263  *            C/memory.low = 1G  C/memory.current = 2G
6264  *            D/memory.low = 0   D/memory.current = 2G
6265  *            E/memory.low = 10G E/memory.current = 0
6266  *
6267  * and the memory pressure is applied, the following memory distribution
6268  * is expected (approximately):
6269  *
6270  *     A/memory.current = 2G
6271  *
6272  *     B/memory.current = 1.3G
6273  *     C/memory.current = 0.6G
6274  *     D/memory.current = 0
6275  *     E/memory.current = 0
6276  *
6277  * These calculations require constant tracking of the actual low usages
6278  * (see propagate_protected_usage()), as well as recursive calculation of
6279  * effective memory.low values. But as we do call mem_cgroup_protected()
6280  * for each memory cgroup top-down from the reclaim path,
6281  * it's possible to optimize this part and save the calculated elow
6282  * for the next use. This part is intentionally racy, but that's ok,
6283  * as memory.low is a best-effort mechanism.
6284  */
6285 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
6286 						struct mem_cgroup *memcg)
6287 {
6288 	struct mem_cgroup *parent;
6289 	unsigned long emin, parent_emin;
6290 	unsigned long elow, parent_elow;
6291 	unsigned long usage;
6292 
6293 	if (mem_cgroup_disabled())
6294 		return MEMCG_PROT_NONE;
6295 
6296 	if (!root)
6297 		root = root_mem_cgroup;
6298 	if (memcg == root)
6299 		return MEMCG_PROT_NONE;
6300 
6301 	usage = page_counter_read(&memcg->memory);
6302 	if (!usage)
6303 		return MEMCG_PROT_NONE;
6304 
6305 	emin = memcg->memory.min;
6306 	elow = memcg->memory.low;
6307 
6308 	parent = parent_mem_cgroup(memcg);
6309 	/* No parent means a non-hierarchical mode on v1 memcg */
6310 	if (!parent)
6311 		return MEMCG_PROT_NONE;
6312 
6313 	if (parent == root)
6314 		goto exit;
6315 
6316 	parent_emin = READ_ONCE(parent->memory.emin);
6317 	emin = min(emin, parent_emin);
6318 	if (emin && parent_emin) {
6319 		unsigned long min_usage, siblings_min_usage;
6320 
6321 		min_usage = min(usage, memcg->memory.min);
6322 		siblings_min_usage = atomic_long_read(
6323 			&parent->memory.children_min_usage);
6324 
6325 		if (min_usage && siblings_min_usage)
6326 			emin = min(emin, parent_emin * min_usage /
6327 				   siblings_min_usage);
6328 	}
6329 
6330 	parent_elow = READ_ONCE(parent->memory.elow);
6331 	elow = min(elow, parent_elow);
6332 	if (elow && parent_elow) {
6333 		unsigned long low_usage, siblings_low_usage;
6334 
6335 		low_usage = min(usage, memcg->memory.low);
6336 		siblings_low_usage = atomic_long_read(
6337 			&parent->memory.children_low_usage);
6338 
6339 		if (low_usage && siblings_low_usage)
6340 			elow = min(elow, parent_elow * low_usage /
6341 				   siblings_low_usage);
6342 	}
6343 
6344 exit:
6345 	memcg->memory.emin = emin;
6346 	memcg->memory.elow = elow;
6347 
6348 	if (usage <= emin)
6349 		return MEMCG_PROT_MIN;
6350 	else if (usage <= elow)
6351 		return MEMCG_PROT_LOW;
6352 	else
6353 		return MEMCG_PROT_NONE;
6354 }
6355 
6356 /**
6357  * mem_cgroup_try_charge - try charging a page
6358  * @page: page to charge
6359  * @mm: mm context of the victim
6360  * @gfp_mask: reclaim mode
6361  * @memcgp: charged memcg return
6362  * @compound: charge the page as compound or small page
6363  *
6364  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6365  * pages according to @gfp_mask if necessary.
6366  *
6367  * Returns 0 on success, with *@memcgp pointing to the charged memcg.
6368  * Otherwise, an error code is returned.
6369  *
6370  * After page->mapping has been set up, the caller must finalize the
6371  * charge with mem_cgroup_commit_charge().  Or abort the transaction
6372  * with mem_cgroup_cancel_charge() in case page instantiation fails.
6373  */
6374 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
6375 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6376 			  bool compound)
6377 {
6378 	struct mem_cgroup *memcg = NULL;
6379 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6380 	int ret = 0;
6381 
6382 	if (mem_cgroup_disabled())
6383 		goto out;
6384 
6385 	if (PageSwapCache(page)) {
6386 		/*
6387 		 * Every swap fault against a single page tries to charge the
6388 		 * page, bail as early as possible.  shmem_unuse() encounters
6389 		 * already charged pages, too.  The USED bit is protected by
6390 		 * the page lock, which serializes swap cache removal, which
6391 		 * in turn serializes uncharging.
6392 		 */
6393 		VM_BUG_ON_PAGE(!PageLocked(page), page);
6394 		if (compound_head(page)->mem_cgroup)
6395 			goto out;
6396 
6397 		if (do_swap_account) {
6398 			swp_entry_t ent = { .val = page_private(page), };
6399 			unsigned short id = lookup_swap_cgroup_id(ent);
6400 
6401 			rcu_read_lock();
6402 			memcg = mem_cgroup_from_id(id);
6403 			if (memcg && !css_tryget_online(&memcg->css))
6404 				memcg = NULL;
6405 			rcu_read_unlock();
6406 		}
6407 	}
6408 
6409 	if (!memcg)
6410 		memcg = get_mem_cgroup_from_mm(mm);
6411 
6412 	ret = try_charge(memcg, gfp_mask, nr_pages);
6413 
6414 	css_put(&memcg->css);
6415 out:
6416 	*memcgp = memcg;
6417 	return ret;
6418 }
6419 
6420 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
6421 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6422 			  bool compound)
6423 {
6424 	struct mem_cgroup *memcg;
6425 	int ret;
6426 
6427 	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
6428 	memcg = *memcgp;
6429 	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
6430 	return ret;
6431 }
6432 
6433 /**
6434  * mem_cgroup_commit_charge - commit a page charge
6435  * @page: page to charge
6436  * @memcg: memcg to charge the page to
6437  * @lrucare: page might be on LRU already
6438  * @compound: charge the page as compound or small page
6439  *
6440  * Finalize a charge transaction started by mem_cgroup_try_charge(),
6441  * after page->mapping has been set up.  This must happen atomically
6442  * as part of the page instantiation, i.e. under the page table lock
6443  * for anonymous pages, under the page lock for page and swap cache.
6444  *
6445  * In addition, the page must not be on the LRU during the commit, to
6446  * prevent racing with task migration.  If it might be, use @lrucare.
6447  *
6448  * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
6449  */
6450 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
6451 			      bool lrucare, bool compound)
6452 {
6453 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6454 
6455 	VM_BUG_ON_PAGE(!page->mapping, page);
6456 	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
6457 
6458 	if (mem_cgroup_disabled())
6459 		return;
6460 	/*
6461 	 * Swap faults will attempt to charge the same page multiple
6462 	 * times.  But reuse_swap_page() might have removed the page
6463 	 * from swapcache already, so we can't check PageSwapCache().
6464 	 */
6465 	if (!memcg)
6466 		return;
6467 
6468 	commit_charge(page, memcg, lrucare);
6469 
6470 	local_irq_disable();
6471 	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
6472 	memcg_check_events(memcg, page);
6473 	local_irq_enable();
6474 
6475 	if (do_memsw_account() && PageSwapCache(page)) {
6476 		swp_entry_t entry = { .val = page_private(page) };
6477 		/*
6478 		 * The swap entry might not get freed for a long time,
6479 		 * let's not wait for it.  The page already received a
6480 		 * memory+swap charge, drop the swap entry duplicate.
6481 		 */
6482 		mem_cgroup_uncharge_swap(entry, nr_pages);
6483 	}
6484 }
6485 
6486 /**
6487  * mem_cgroup_cancel_charge - cancel a page charge
6488  * @page: page to charge
6489  * @memcg: memcg to charge the page to
6490  * @compound: charge the page as compound or small page
6491  *
6492  * Cancel a charge transaction started by mem_cgroup_try_charge().
6493  */
6494 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
6495 		bool compound)
6496 {
6497 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6498 
6499 	if (mem_cgroup_disabled())
6500 		return;
6501 	/*
6502 	 * Swap faults will attempt to charge the same page multiple
6503 	 * times.  But reuse_swap_page() might have removed the page
6504 	 * from swapcache already, so we can't check PageSwapCache().
6505 	 */
6506 	if (!memcg)
6507 		return;
6508 
6509 	cancel_charge(memcg, nr_pages);
6510 }
6511 
6512 struct uncharge_gather {
6513 	struct mem_cgroup *memcg;
6514 	unsigned long pgpgout;
6515 	unsigned long nr_anon;
6516 	unsigned long nr_file;
6517 	unsigned long nr_kmem;
6518 	unsigned long nr_huge;
6519 	unsigned long nr_shmem;
6520 	struct page *dummy_page;
6521 };
6522 
6523 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6524 {
6525 	memset(ug, 0, sizeof(*ug));
6526 }
6527 
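/*
 * Flush one gathered batch of uncharges: give the pages back to the
 * page counters, update the vmstats and event counts with interrupts
 * disabled, and drop the css references the pages were holding.
 */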
6528 static void uncharge_batch(const struct uncharge_gather *ug)
6529 {
6530 	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
6531 	unsigned long flags;
6532 
6533 	if (!mem_cgroup_is_root(ug->memcg)) {
6534 		page_counter_uncharge(&ug->memcg->memory, nr_pages);
6535 		if (do_memsw_account())
6536 			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
6537 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6538 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6539 		memcg_oom_recover(ug->memcg);
6540 	}
6541 
6542 	local_irq_save(flags);
6543 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
6544 	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
6545 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
6546 	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
6547 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6548 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
6549 	memcg_check_events(ug->memcg, ug->dummy_page);
6550 	local_irq_restore(flags);
6551 
6552 	if (!mem_cgroup_is_root(ug->memcg))
6553 		css_put_many(&ug->memcg->css, nr_pages);
6554 }
6555 
6556 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6557 {
6558 	VM_BUG_ON_PAGE(PageLRU(page), page);
6559 	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
6560 			!PageHWPoison(page) , page);
6561 
6562 	if (!page->mem_cgroup)
6563 		return;
6564 
6565 	/*
6566 	 * Nobody should be changing or seriously looking at
6567 	 * page->mem_cgroup at this point; we have fully
6568 	 * exclusive access to the page.
6569 	 */
6570 
6571 	if (ug->memcg != page->mem_cgroup) {
6572 		if (ug->memcg) {
6573 			uncharge_batch(ug);
6574 			uncharge_gather_clear(ug);
6575 		}
6576 		ug->memcg = page->mem_cgroup;
6577 	}
6578 
6579 	if (!PageKmemcg(page)) {
6580 		unsigned int nr_pages = 1;
6581 
6582 		if (PageTransHuge(page)) {
6583 			nr_pages = compound_nr(page);
6584 			ug->nr_huge += nr_pages;
6585 		}
6586 		if (PageAnon(page))
6587 			ug->nr_anon += nr_pages;
6588 		else {
6589 			ug->nr_file += nr_pages;
6590 			if (PageSwapBacked(page))
6591 				ug->nr_shmem += nr_pages;
6592 		}
6593 		ug->pgpgout++;
6594 	} else {
6595 		ug->nr_kmem += compound_nr(page);
6596 		__ClearPageKmemcg(page);
6597 	}
6598 
6599 	ug->dummy_page = page;
6600 	page->mem_cgroup = NULL;
6601 }
6602 
6603 static void uncharge_list(struct list_head *page_list)
6604 {
6605 	struct uncharge_gather ug;
6606 	struct list_head *next;
6607 
6608 	uncharge_gather_clear(&ug);
6609 
6610 	/*
6611 	 * Note that the list can be a single page->lru; hence the
6612 	 * do-while loop instead of a simple list_for_each_entry().
6613 	 */
6614 	next = page_list->next;
6615 	do {
6616 		struct page *page;
6617 
6618 		page = list_entry(next, struct page, lru);
6619 		next = page->lru.next;
6620 
6621 		uncharge_page(page, &ug);
6622 	} while (next != page_list);
6623 
6624 	if (ug.memcg)
6625 		uncharge_batch(&ug);
6626 }
6627 
6628 /**
6629  * mem_cgroup_uncharge - uncharge a page
6630  * @page: page to uncharge
6631  *
6632  * Uncharge a page previously charged with mem_cgroup_try_charge() and
6633  * mem_cgroup_commit_charge().
6634  */
6635 void mem_cgroup_uncharge(struct page *page)
6636 {
6637 	struct uncharge_gather ug;
6638 
6639 	if (mem_cgroup_disabled())
6640 		return;
6641 
6642 	/* Don't touch page->lru of any random page, pre-check: */
6643 	if (!page->mem_cgroup)
6644 		return;
6645 
6646 	uncharge_gather_clear(&ug);
6647 	uncharge_page(page, &ug);
6648 	uncharge_batch(&ug);
6649 }
6650 
6651 /**
6652  * mem_cgroup_uncharge_list - uncharge a list of pages
6653  * @page_list: list of pages to uncharge
6654  *
6655  * Uncharge a list of pages previously charged with
6656  * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6657  */
6658 void mem_cgroup_uncharge_list(struct list_head *page_list)
6659 {
6660 	if (mem_cgroup_disabled())
6661 		return;
6662 
6663 	if (!list_empty(page_list))
6664 		uncharge_list(page_list);
6665 }
6666 
6667 /**
6668  * mem_cgroup_migrate - charge a page's replacement
6669  * @oldpage: currently circulating page
6670  * @newpage: replacement page
6671  *
6672  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6673  * be uncharged upon free.
6674  *
6675  * Both pages must be locked, @newpage->mapping must be set up.
6676  */
6677 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6678 {
6679 	struct mem_cgroup *memcg;
6680 	unsigned int nr_pages;
6681 	bool compound;
6682 	unsigned long flags;
6683 
6684 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6685 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6686 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6687 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6688 		       newpage);
6689 
6690 	if (mem_cgroup_disabled())
6691 		return;
6692 
6693 	/* Page cache replacement: new page already charged? */
6694 	if (newpage->mem_cgroup)
6695 		return;
6696 
6697 	/* Swapcache readahead pages can get replaced before being charged */
6698 	memcg = oldpage->mem_cgroup;
6699 	if (!memcg)
6700 		return;
6701 
6702 	/* Force-charge the new page. The old one will be freed soon */
6703 	compound = PageTransHuge(newpage);
6704 	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
6705 
6706 	page_counter_charge(&memcg->memory, nr_pages);
6707 	if (do_memsw_account())
6708 		page_counter_charge(&memcg->memsw, nr_pages);
6709 	css_get_many(&memcg->css, nr_pages);
6710 
6711 	commit_charge(newpage, memcg, false);
6712 
6713 	local_irq_save(flags);
6714 	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
6715 	memcg_check_events(memcg, newpage);
6716 	local_irq_restore(flags);
6717 }
6718 
6719 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6720 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6721 
6722 void mem_cgroup_sk_alloc(struct sock *sk)
6723 {
6724 	struct mem_cgroup *memcg;
6725 
6726 	if (!mem_cgroup_sockets_enabled)
6727 		return;
6728 
6729 	/*
6730 	 * Socket cloning can throw us here with sk_memcg already
6731 	 * filled. It won't, however, necessarily happen from
6732 	 * process context. So testing the current task's memcg
6733 	 * against the root memcg won't help us in this case.
6734 	 *
6735 	 * Respecting the original socket's memcg is a better
6736 	 * decision in this case.
6737 	 */
6738 	if (sk->sk_memcg) {
6739 		css_get(&sk->sk_memcg->css);
6740 		return;
6741 	}
6742 
6743 	rcu_read_lock();
6744 	memcg = mem_cgroup_from_task(current);
6745 	if (memcg == root_mem_cgroup)
6746 		goto out;
6747 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6748 		goto out;
6749 	if (css_tryget_online(&memcg->css))
6750 		sk->sk_memcg = memcg;
6751 out:
6752 	rcu_read_unlock();
6753 }
6754 
6755 void mem_cgroup_sk_free(struct sock *sk)
6756 {
6757 	if (sk->sk_memcg)
6758 		css_put(&sk->sk_memcg->css);
6759 }
6760 
6761 /**
6762  * mem_cgroup_charge_skmem - charge socket memory
6763  * @memcg: memcg to charge
6764  * @nr_pages: number of pages to charge
6765  *
6766  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6767  * @memcg's configured limit, %false if the charge had to be forced.
6768  */
6769 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6770 {
6771 	gfp_t gfp_mask = GFP_KERNEL;
6772 
6773 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6774 		struct page_counter *fail;
6775 
6776 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6777 			memcg->tcpmem_pressure = 0;
6778 			return true;
6779 		}
6780 		page_counter_charge(&memcg->tcpmem, nr_pages);
6781 		memcg->tcpmem_pressure = 1;
6782 		return false;
6783 	}
6784 
6785 	/* Don't block in the packet receive path */
6786 	if (in_softirq())
6787 		gfp_mask = GFP_NOWAIT;
6788 
6789 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6790 
6791 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6792 		return true;
6793 
6794 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
6795 	return false;
6796 }
6797 
6798 /**
6799  * mem_cgroup_uncharge_skmem - uncharge socket memory
6800  * @memcg: memcg to uncharge
6801  * @nr_pages: number of pages to uncharge
6802  */
6803 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6804 {
6805 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6806 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
6807 		return;
6808 	}
6809 
6810 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6811 
6812 	refill_stock(memcg, nr_pages);
6813 }
6814 
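/*
 * Parse the "cgroup.memory=" boot option, a comma-separated list of
 * flags, e.g. cgroup.memory=nosocket,nokmem to disable socket and
 * kernel memory accounting respectively.
 */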
6815 static int __init cgroup_memory(char *s)
6816 {
6817 	char *token;
6818 
6819 	while ((token = strsep(&s, ",")) != NULL) {
6820 		if (!*token)
6821 			continue;
6822 		if (!strcmp(token, "nosocket"))
6823 			cgroup_memory_nosocket = true;
6824 		if (!strcmp(token, "nokmem"))
6825 			cgroup_memory_nokmem = true;
6826 	}
6827 	return 0;
6828 }
6829 __setup("cgroup.memory=", cgroup_memory);
6830 
6831 /*
6832  * subsys_initcall() for memory controller.
6833  *
6834  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
6835  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
6836  * basically everything that doesn't depend on a specific mem_cgroup structure
6837  * should be initialized from here.
6838  */
6839 static int __init mem_cgroup_init(void)
6840 {
6841 	int cpu, node;
6842 
6843 #ifdef CONFIG_MEMCG_KMEM
6844 	/*
6845 	 * Kmem cache creation is mostly done with the slab_mutex held,
6846 	 * so use a workqueue with limited concurrency to avoid stalling
6847 	 * all worker threads in case lots of cgroups are created and
6848 	 * destroyed simultaneously.
6849 	 */
6850 	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
6851 	BUG_ON(!memcg_kmem_cache_wq);
6852 #endif
6853 
6854 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
6855 				  memcg_hotplug_cpu_dead);
6856 
6857 	for_each_possible_cpu(cpu)
6858 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
6859 			  drain_local_stock);
6860 
6861 	for_each_node(node) {
6862 		struct mem_cgroup_tree_per_node *rtpn;
6863 
6864 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
6865 				    node_online(node) ? node : NUMA_NO_NODE);
6866 
6867 		rtpn->rb_root = RB_ROOT;
6868 		rtpn->rb_rightmost = NULL;
6869 		spin_lock_init(&rtpn->lock);
6870 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6871 	}
6872 
6873 	return 0;
6874 }
6875 subsys_initcall(mem_cgroup_init);
6876 
6877 #ifdef CONFIG_MEMCG_SWAP
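/*
 * Take an ID reference on @memcg, or, if its ID refcount has already
 * dropped to zero, on the nearest ancestor that is still online.
 */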
6878 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
6879 {
6880 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
6881 		/*
6882 		 * The root cgroup cannot be destroyed, so its refcount must
6883 		 * always be >= 1.
6884 		 */
6885 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
6886 			VM_BUG_ON(1);
6887 			break;
6888 		}
6889 		memcg = parent_mem_cgroup(memcg);
6890 		if (!memcg)
6891 			memcg = root_mem_cgroup;
6892 	}
6893 	return memcg;
6894 }
6895 
6896 /**
6897  * mem_cgroup_swapout - transfer a memsw charge to swap
6898  * @page: page whose memsw charge to transfer
6899  * @entry: swap entry to move the charge to
6900  *
6901  * Transfer the memsw charge of @page to @entry.
6902  */
6903 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
6904 {
6905 	struct mem_cgroup *memcg, *swap_memcg;
6906 	unsigned int nr_entries;
6907 	unsigned short oldid;
6908 
6909 	VM_BUG_ON_PAGE(PageLRU(page), page);
6910 	VM_BUG_ON_PAGE(page_count(page), page);
6911 
6912 	if (!do_memsw_account())
6913 		return;
6914 
6915 	memcg = page->mem_cgroup;
6916 
6917 	/* Readahead page, never charged */
6918 	if (!memcg)
6919 		return;
6920 
6921 	/*
6922 	 * In case the memcg owning these pages has been offlined and doesn't
6923 	 * have an ID allocated to it anymore, charge the closest online
6924 	 * ancestor for the swap instead and transfer the memory+swap charge.
6925 	 */
6926 	swap_memcg = mem_cgroup_id_get_online(memcg);
6927 	nr_entries = hpage_nr_pages(page);
6928 	/* Get references for the tail pages, too */
6929 	if (nr_entries > 1)
6930 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
6931 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
6932 				   nr_entries);
6933 	VM_BUG_ON_PAGE(oldid, page);
6934 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
6935 
6936 	page->mem_cgroup = NULL;
6937 
6938 	if (!mem_cgroup_is_root(memcg))
6939 		page_counter_uncharge(&memcg->memory, nr_entries);
6940 
6941 	if (memcg != swap_memcg) {
6942 		if (!mem_cgroup_is_root(swap_memcg))
6943 			page_counter_charge(&swap_memcg->memsw, nr_entries);
6944 		page_counter_uncharge(&memcg->memsw, nr_entries);
6945 	}
6946 
6947 	/*
6948 	 * Interrupts should be disabled here because the caller holds the
6949 	 * i_pages lock which is taken with interrupts-off. It is
6950 	 * important here to have the interrupts disabled because it is the
6951 	 * only synchronisation we have for updating the per-CPU variables.
6952 	 */
6953 	VM_BUG_ON(!irqs_disabled());
6954 	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
6955 				     -nr_entries);
6956 	memcg_check_events(memcg, page);
6957 
6958 	if (!mem_cgroup_is_root(memcg))
6959 		css_put_many(&memcg->css, nr_entries);
6960 }
6961 
6962 /**
6963  * mem_cgroup_try_charge_swap - try charging swap space for a page
6964  * @page: page being added to swap
6965  * @entry: swap entry to charge
6966  *
6967  * Try to charge @page's memcg for the swap space at @entry.
6968  *
6969  * Returns 0 on success, -ENOMEM on failure.
6970  */
6971 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
6972 {
6973 	unsigned int nr_pages = hpage_nr_pages(page);
6974 	struct page_counter *counter;
6975 	struct mem_cgroup *memcg;
6976 	unsigned short oldid;
6977 
6978 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
6979 		return 0;
6980 
6981 	memcg = page->mem_cgroup;
6982 
6983 	/* Readahead page, never charged */
6984 	if (!memcg)
6985 		return 0;
6986 
6987 	if (!entry.val) {
6988 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
6989 		return 0;
6990 	}
6991 
6992 	memcg = mem_cgroup_id_get_online(memcg);
6993 
6994 	if (!mem_cgroup_is_root(memcg) &&
6995 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
6996 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
6997 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
6998 		mem_cgroup_id_put(memcg);
6999 		return -ENOMEM;
7000 	}
7001 
7002 	/* Get references for the tail pages, too */
7003 	if (nr_pages > 1)
7004 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7005 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7006 	VM_BUG_ON_PAGE(oldid, page);
7007 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7008 
7009 	return 0;
7010 }
7011 
7012 /**
7013  * mem_cgroup_uncharge_swap - uncharge swap space
7014  * @entry: swap entry to uncharge
7015  * @nr_pages: the amount of swap space to uncharge
7016  */
7017 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7018 {
7019 	struct mem_cgroup *memcg;
7020 	unsigned short id;
7021 
7022 	if (!do_swap_account)
7023 		return;
7024 
7025 	id = swap_cgroup_record(entry, 0, nr_pages);
7026 	rcu_read_lock();
7027 	memcg = mem_cgroup_from_id(id);
7028 	if (memcg) {
7029 		if (!mem_cgroup_is_root(memcg)) {
7030 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7031 				page_counter_uncharge(&memcg->swap, nr_pages);
7032 			else
7033 				page_counter_uncharge(&memcg->memsw, nr_pages);
7034 		}
7035 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7036 		mem_cgroup_id_put_many(memcg, nr_pages);
7037 	}
7038 	rcu_read_unlock();
7039 }
7040 
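/*
 * Return how many pages @memcg may still swap out: the global free
 * swap count, clamped by the remaining swap.max headroom of every
 * ancestor up to the root.
 */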
7041 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7042 {
7043 	long nr_swap_pages = get_nr_swap_pages();
7044 
7045 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7046 		return nr_swap_pages;
7047 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7048 		nr_swap_pages = min_t(long, nr_swap_pages,
7049 				      READ_ONCE(memcg->swap.max) -
7050 				      page_counter_read(&memcg->swap));
7051 	return nr_swap_pages;
7052 }
7053 
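/*
 * Consider swap "full" for @page when swap space is globally tight or
 * when any ancestor of the page's memcg has used at least half of its
 * swap.max.
 */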
7054 bool mem_cgroup_swap_full(struct page *page)
7055 {
7056 	struct mem_cgroup *memcg;
7057 
7058 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7059 
7060 	if (vm_swap_full())
7061 		return true;
7062 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7063 		return false;
7064 
7065 	memcg = page->mem_cgroup;
7066 	if (!memcg)
7067 		return false;
7068 
7069 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7070 		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
7071 			return true;
7072 
7073 	return false;
7074 }
7075 
7076 /* for remembering the boot option */
7077 #ifdef CONFIG_MEMCG_SWAP_ENABLED
7078 static int really_do_swap_account __initdata = 1;
7079 #else
7080 static int really_do_swap_account __initdata;
7081 #endif
7082 
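/*
 * Parse the "swapaccount=" boot option: swapaccount=1 enables and
 * swapaccount=0 disables swap accounting, overriding the
 * CONFIG_MEMCG_SWAP_ENABLED default.
 */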
7083 static int __init enable_swap_account(char *s)
7084 {
7085 	if (!strcmp(s, "1"))
7086 		really_do_swap_account = 1;
7087 	else if (!strcmp(s, "0"))
7088 		really_do_swap_account = 0;
7089 	return 1;
7090 }
7091 __setup("swapaccount=", enable_swap_account);
7092 
7093 static u64 swap_current_read(struct cgroup_subsys_state *css,
7094 			     struct cftype *cft)
7095 {
7096 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7097 
7098 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7099 }
7100 
7101 static int swap_max_show(struct seq_file *m, void *v)
7102 {
7103 	return seq_puts_memcg_tunable(m,
7104 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7105 }
7106 
7107 static ssize_t swap_max_write(struct kernfs_open_file *of,
7108 			      char *buf, size_t nbytes, loff_t off)
7109 {
7110 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7111 	unsigned long max;
7112 	int err;
7113 
7114 	buf = strstrip(buf);
7115 	err = page_counter_memparse(buf, "max", &max);
7116 	if (err)
7117 		return err;
7118 
7119 	xchg(&memcg->swap.max, max);
7120 
7121 	return nbytes;
7122 }
7123 
7124 static int swap_events_show(struct seq_file *m, void *v)
7125 {
7126 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7127 
7128 	seq_printf(m, "max %lu\n",
7129 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7130 	seq_printf(m, "fail %lu\n",
7131 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7132 
7133 	return 0;
7134 }
7135 
7136 static struct cftype swap_files[] = {
7137 	{
7138 		.name = "swap.current",
7139 		.flags = CFTYPE_NOT_ON_ROOT,
7140 		.read_u64 = swap_current_read,
7141 	},
7142 	{
7143 		.name = "swap.max",
7144 		.flags = CFTYPE_NOT_ON_ROOT,
7145 		.seq_show = swap_max_show,
7146 		.write = swap_max_write,
7147 	},
7148 	{
7149 		.name = "swap.events",
7150 		.flags = CFTYPE_NOT_ON_ROOT,
7151 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7152 		.seq_show = swap_events_show,
7153 	},
7154 	{ }	/* terminate */
7155 };
7156 
7157 static struct cftype memsw_cgroup_files[] = {
7158 	{
7159 		.name = "memsw.usage_in_bytes",
7160 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7161 		.read_u64 = mem_cgroup_read_u64,
7162 	},
7163 	{
7164 		.name = "memsw.max_usage_in_bytes",
7165 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7166 		.write = mem_cgroup_reset,
7167 		.read_u64 = mem_cgroup_read_u64,
7168 	},
7169 	{
7170 		.name = "memsw.limit_in_bytes",
7171 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7172 		.write = mem_cgroup_write,
7173 		.read_u64 = mem_cgroup_read_u64,
7174 	},
7175 	{
7176 		.name = "memsw.failcnt",
7177 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7178 		.write = mem_cgroup_reset,
7179 		.read_u64 = mem_cgroup_read_u64,
7180 	},
7181 	{ },	/* terminate */
7182 };
7183 
7184 static int __init mem_cgroup_swap_init(void)
7185 {
7186 	if (!mem_cgroup_disabled() && really_do_swap_account) {
7187 		do_swap_account = 1;
7188 		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
7189 					       swap_files));
7190 		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
7191 						  memsw_cgroup_files));
7192 	}
7193 	return 0;
7194 }
7195 subsys_initcall(mem_cgroup_swap_init);
7196 
7197 #endif /* CONFIG_MEMCG_SWAP */
7198