xref: /linux/mm/memcontrol.c (revision 06bd48b6cd97ef3889b68c8e09014d81dbc463f1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  */
24 
25 #include <linux/page_counter.h>
26 #include <linux/memcontrol.h>
27 #include <linux/cgroup.h>
28 #include <linux/pagewalk.h>
29 #include <linux/sched/mm.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/hugetlb.h>
32 #include <linux/pagemap.h>
33 #include <linux/vm_event_item.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/swap_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include <linux/tracehook.h>
60 #include <linux/psi.h>
61 #include <linux/seq_buf.h>
62 #include "internal.h"
63 #include <net/sock.h>
64 #include <net/ip.h>
65 #include "slab.h"
66 
67 #include <linux/uaccess.h>
68 
69 #include <trace/events/vmscan.h>
70 
71 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
72 EXPORT_SYMBOL(memory_cgrp_subsys);
73 
74 struct mem_cgroup *root_mem_cgroup __read_mostly;
75 
76 #define MEM_CGROUP_RECLAIM_RETRIES	5
77 
78 /* Socket memory accounting disabled? */
79 static bool cgroup_memory_nosocket;
80 
81 /* Kernel memory accounting disabled? */
82 static bool cgroup_memory_nokmem;
83 
84 /* Whether the swap controller is active */
85 #ifdef CONFIG_MEMCG_SWAP
86 int do_swap_account __read_mostly;
87 #else
88 #define do_swap_account		0
89 #endif
90 
91 #ifdef CONFIG_CGROUP_WRITEBACK
92 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
93 #endif
94 
95 /* Whether legacy memory+swap accounting is active */
96 static bool do_memsw_account(void)
97 {
98 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99 }
100 
101 #define THRESHOLDS_EVENTS_TARGET 128
102 #define SOFTLIMIT_EVENTS_TARGET 1024
103 
104 /*
105  * Cgroups above their limits are maintained in an RB-tree, independent of
106  * their hierarchy representation
107  */
108 
109 struct mem_cgroup_tree_per_node {
110 	struct rb_root rb_root;
111 	struct rb_node *rb_rightmost;
112 	spinlock_t lock;
113 };
114 
115 struct mem_cgroup_tree {
116 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
117 };
118 
119 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
120 
121 /* for OOM */
122 struct mem_cgroup_eventfd_list {
123 	struct list_head list;
124 	struct eventfd_ctx *eventfd;
125 };
126 
127 /*
128  * struct mem_cgroup_event represents events which userspace wants to receive.
129  */
130 struct mem_cgroup_event {
131 	/*
132 	 * memcg which the event belongs to.
133 	 */
134 	struct mem_cgroup *memcg;
135 	/*
136 	 * eventfd to signal userspace about the event.
137 	 */
138 	struct eventfd_ctx *eventfd;
139 	/*
140 	 * Each of these stored in a list by the cgroup.
141 	 */
142 	struct list_head list;
143 	/*
144 	 * register_event() callback will be used to add new userspace
145 	 * waiter for changes related to this event.  Use eventfd_signal()
146 	 * on eventfd to send notification to userspace.
147 	 */
148 	int (*register_event)(struct mem_cgroup *memcg,
149 			      struct eventfd_ctx *eventfd, const char *args);
150 	/*
151 	 * unregister_event() callback will be called when userspace closes
152 	 * the eventfd or when the cgroup is removed.  This callback must be
153 	 * set if you want to provide notification functionality.
154 	 */
155 	void (*unregister_event)(struct mem_cgroup *memcg,
156 				 struct eventfd_ctx *eventfd);
157 	/*
158 	 * All fields below are needed to unregister the event when
159 	 * userspace closes the eventfd.
160 	 */
161 	poll_table pt;
162 	wait_queue_head_t *wqh;
163 	wait_queue_entry_t wait;
164 	struct work_struct remove;
165 };
166 
167 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
168 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
169 
170 /* Stuff for moving charges at task migration. */
171 /*
172  * Types of charges to be moved.
173  */
174 #define MOVE_ANON	0x1U
175 #define MOVE_FILE	0x2U
176 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
177 
178 /* "mc" and its members are protected by cgroup_mutex */
179 static struct move_charge_struct {
180 	spinlock_t	  lock; /* for from, to */
181 	struct mm_struct  *mm;
182 	struct mem_cgroup *from;
183 	struct mem_cgroup *to;
184 	unsigned long flags;
185 	unsigned long precharge;
186 	unsigned long moved_charge;
187 	unsigned long moved_swap;
188 	struct task_struct *moving_task;	/* a task moving charges */
189 	wait_queue_head_t waitq;		/* a waitq for other context */
190 } mc = {
191 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
192 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
193 };
194 
195 /*
196  * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
197  * limit reclaim to prevent infinite loops, if they ever occur.
198  */
199 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
200 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
201 
202 enum charge_type {
203 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
204 	MEM_CGROUP_CHARGE_TYPE_ANON,
205 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
206 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
207 	NR_CHARGE_TYPE,
208 };
209 
210 /* for encoding cft->private value on file */
211 enum res_type {
212 	_MEM,
213 	_MEMSWAP,
214 	_OOM_TYPE,
215 	_KMEM,
216 	_TCP,
217 };
218 
219 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
220 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
221 #define MEMFILE_ATTR(val)	((val) & 0xffff)
222 /* Used for OOM notifier */
223 #define OOM_CONTROL		(0)
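
/*
 * Illustrative example: a cgroup1 control file can pack both the resource
 * type and the attribute into cft->private, e.g.
 *
 *	.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
 *
 * and the handlers then recover the two halves with MEMFILE_TYPE() and
 * MEMFILE_ATTR(). (RES_LIMIT stands in for whatever attribute constant the
 * file uses; the actual cftype definitions live further down in this file.)
 */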
224 
225 /*
226  * Iteration constructs for visiting all cgroups (under a tree).  If
227  * loops are exited prematurely (break), mem_cgroup_iter_break() must
228  * be used for reference counting.
229  */
230 #define for_each_mem_cgroup_tree(iter, root)		\
231 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
232 	     iter != NULL;				\
233 	     iter = mem_cgroup_iter(root, iter, NULL))
234 
235 #define for_each_mem_cgroup(iter)			\
236 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
237 	     iter != NULL;				\
238 	     iter = mem_cgroup_iter(NULL, iter, NULL))
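
/*
 * Illustrative use of the iterators above: a walk that is cut short must
 * drop the reference held on the current position, e.g.
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (stop_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * stop_condition() is a placeholder; see mem_cgroup_scan_tasks() below for
 * a real user of this pattern.
 */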
239 
240 static inline bool should_force_charge(void)
241 {
242 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
243 		(current->flags & PF_EXITING);
244 }
245 
246 /* Some nice accessors for the vmpressure. */
247 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
248 {
249 	if (!memcg)
250 		memcg = root_mem_cgroup;
251 	return &memcg->vmpressure;
252 }
253 
254 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
255 {
256 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
257 }
258 
259 #ifdef CONFIG_MEMCG_KMEM
260 /*
261  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
262  * The main reason for not using the cgroup id for this is that a separate
263  * index works better in sparse environments, where we have a lot of memcgs
264  * but only a few of them kmem-limited. For instance, with 200 memcgs of
265  * which only the 200th is kmem-limited, a cgroup-id-based index would
266  * require a 200-entry array.
267  *
268  * The current size of the caches array is stored in memcg_nr_cache_ids. It
269  * will double each time we have to increase it.
270  */
271 static DEFINE_IDA(memcg_cache_ida);
272 int memcg_nr_cache_ids;
273 
274 /* Protects memcg_nr_cache_ids */
275 static DECLARE_RWSEM(memcg_cache_ids_sem);
276 
277 void memcg_get_cache_ids(void)
278 {
279 	down_read(&memcg_cache_ids_sem);
280 }
281 
282 void memcg_put_cache_ids(void)
283 {
284 	up_read(&memcg_cache_ids_sem);
285 }
286 
287 /*
288  * MIN_SIZE is different from 1, because we would like to avoid going through
289  * the alloc/free process all the time. In a small machine, 4 kmem-limited
290  * cgroups is a reasonable guess. In the future, it could be a parameter or
291  * tunable, but that is strictly not necessary.
292  *
293  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
294  * this constant directly from cgroup, but it is understandable that this is
295  * better kept as an internal representation in cgroup.c. In any case, the
296  * cgrp_id space is not getting any smaller, and we don't have to necessarily
297  * increase ours as well if it increases.
298  */
299 #define MEMCG_CACHES_MIN_SIZE 4
300 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
301 
302 /*
303  * A lot of the calls to the cache allocation functions are expected to be
304  * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
305  * conditional on this static branch, we have to allow modules that do
306  * kmem_cache_alloc and the like to see this symbol as well.
307  */
308 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
309 EXPORT_SYMBOL(memcg_kmem_enabled_key);
310 
311 struct workqueue_struct *memcg_kmem_cache_wq;
312 #endif
313 
314 static int memcg_shrinker_map_size;
315 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
316 
317 static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
318 {
319 	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
320 }
321 
322 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
323 					 int size, int old_size)
324 {
325 	struct memcg_shrinker_map *new, *old;
326 	int nid;
327 
328 	lockdep_assert_held(&memcg_shrinker_map_mutex);
329 
330 	for_each_node(nid) {
331 		old = rcu_dereference_protected(
332 			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
333 		/* Not yet online memcg */
334 		if (!old)
335 			return 0;
336 
337 		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
338 		if (!new)
339 			return -ENOMEM;
340 
341 		/* Set all old bits, clear all new bits */
342 		memset(new->map, (int)0xff, old_size);
343 		memset((void *)new->map + old_size, 0, size - old_size);
344 
345 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
346 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
347 	}
348 
349 	return 0;
350 }
351 
352 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
353 {
354 	struct mem_cgroup_per_node *pn;
355 	struct memcg_shrinker_map *map;
356 	int nid;
357 
358 	if (mem_cgroup_is_root(memcg))
359 		return;
360 
361 	for_each_node(nid) {
362 		pn = mem_cgroup_nodeinfo(memcg, nid);
363 		map = rcu_dereference_protected(pn->shrinker_map, true);
364 		if (map)
365 			kvfree(map);
366 		rcu_assign_pointer(pn->shrinker_map, NULL);
367 	}
368 }
369 
370 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
371 {
372 	struct memcg_shrinker_map *map;
373 	int nid, size, ret = 0;
374 
375 	if (mem_cgroup_is_root(memcg))
376 		return 0;
377 
378 	mutex_lock(&memcg_shrinker_map_mutex);
379 	size = memcg_shrinker_map_size;
380 	for_each_node(nid) {
381 		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
382 		if (!map) {
383 			memcg_free_shrinker_maps(memcg);
384 			ret = -ENOMEM;
385 			break;
386 		}
387 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
388 	}
389 	mutex_unlock(&memcg_shrinker_map_mutex);
390 
391 	return ret;
392 }
393 
394 int memcg_expand_shrinker_maps(int new_id)
395 {
396 	int size, old_size, ret = 0;
397 	struct mem_cgroup *memcg;
398 
399 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
400 	old_size = memcg_shrinker_map_size;
401 	if (size <= old_size)
402 		return 0;
403 
404 	mutex_lock(&memcg_shrinker_map_mutex);
405 	if (!root_mem_cgroup)
406 		goto unlock;
407 
408 	for_each_mem_cgroup(memcg) {
409 		if (mem_cgroup_is_root(memcg))
410 			continue;
411 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
412 		if (ret) {
413 			mem_cgroup_iter_break(NULL, memcg);
414 			goto unlock;
415 		}
416 	}
417 unlock:
418 	if (!ret)
419 		memcg_shrinker_map_size = size;
420 	mutex_unlock(&memcg_shrinker_map_mutex);
421 	return ret;
422 }
423 
424 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
425 {
426 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
427 		struct memcg_shrinker_map *map;
428 
429 		rcu_read_lock();
430 		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
431 		/* Pairs with smp mb in shrink_slab() */
432 		smp_mb__before_atomic();
433 		set_bit(shrinker_id, map->map);
434 		rcu_read_unlock();
435 	}
436 }
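
/*
 * Illustrative caller (outside this file): the list_lru code is expected to
 * set the bit when a memcg gains its first object on a node, roughly
 *
 *	memcg_set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
 *
 * so that shrink_slab() only visits shrinkers that may have work to do for
 * this memcg.
 */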
437 
438 /**
439  * mem_cgroup_css_from_page - css of the memcg associated with a page
440  * @page: page of interest
441  *
442  * If memcg is bound to the default hierarchy, css of the memcg associated
443  * with @page is returned.  The returned css remains associated with @page
444  * until it is released.
445  *
446  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
447  * is returned.
448  */
449 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
450 {
451 	struct mem_cgroup *memcg;
452 
453 	memcg = page->mem_cgroup;
454 
455 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
456 		memcg = root_mem_cgroup;
457 
458 	return &memcg->css;
459 }
460 
461 /**
462  * page_cgroup_ino - return inode number of the memcg a page is charged to
463  * @page: the page
464  *
465  * Look up the closest online ancestor of the memory cgroup @page is charged to
466  * and return its inode number or 0 if @page is not charged to any cgroup. It
467  * is safe to call this function without holding a reference to @page.
468  *
469  * Note, this function is inherently racy, because there is nothing to prevent
470  * the cgroup inode from getting torn down and potentially reallocated a moment
471  * after page_cgroup_ino() returns, so it only should be used by callers that
472  * do not care (such as procfs interfaces).
473  */
474 ino_t page_cgroup_ino(struct page *page)
475 {
476 	struct mem_cgroup *memcg;
477 	unsigned long ino = 0;
478 
479 	rcu_read_lock();
480 	if (PageSlab(page) && !PageTail(page))
481 		memcg = memcg_from_slab_page(page);
482 	else
483 		memcg = READ_ONCE(page->mem_cgroup);
484 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
485 		memcg = parent_mem_cgroup(memcg);
486 	if (memcg)
487 		ino = cgroup_ino(memcg->css.cgroup);
488 	rcu_read_unlock();
489 	return ino;
490 }
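
/*
 * Illustrative consumer: the /proc/kpagecgroup interface (fs/proc/page.c)
 * reports this inode number for each PFN, which is why the lookup above only
 * needs to be best-effort and may tolerate cgroup teardown races.
 */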
491 
492 static struct mem_cgroup_per_node *
493 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
494 {
495 	int nid = page_to_nid(page);
496 
497 	return memcg->nodeinfo[nid];
498 }
499 
500 static struct mem_cgroup_tree_per_node *
501 soft_limit_tree_node(int nid)
502 {
503 	return soft_limit_tree.rb_tree_per_node[nid];
504 }
505 
506 static struct mem_cgroup_tree_per_node *
507 soft_limit_tree_from_page(struct page *page)
508 {
509 	int nid = page_to_nid(page);
510 
511 	return soft_limit_tree.rb_tree_per_node[nid];
512 }
513 
514 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
515 					 struct mem_cgroup_tree_per_node *mctz,
516 					 unsigned long new_usage_in_excess)
517 {
518 	struct rb_node **p = &mctz->rb_root.rb_node;
519 	struct rb_node *parent = NULL;
520 	struct mem_cgroup_per_node *mz_node;
521 	bool rightmost = true;
522 
523 	if (mz->on_tree)
524 		return;
525 
526 	mz->usage_in_excess = new_usage_in_excess;
527 	if (!mz->usage_in_excess)
528 		return;
529 	while (*p) {
530 		parent = *p;
531 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
532 					tree_node);
533 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
534 			p = &(*p)->rb_left;
535 			rightmost = false;
536 		}
537 
538 		/*
539 		 * We can't avoid mem cgroups that are over their soft
540 		 * limit by the same amount
541 		 */
542 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
543 			p = &(*p)->rb_right;
544 	}
545 
546 	if (rightmost)
547 		mctz->rb_rightmost = &mz->tree_node;
548 
549 	rb_link_node(&mz->tree_node, parent, p);
550 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
551 	mz->on_tree = true;
552 }
553 
554 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
555 					 struct mem_cgroup_tree_per_node *mctz)
556 {
557 	if (!mz->on_tree)
558 		return;
559 
560 	if (&mz->tree_node == mctz->rb_rightmost)
561 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
562 
563 	rb_erase(&mz->tree_node, &mctz->rb_root);
564 	mz->on_tree = false;
565 }
566 
567 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
568 				       struct mem_cgroup_tree_per_node *mctz)
569 {
570 	unsigned long flags;
571 
572 	spin_lock_irqsave(&mctz->lock, flags);
573 	__mem_cgroup_remove_exceeded(mz, mctz);
574 	spin_unlock_irqrestore(&mctz->lock, flags);
575 }
576 
577 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
578 {
579 	unsigned long nr_pages = page_counter_read(&memcg->memory);
580 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
581 	unsigned long excess = 0;
582 
583 	if (nr_pages > soft_limit)
584 		excess = nr_pages - soft_limit;
585 
586 	return excess;
587 }
588 
589 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
590 {
591 	unsigned long excess;
592 	struct mem_cgroup_per_node *mz;
593 	struct mem_cgroup_tree_per_node *mctz;
594 
595 	mctz = soft_limit_tree_from_page(page);
596 	if (!mctz)
597 		return;
598 	/*
599 	 * Necessary to update all ancestors when hierarchy is used,
600 	 * because their event counter is not touched.
601 	 */
602 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
603 		mz = mem_cgroup_page_nodeinfo(memcg, page);
604 		excess = soft_limit_excess(memcg);
605 		/*
606 		 * We have to update the tree if mz is on the RB-tree or
607 		 * the memcg is over its soft limit.
608 		 */
609 		if (excess || mz->on_tree) {
610 			unsigned long flags;
611 
612 			spin_lock_irqsave(&mctz->lock, flags);
613 			/* if on-tree, remove it */
614 			if (mz->on_tree)
615 				__mem_cgroup_remove_exceeded(mz, mctz);
616 			/*
617 			 * Insert again. mz->usage_in_excess will be updated.
618 			 * If excess is 0, no tree ops.
619 			 */
620 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
621 			spin_unlock_irqrestore(&mctz->lock, flags);
622 		}
623 	}
624 }
625 
626 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
627 {
628 	struct mem_cgroup_tree_per_node *mctz;
629 	struct mem_cgroup_per_node *mz;
630 	int nid;
631 
632 	for_each_node(nid) {
633 		mz = mem_cgroup_nodeinfo(memcg, nid);
634 		mctz = soft_limit_tree_node(nid);
635 		if (mctz)
636 			mem_cgroup_remove_exceeded(mz, mctz);
637 	}
638 }
639 
640 static struct mem_cgroup_per_node *
641 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
642 {
643 	struct mem_cgroup_per_node *mz;
644 
645 retry:
646 	mz = NULL;
647 	if (!mctz->rb_rightmost)
648 		goto done;		/* Nothing to reclaim from */
649 
650 	mz = rb_entry(mctz->rb_rightmost,
651 		      struct mem_cgroup_per_node, tree_node);
652 	/*
653 	 * Remove the node now, but someone else can add it back;
654 	 * we will add it back at the end of reclaim to its correct
655 	 * position in the tree.
656 	 */
657 	__mem_cgroup_remove_exceeded(mz, mctz);
658 	if (!soft_limit_excess(mz->memcg) ||
659 	    !css_tryget(&mz->memcg->css))
660 		goto retry;
661 done:
662 	return mz;
663 }
664 
665 static struct mem_cgroup_per_node *
666 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
667 {
668 	struct mem_cgroup_per_node *mz;
669 
670 	spin_lock_irq(&mctz->lock);
671 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
672 	spin_unlock_irq(&mctz->lock);
673 	return mz;
674 }
675 
676 /**
677  * __mod_memcg_state - update cgroup memory statistics
678  * @memcg: the memory cgroup
679  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
680  * @val: delta to add to the counter, can be negative
681  */
682 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
683 {
684 	long x;
685 
686 	if (mem_cgroup_disabled())
687 		return;
688 
689 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
690 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
691 		struct mem_cgroup *mi;
692 
693 		/*
694 		 * Batch local counters to keep them in sync with
695 		 * the hierarchical ones.
696 		 */
697 		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
698 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
699 			atomic_long_add(x, &mi->vmstats[idx]);
700 		x = 0;
701 	}
702 	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
703 }
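
/*
 * Worked example (illustrative): repeated small deltas such as
 *
 *	__mod_memcg_state(memcg, NR_FILE_MAPPED, 1);
 *
 * only touch the per-cpu counter until the accumulated value exceeds
 * MEMCG_CHARGE_BATCH; at that point the whole accumulated delta is added to
 * the memcg-local counter and to the hierarchical atomic counters of @memcg
 * and all of its ancestors.
 */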
704 
705 static struct mem_cgroup_per_node *
706 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
707 {
708 	struct mem_cgroup *parent;
709 
710 	parent = parent_mem_cgroup(pn->memcg);
711 	if (!parent)
712 		return NULL;
713 	return mem_cgroup_nodeinfo(parent, nid);
714 }
715 
716 /**
717  * __mod_lruvec_state - update lruvec memory statistics
718  * @lruvec: the lruvec
719  * @idx: the stat item
720  * @val: delta to add to the counter, can be negative
721  *
722  * The lruvec is the intersection of the NUMA node and a cgroup. This
723  * function updates all three counters that are affected by a
724  * change of state at this level: per-node, per-cgroup, per-lruvec.
725  */
726 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
727 			int val)
728 {
729 	pg_data_t *pgdat = lruvec_pgdat(lruvec);
730 	struct mem_cgroup_per_node *pn;
731 	struct mem_cgroup *memcg;
732 	long x;
733 
734 	/* Update node */
735 	__mod_node_page_state(pgdat, idx, val);
736 
737 	if (mem_cgroup_disabled())
738 		return;
739 
740 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
741 	memcg = pn->memcg;
742 
743 	/* Update memcg */
744 	__mod_memcg_state(memcg, idx, val);
745 
746 	/* Update lruvec */
747 	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
748 
749 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
750 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
751 		struct mem_cgroup_per_node *pi;
752 
753 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
754 			atomic_long_add(x, &pi->lruvec_stat[idx]);
755 		x = 0;
756 	}
757 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
758 }
759 
760 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
761 {
762 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
763 	struct mem_cgroup *memcg;
764 	struct lruvec *lruvec;
765 
766 	rcu_read_lock();
767 	memcg = mem_cgroup_from_obj(p);
768 
769 	/* Untracked pages have no memcg, no lruvec. Update only the node */
770 	if (!memcg || memcg == root_mem_cgroup) {
771 		__mod_node_page_state(pgdat, idx, val);
772 	} else {
773 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
774 		__mod_lruvec_state(lruvec, idx, val);
775 	}
776 	rcu_read_unlock();
777 }
778 
779 void mod_memcg_obj_state(void *p, int idx, int val)
780 {
781 	struct mem_cgroup *memcg;
782 
783 	rcu_read_lock();
784 	memcg = mem_cgroup_from_obj(p);
785 	if (memcg)
786 		mod_memcg_state(memcg, idx, val);
787 	rcu_read_unlock();
788 }
789 
790 /**
791  * __count_memcg_events - account VM events in a cgroup
792  * @memcg: the memory cgroup
793  * @idx: the event item
794  * @count: the number of events that occurred
795  */
796 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
797 			  unsigned long count)
798 {
799 	unsigned long x;
800 
801 	if (mem_cgroup_disabled())
802 		return;
803 
804 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
805 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
806 		struct mem_cgroup *mi;
807 
808 		/*
809 		 * Batch local counters to keep them in sync with
810 		 * the hierarchical ones.
811 		 */
812 		__this_cpu_add(memcg->vmstats_local->events[idx], x);
813 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
814 			atomic_long_add(x, &mi->vmevents[idx]);
815 		x = 0;
816 	}
817 	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
818 }
819 
820 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
821 {
822 	return atomic_long_read(&memcg->vmevents[event]);
823 }
824 
825 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
826 {
827 	long x = 0;
828 	int cpu;
829 
830 	for_each_possible_cpu(cpu)
831 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
832 	return x;
833 }
834 
835 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
836 					 struct page *page,
837 					 bool compound, int nr_pages)
838 {
839 	/*
840 	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
841 	 * counted as CACHE even if it's on ANON LRU.
842 	 */
843 	if (PageAnon(page))
844 		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
845 	else {
846 		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
847 		if (PageSwapBacked(page))
848 			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
849 	}
850 
851 	if (compound) {
852 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
853 		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
854 	}
855 
856 	/* pagein of a big page is an event. So, ignore page size */
857 	if (nr_pages > 0)
858 		__count_memcg_events(memcg, PGPGIN, 1);
859 	else {
860 		__count_memcg_events(memcg, PGPGOUT, 1);
861 		nr_pages = -nr_pages; /* for event */
862 	}
863 
864 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
865 }
866 
867 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
868 				       enum mem_cgroup_events_target target)
869 {
870 	unsigned long val, next;
871 
872 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
873 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
874 	/* from time_after() in jiffies.h */
875 	if ((long)(next - val) < 0) {
876 		switch (target) {
877 		case MEM_CGROUP_TARGET_THRESH:
878 			next = val + THRESHOLDS_EVENTS_TARGET;
879 			break;
880 		case MEM_CGROUP_TARGET_SOFTLIMIT:
881 			next = val + SOFTLIMIT_EVENTS_TARGET;
882 			break;
883 		default:
884 			break;
885 		}
886 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
887 		return true;
888 	}
889 	return false;
890 }
891 
892 /*
893  * Check events in order.
894  *
895  */
896 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
897 {
898 	/* threshold event is triggered in finer grain than soft limit */
899 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
900 						MEM_CGROUP_TARGET_THRESH))) {
901 		bool do_softlimit;
902 
903 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
904 						MEM_CGROUP_TARGET_SOFTLIMIT);
905 		mem_cgroup_threshold(memcg);
906 		if (unlikely(do_softlimit))
907 			mem_cgroup_update_tree(memcg, page);
908 	}
909 }
910 
911 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
912 {
913 	/*
914 	 * mm_update_next_owner() may clear mm->owner to NULL
915 	 * if it races with swapoff, page migration, etc.
916 	 * So this can be called with p == NULL.
917 	 */
918 	if (unlikely(!p))
919 		return NULL;
920 
921 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
922 }
923 EXPORT_SYMBOL(mem_cgroup_from_task);
924 
925 /**
926  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
927  * @mm: mm from which memcg should be extracted. It can be NULL.
928  *
929  * Obtains a reference on the memcg associated with @mm and returns it if
930  * successful. Otherwise root_mem_cgroup is returned. However, if the memory
931  * controller is disabled, NULL is returned.
932  */
933 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
934 {
935 	struct mem_cgroup *memcg;
936 
937 	if (mem_cgroup_disabled())
938 		return NULL;
939 
940 	rcu_read_lock();
941 	do {
942 		/*
943 		 * Page cache insertions can happen without an
944 		 * actual mm context, e.g. during disk probing
945 		 * on boot, loopback IO, acct() writes etc.
946 		 */
947 		if (unlikely(!mm))
948 			memcg = root_mem_cgroup;
949 		else {
950 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
951 			if (unlikely(!memcg))
952 				memcg = root_mem_cgroup;
953 		}
954 	} while (!css_tryget(&memcg->css));
955 	rcu_read_unlock();
956 	return memcg;
957 }
958 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
959 
960 /**
961  * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
962  * @page: page from which memcg should be extracted.
963  *
964  * Obtains a reference on the page's memcg and returns it if successful.
965  * Otherwise root_mem_cgroup is returned.
966  */
967 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
968 {
969 	struct mem_cgroup *memcg = page->mem_cgroup;
970 
971 	if (mem_cgroup_disabled())
972 		return NULL;
973 
974 	rcu_read_lock();
975 	/* The page's memcg should not get uncharged and freed under us. */
976 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
977 		memcg = root_mem_cgroup;
978 	rcu_read_unlock();
979 	return memcg;
980 }
981 EXPORT_SYMBOL(get_mem_cgroup_from_page);
982 
983 /**
984  * If current->active_memcg is non-NULL, do not fall back to current->mm's memcg.
985  */
986 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
987 {
988 	if (unlikely(current->active_memcg)) {
989 		struct mem_cgroup *memcg;
990 
991 		rcu_read_lock();
992 		/* current->active_memcg must hold a ref. */
993 		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
994 			memcg = root_mem_cgroup;
995 		else
996 			memcg = current->active_memcg;
997 		rcu_read_unlock();
998 		return memcg;
999 	}
1000 	return get_mem_cgroup_from_mm(current->mm);
1001 }
1002 
1003 /**
1004  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1005  * @root: hierarchy root
1006  * @prev: previously returned memcg, NULL on first invocation
1007  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1008  *
1009  * Returns references to children of the hierarchy below @root, or
1010  * @root itself, or %NULL after a full round-trip.
1011  *
1012  * Caller must pass the return value in @prev on subsequent
1013  * invocations for reference counting, or use mem_cgroup_iter_break()
1014  * to cancel a hierarchy walk before the round-trip is complete.
1015  *
1016  * Reclaimers can specify a node and a priority level in @reclaim to
1017  * divide up the memcgs in the hierarchy among all concurrent
1018  * reclaimers operating on the same node and priority.
1019  */
1020 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1021 				   struct mem_cgroup *prev,
1022 				   struct mem_cgroup_reclaim_cookie *reclaim)
1023 {
1024 	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1025 	struct cgroup_subsys_state *css = NULL;
1026 	struct mem_cgroup *memcg = NULL;
1027 	struct mem_cgroup *pos = NULL;
1028 
1029 	if (mem_cgroup_disabled())
1030 		return NULL;
1031 
1032 	if (!root)
1033 		root = root_mem_cgroup;
1034 
1035 	if (prev && !reclaim)
1036 		pos = prev;
1037 
1038 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1039 		if (prev)
1040 			goto out;
1041 		return root;
1042 	}
1043 
1044 	rcu_read_lock();
1045 
1046 	if (reclaim) {
1047 		struct mem_cgroup_per_node *mz;
1048 
1049 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1050 		iter = &mz->iter;
1051 
1052 		if (prev && reclaim->generation != iter->generation)
1053 			goto out_unlock;
1054 
1055 		while (1) {
1056 			pos = READ_ONCE(iter->position);
1057 			if (!pos || css_tryget(&pos->css))
1058 				break;
1059 			/*
1060 			 * css reference reached zero, so iter->position will
1061 			 * be cleared by ->css_released. However, we should not
1062 			 * rely on this happening soon, because ->css_released
1063 			 * is called from a work queue, and by busy-waiting we
1064 			 * might block it. So we clear iter->position right
1065 			 * away.
1066 			 */
1067 			(void)cmpxchg(&iter->position, pos, NULL);
1068 		}
1069 	}
1070 
1071 	if (pos)
1072 		css = &pos->css;
1073 
1074 	for (;;) {
1075 		css = css_next_descendant_pre(css, &root->css);
1076 		if (!css) {
1077 			/*
1078 			 * Reclaimers share the hierarchy walk, and a
1079 			 * new one might jump in right at the end of
1080 			 * the hierarchy - make sure they see at least
1081 			 * one group and restart from the beginning.
1082 			 */
1083 			if (!prev)
1084 				continue;
1085 			break;
1086 		}
1087 
1088 		/*
1089 		 * Verify the css and acquire a reference.  The root
1090 		 * is provided by the caller, so we know it's alive
1091 		 * and kicking, and don't take an extra reference.
1092 		 */
1093 		memcg = mem_cgroup_from_css(css);
1094 
1095 		if (css == &root->css)
1096 			break;
1097 
1098 		if (css_tryget(css))
1099 			break;
1100 
1101 		memcg = NULL;
1102 	}
1103 
1104 	if (reclaim) {
1105 		/*
1106 		 * The position could have already been updated by a competing
1107 		 * thread, so check that the value hasn't changed since we read
1108 		 * it to avoid reclaiming from the same cgroup twice.
1109 		 */
1110 		(void)cmpxchg(&iter->position, pos, memcg);
1111 
1112 		if (pos)
1113 			css_put(&pos->css);
1114 
1115 		if (!memcg)
1116 			iter->generation++;
1117 		else if (!prev)
1118 			reclaim->generation = iter->generation;
1119 	}
1120 
1121 out_unlock:
1122 	rcu_read_unlock();
1123 out:
1124 	if (prev && prev != root)
1125 		css_put(&prev->css);
1126 
1127 	return memcg;
1128 }
1129 
1130 /**
1131  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1132  * @root: hierarchy root
1133  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1134  */
1135 void mem_cgroup_iter_break(struct mem_cgroup *root,
1136 			   struct mem_cgroup *prev)
1137 {
1138 	if (!root)
1139 		root = root_mem_cgroup;
1140 	if (prev && prev != root)
1141 		css_put(&prev->css);
1142 }
1143 
1144 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1145 					struct mem_cgroup *dead_memcg)
1146 {
1147 	struct mem_cgroup_reclaim_iter *iter;
1148 	struct mem_cgroup_per_node *mz;
1149 	int nid;
1150 
1151 	for_each_node(nid) {
1152 		mz = mem_cgroup_nodeinfo(from, nid);
1153 		iter = &mz->iter;
1154 		cmpxchg(&iter->position, dead_memcg, NULL);
1155 	}
1156 }
1157 
1158 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1159 {
1160 	struct mem_cgroup *memcg = dead_memcg;
1161 	struct mem_cgroup *last;
1162 
1163 	do {
1164 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1165 		last = memcg;
1166 	} while ((memcg = parent_mem_cgroup(memcg)));
1167 
1168 	/*
1169 	 * When cgroup1 non-hierarchy mode is used,
1170 	 * parent_mem_cgroup() does not walk all the way up to the
1171 	 * cgroup root (root_mem_cgroup). So we have to handle
1172 	 * dead_memcg from cgroup root separately.
1173 	 */
1174 	if (last != root_mem_cgroup)
1175 		__invalidate_reclaim_iterators(root_mem_cgroup,
1176 						dead_memcg);
1177 }
1178 
1179 /**
1180  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1181  * @memcg: hierarchy root
1182  * @fn: function to call for each task
1183  * @arg: argument passed to @fn
1184  *
1185  * This function iterates over tasks attached to @memcg or to any of its
1186  * descendants and calls @fn for each task. If @fn returns a non-zero
1187  * value, the function breaks the iteration loop and returns the value.
1188  * Otherwise, it will iterate over all tasks and return 0.
1189  *
1190  * This function must not be called for the root memory cgroup.
1191  */
1192 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1193 			  int (*fn)(struct task_struct *, void *), void *arg)
1194 {
1195 	struct mem_cgroup *iter;
1196 	int ret = 0;
1197 
1198 	BUG_ON(memcg == root_mem_cgroup);
1199 
1200 	for_each_mem_cgroup_tree(iter, memcg) {
1201 		struct css_task_iter it;
1202 		struct task_struct *task;
1203 
1204 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1205 		while (!ret && (task = css_task_iter_next(&it)))
1206 			ret = fn(task, arg);
1207 		css_task_iter_end(&it);
1208 		if (ret) {
1209 			mem_cgroup_iter_break(memcg, iter);
1210 			break;
1211 		}
1212 	}
1213 	return ret;
1214 }
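
/*
 * Illustrative caller: the OOM killer evaluates every task in an OOMing
 * hierarchy with roughly
 *
 *	mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
 *
 * (see mm/oom_kill.c); @fn returning non-zero aborts the walk early.
 */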
1215 
1216 /**
1217  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1218  * @page: the page
1219  * @pgdat: pgdat of the page
1220  *
1221  * This function is only safe when following the LRU page isolation
1222  * and putback protocol: the LRU lock must be held, and the page must
1223  * either be PageLRU() or the caller must have isolated/allocated it.
1224  */
1225 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1226 {
1227 	struct mem_cgroup_per_node *mz;
1228 	struct mem_cgroup *memcg;
1229 	struct lruvec *lruvec;
1230 
1231 	if (mem_cgroup_disabled()) {
1232 		lruvec = &pgdat->__lruvec;
1233 		goto out;
1234 	}
1235 
1236 	memcg = page->mem_cgroup;
1237 	/*
1238 	 * Swapcache readahead pages are added to the LRU - and
1239 	 * possibly migrated - before they are charged.
1240 	 */
1241 	if (!memcg)
1242 		memcg = root_mem_cgroup;
1243 
1244 	mz = mem_cgroup_page_nodeinfo(memcg, page);
1245 	lruvec = &mz->lruvec;
1246 out:
1247 	/*
1248 	 * Since a node can be onlined after the mem_cgroup was created,
1249 	 * we have to be prepared to initialize lruvec->pgdat here;
1250 	 * and if offlined then onlined again, we need to reinitialize it.
1251 	 */
1252 	if (unlikely(lruvec->pgdat != pgdat))
1253 		lruvec->pgdat = pgdat;
1254 	return lruvec;
1255 }
1256 
1257 /**
1258  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1259  * @lruvec: mem_cgroup per zone lru vector
1260  * @lru: index of lru list the page is sitting on
1261  * @zid: zone id of the accounted pages
1262  * @nr_pages: positive when adding or negative when removing
1263  *
1264  * This function must be called under lru_lock, just before a page is added
1265  * to or just after a page is removed from an lru list (that ordering being
1266  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1267  */
1268 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1269 				int zid, int nr_pages)
1270 {
1271 	struct mem_cgroup_per_node *mz;
1272 	unsigned long *lru_size;
1273 	long size;
1274 
1275 	if (mem_cgroup_disabled())
1276 		return;
1277 
1278 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1279 	lru_size = &mz->lru_zone_size[zid][lru];
1280 
1281 	if (nr_pages < 0)
1282 		*lru_size += nr_pages;
1283 
1284 	size = *lru_size;
1285 	if (WARN_ONCE(size < 0,
1286 		"%s(%p, %d, %d): lru_size %ld\n",
1287 		__func__, lruvec, lru, nr_pages, size)) {
1288 		VM_BUG_ON(1);
1289 		*lru_size = 0;
1290 	}
1291 
1292 	if (nr_pages > 0)
1293 		*lru_size += nr_pages;
1294 }
1295 
1296 /**
1297  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1298  * @memcg: the memory cgroup
1299  *
1300  * Returns the maximum amount of memory @memcg can be charged with, in
1301  * pages.
1302  */
1303 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1304 {
1305 	unsigned long margin = 0;
1306 	unsigned long count;
1307 	unsigned long limit;
1308 
1309 	count = page_counter_read(&memcg->memory);
1310 	limit = READ_ONCE(memcg->memory.max);
1311 	if (count < limit)
1312 		margin = limit - count;
1313 
1314 	if (do_memsw_account()) {
1315 		count = page_counter_read(&memcg->memsw);
1316 		limit = READ_ONCE(memcg->memsw.max);
1317 		if (count <= limit)
1318 			margin = min(margin, limit - count);
1319 		else
1320 			margin = 0;
1321 	}
1322 
1323 	return margin;
1324 }
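
/*
 * Worked example (illustrative numbers): with memory.max at 1000 pages and
 * 900 pages charged, the margin is 100 pages. If legacy memsw accounting is
 * active with memsw.max at 950 and 920 pages charged, the margin shrinks to
 * min(100, 950 - 920) = 30 pages.
 */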
1325 
1326 /*
1327  * A routine for checking whether @memcg is under move_account() or not.
1328  *
1329  * Checks whether the cgroup is mc.from, mc.to, or in the hierarchy of the
1330  * moving cgroups. This is used for waiting out the high memory pressure
1331  * caused by "move".
1332  */
1333 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1334 {
1335 	struct mem_cgroup *from;
1336 	struct mem_cgroup *to;
1337 	bool ret = false;
1338 	/*
1339 	 * Unlike the task-move routines, we access mc.to and mc.from without
1340 	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1341 	 */
1342 	spin_lock(&mc.lock);
1343 	from = mc.from;
1344 	to = mc.to;
1345 	if (!from)
1346 		goto unlock;
1347 
1348 	ret = mem_cgroup_is_descendant(from, memcg) ||
1349 		mem_cgroup_is_descendant(to, memcg);
1350 unlock:
1351 	spin_unlock(&mc.lock);
1352 	return ret;
1353 }
1354 
1355 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1356 {
1357 	if (mc.moving_task && current != mc.moving_task) {
1358 		if (mem_cgroup_under_move(memcg)) {
1359 			DEFINE_WAIT(wait);
1360 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1361 			/* moving charge context might have finished. */
1362 			if (mc.moving_task)
1363 				schedule();
1364 			finish_wait(&mc.waitq, &wait);
1365 			return true;
1366 		}
1367 	}
1368 	return false;
1369 }
1370 
1371 static char *memory_stat_format(struct mem_cgroup *memcg)
1372 {
1373 	struct seq_buf s;
1374 	int i;
1375 
1376 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1377 	if (!s.buffer)
1378 		return NULL;
1379 
1380 	/*
1381 	 * Provide statistics on the state of the memory subsystem as
1382 	 * well as cumulative event counters that show past behavior.
1383 	 *
1384 	 * This list is ordered following a combination of these gradients:
1385 	 * 1) generic big picture -> specifics and details
1386 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1387 	 *
1388 	 * Current memory state:
1389 	 */
1390 
1391 	seq_buf_printf(&s, "anon %llu\n",
1392 		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
1393 		       PAGE_SIZE);
1394 	seq_buf_printf(&s, "file %llu\n",
1395 		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
1396 		       PAGE_SIZE);
1397 	seq_buf_printf(&s, "kernel_stack %llu\n",
1398 		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
1399 		       1024);
1400 	seq_buf_printf(&s, "slab %llu\n",
1401 		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
1402 			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
1403 		       PAGE_SIZE);
1404 	seq_buf_printf(&s, "sock %llu\n",
1405 		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
1406 		       PAGE_SIZE);
1407 
1408 	seq_buf_printf(&s, "shmem %llu\n",
1409 		       (u64)memcg_page_state(memcg, NR_SHMEM) *
1410 		       PAGE_SIZE);
1411 	seq_buf_printf(&s, "file_mapped %llu\n",
1412 		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
1413 		       PAGE_SIZE);
1414 	seq_buf_printf(&s, "file_dirty %llu\n",
1415 		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
1416 		       PAGE_SIZE);
1417 	seq_buf_printf(&s, "file_writeback %llu\n",
1418 		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
1419 		       PAGE_SIZE);
1420 
1421 	/*
1422 	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
1423 	 * with the NR_ANON_THP vm counter, but right now it's a pain in the
1424 	 * arse because it requires migrating the work out of rmap to a place
1425 	 * where the page->mem_cgroup is set up and stable.
1426 	 */
1427 	seq_buf_printf(&s, "anon_thp %llu\n",
1428 		       (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
1429 		       PAGE_SIZE);
1430 
1431 	for (i = 0; i < NR_LRU_LISTS; i++)
1432 		seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
1433 			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
1434 			       PAGE_SIZE);
1435 
1436 	seq_buf_printf(&s, "slab_reclaimable %llu\n",
1437 		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
1438 		       PAGE_SIZE);
1439 	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
1440 		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
1441 		       PAGE_SIZE);
1442 
1443 	/* Accumulated memory events */
1444 
1445 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1446 		       memcg_events(memcg, PGFAULT));
1447 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1448 		       memcg_events(memcg, PGMAJFAULT));
1449 
1450 	seq_buf_printf(&s, "workingset_refault %lu\n",
1451 		       memcg_page_state(memcg, WORKINGSET_REFAULT));
1452 	seq_buf_printf(&s, "workingset_activate %lu\n",
1453 		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
1454 	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
1455 		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
1456 
1457 	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1458 		       memcg_events(memcg, PGREFILL));
1459 	seq_buf_printf(&s, "pgscan %lu\n",
1460 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1461 		       memcg_events(memcg, PGSCAN_DIRECT));
1462 	seq_buf_printf(&s, "pgsteal %lu\n",
1463 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1464 		       memcg_events(memcg, PGSTEAL_DIRECT));
1465 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1466 		       memcg_events(memcg, PGACTIVATE));
1467 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1468 		       memcg_events(memcg, PGDEACTIVATE));
1469 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1470 		       memcg_events(memcg, PGLAZYFREE));
1471 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1472 		       memcg_events(memcg, PGLAZYFREED));
1473 
1474 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1475 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1476 		       memcg_events(memcg, THP_FAULT_ALLOC));
1477 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1478 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1479 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1480 
1481 	/* The above should easily fit into one page */
1482 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1483 
1484 	return s.buffer;
1485 }
1486 
1487 #define K(x) ((x) << (PAGE_SHIFT-10))
1488 /**
1489  * mem_cgroup_print_oom_context: Print OOM information relevant to
1490  * memory controller.
1491  * @memcg: The memory cgroup that went over limit
1492  * @p: Task that is going to be killed
1493  *
1494  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1495  * enabled
1496  */
1497 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1498 {
1499 	rcu_read_lock();
1500 
1501 	if (memcg) {
1502 		pr_cont(",oom_memcg=");
1503 		pr_cont_cgroup_path(memcg->css.cgroup);
1504 	} else
1505 		pr_cont(",global_oom");
1506 	if (p) {
1507 		pr_cont(",task_memcg=");
1508 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1509 	}
1510 	rcu_read_unlock();
1511 }
1512 
1513 /**
1514  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1515  * memory controller.
1516  * @memcg: The memory cgroup that went over limit
1517  */
1518 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1519 {
1520 	char *buf;
1521 
1522 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1523 		K((u64)page_counter_read(&memcg->memory)),
1524 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1525 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1526 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1527 			K((u64)page_counter_read(&memcg->swap)),
1528 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1529 	else {
1530 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1531 			K((u64)page_counter_read(&memcg->memsw)),
1532 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1533 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1534 			K((u64)page_counter_read(&memcg->kmem)),
1535 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1536 	}
1537 
1538 	pr_info("Memory cgroup stats for ");
1539 	pr_cont_cgroup_path(memcg->css.cgroup);
1540 	pr_cont(":");
1541 	buf = memory_stat_format(memcg);
1542 	if (!buf)
1543 		return;
1544 	pr_info("%s", buf);
1545 	kfree(buf);
1546 }
1547 
1548 /*
1549  * Return the memory (and swap, if configured) limit for a memcg.
1550  */
1551 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1552 {
1553 	unsigned long max;
1554 
1555 	max = READ_ONCE(memcg->memory.max);
1556 	if (mem_cgroup_swappiness(memcg)) {
1557 		unsigned long memsw_max;
1558 		unsigned long swap_max;
1559 
1560 		memsw_max = memcg->memsw.max;
1561 		swap_max = READ_ONCE(memcg->swap.max);
1562 		swap_max = min(swap_max, (unsigned long)total_swap_pages);
1563 		max = min(max + swap_max, memsw_max);
1564 	}
1565 	return max;
1566 }
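
/*
 * Worked example (illustrative numbers): with memory.max = 800 pages,
 * swap.max = 300 pages, total_swap_pages = 200 and non-zero swappiness, the
 * result is min(800 + min(300, 200), memsw.max) = min(1000, memsw.max).
 * With swappiness of 0, swap is ignored and the result is simply 800.
 */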
1567 
1568 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1569 {
1570 	return page_counter_read(&memcg->memory);
1571 }
1572 
1573 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1574 				     int order)
1575 {
1576 	struct oom_control oc = {
1577 		.zonelist = NULL,
1578 		.nodemask = NULL,
1579 		.memcg = memcg,
1580 		.gfp_mask = gfp_mask,
1581 		.order = order,
1582 	};
1583 	bool ret;
1584 
1585 	if (mutex_lock_killable(&oom_lock))
1586 		return true;
1587 	/*
1588 	 * A few threads which were not waiting at mutex_lock_killable() can
1589 	 * fail to bail out. Therefore, check again after holding oom_lock.
1590 	 */
1591 	ret = should_force_charge() || out_of_memory(&oc);
1592 	mutex_unlock(&oom_lock);
1593 	return ret;
1594 }
1595 
1596 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1597 				   pg_data_t *pgdat,
1598 				   gfp_t gfp_mask,
1599 				   unsigned long *total_scanned)
1600 {
1601 	struct mem_cgroup *victim = NULL;
1602 	int total = 0;
1603 	int loop = 0;
1604 	unsigned long excess;
1605 	unsigned long nr_scanned;
1606 	struct mem_cgroup_reclaim_cookie reclaim = {
1607 		.pgdat = pgdat,
1608 	};
1609 
1610 	excess = soft_limit_excess(root_memcg);
1611 
1612 	while (1) {
1613 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1614 		if (!victim) {
1615 			loop++;
1616 			if (loop >= 2) {
1617 				/*
1618 				 * If we have not been able to reclaim
1619 				 * anything, it might be because there are
1620 				 * no reclaimable pages under this hierarchy.
1621 				 */
1622 				if (!total)
1623 					break;
1624 				/*
1625 				 * We want to do more targeted reclaim.
1626 				 * excess >> 2 is not too large, so we won't
1627 				 * reclaim too much, nor too small, so we won't
1628 				 * keep coming back to reclaim from this cgroup.
1629 				 */
1630 				if (total >= (excess >> 2) ||
1631 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1632 					break;
1633 			}
1634 			continue;
1635 		}
1636 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1637 					pgdat, &nr_scanned);
1638 		*total_scanned += nr_scanned;
1639 		if (!soft_limit_excess(root_memcg))
1640 			break;
1641 	}
1642 	mem_cgroup_iter_break(root_memcg, victim);
1643 	return total;
1644 }
1645 
1646 #ifdef CONFIG_LOCKDEP
1647 static struct lockdep_map memcg_oom_lock_dep_map = {
1648 	.name = "memcg_oom_lock",
1649 };
1650 #endif
1651 
1652 static DEFINE_SPINLOCK(memcg_oom_lock);
1653 
1654 /*
1655  * Check whether the OOM killer is already running under our hierarchy.
1656  * If someone else is running it, return false.
1657  */
1658 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1659 {
1660 	struct mem_cgroup *iter, *failed = NULL;
1661 
1662 	spin_lock(&memcg_oom_lock);
1663 
1664 	for_each_mem_cgroup_tree(iter, memcg) {
1665 		if (iter->oom_lock) {
1666 			/*
1667 			 * This subtree of our hierarchy is already locked,
1668 			 * so we cannot take the lock.
1669 			 */
1670 			failed = iter;
1671 			mem_cgroup_iter_break(memcg, iter);
1672 			break;
1673 		} else
1674 			iter->oom_lock = true;
1675 	}
1676 
1677 	if (failed) {
1678 		/*
1679 		 * OK, we failed to lock the whole subtree, so we have
1680 		 * to clean up what we already set up, up to the failing subtree.
1681 		 */
1682 		for_each_mem_cgroup_tree(iter, memcg) {
1683 			if (iter == failed) {
1684 				mem_cgroup_iter_break(memcg, iter);
1685 				break;
1686 			}
1687 			iter->oom_lock = false;
1688 		}
1689 	} else
1690 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1691 
1692 	spin_unlock(&memcg_oom_lock);
1693 
1694 	return !failed;
1695 }
1696 
1697 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1698 {
1699 	struct mem_cgroup *iter;
1700 
1701 	spin_lock(&memcg_oom_lock);
1702 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1703 	for_each_mem_cgroup_tree(iter, memcg)
1704 		iter->oom_lock = false;
1705 	spin_unlock(&memcg_oom_lock);
1706 }
1707 
1708 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1709 {
1710 	struct mem_cgroup *iter;
1711 
1712 	spin_lock(&memcg_oom_lock);
1713 	for_each_mem_cgroup_tree(iter, memcg)
1714 		iter->under_oom++;
1715 	spin_unlock(&memcg_oom_lock);
1716 }
1717 
1718 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1719 {
1720 	struct mem_cgroup *iter;
1721 
1722 	/*
1723 	 * When a new child is created while the hierarchy is under oom,
1724 	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1725 	 */
1726 	spin_lock(&memcg_oom_lock);
1727 	for_each_mem_cgroup_tree(iter, memcg)
1728 		if (iter->under_oom > 0)
1729 			iter->under_oom--;
1730 	spin_unlock(&memcg_oom_lock);
1731 }
1732 
1733 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1734 
1735 struct oom_wait_info {
1736 	struct mem_cgroup *memcg;
1737 	wait_queue_entry_t	wait;
1738 };
1739 
1740 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1741 	unsigned mode, int sync, void *arg)
1742 {
1743 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1744 	struct mem_cgroup *oom_wait_memcg;
1745 	struct oom_wait_info *oom_wait_info;
1746 
1747 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1748 	oom_wait_memcg = oom_wait_info->memcg;
1749 
1750 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1751 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1752 		return 0;
1753 	return autoremove_wake_function(wait, mode, sync, arg);
1754 }
1755 
1756 static void memcg_oom_recover(struct mem_cgroup *memcg)
1757 {
1758 	/*
1759 	 * For the following lockless ->under_oom test, the only required
1760 	 * guarantee is that it must see the state asserted by an OOM when
1761 	 * this function is called as a result of userland actions
1762 	 * triggered by the notification of the OOM.  This is trivially
1763 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1764 	 * triggering notification.
1765 	 */
1766 	if (memcg && memcg->under_oom)
1767 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1768 }
1769 
1770 enum oom_status {
1771 	OOM_SUCCESS,
1772 	OOM_FAILED,
1773 	OOM_ASYNC,
1774 	OOM_SKIPPED
1775 };
1776 
1777 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1778 {
1779 	enum oom_status ret;
1780 	bool locked;
1781 
1782 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1783 		return OOM_SKIPPED;
1784 
1785 	memcg_memory_event(memcg, MEMCG_OOM);
1786 
1787 	/*
1788 	 * We are in the middle of the charge context here, so we
1789 	 * don't want to block when potentially sitting on a callstack
1790 	 * that holds all kinds of filesystem and mm locks.
1791 	 *
1792 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1793 	 * handling until the charge can succeed; remember the context and put
1794 	 * the task to sleep at the end of the page fault when all locks are
1795 	 * released.
1796 	 *
1797 	 * On the other hand, in-kernel OOM killer allows for an async victim
1798 	 * memory reclaim (oom_reaper) and that means that we are not solely
1799 	 * relying on the oom victim to make a forward progress and we can
1800 	 * invoke the oom killer here.
1801 	 *
1802 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1803 	 * victim and then we have to bail out from the charge path.
1804 	 */
1805 	if (memcg->oom_kill_disable) {
1806 		if (!current->in_user_fault)
1807 			return OOM_SKIPPED;
1808 		css_get(&memcg->css);
1809 		current->memcg_in_oom = memcg;
1810 		current->memcg_oom_gfp_mask = mask;
1811 		current->memcg_oom_order = order;
1812 
1813 		return OOM_ASYNC;
1814 	}
1815 
1816 	mem_cgroup_mark_under_oom(memcg);
1817 
1818 	locked = mem_cgroup_oom_trylock(memcg);
1819 
1820 	if (locked)
1821 		mem_cgroup_oom_notify(memcg);
1822 
1823 	mem_cgroup_unmark_under_oom(memcg);
1824 	if (mem_cgroup_out_of_memory(memcg, mask, order))
1825 		ret = OOM_SUCCESS;
1826 	else
1827 		ret = OOM_FAILED;
1828 
1829 	if (locked)
1830 		mem_cgroup_oom_unlock(memcg);
1831 
1832 	return ret;
1833 }
1834 
1835 /**
1836  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1837  * @handle: actually kill/wait or just clean up the OOM state
1838  *
1839  * This has to be called at the end of a page fault if the memcg OOM
1840  * handler was enabled.
1841  *
1842  * Memcg supports userspace OOM handling where failed allocations must
1843  * sleep on a waitqueue until the userspace task resolves the
1844  * situation.  Sleeping directly in the charge context with all kinds
1845  * of locks held is not a good idea, instead we remember an OOM state
1846  * in the task and mem_cgroup_oom_synchronize() has to be called at
1847  * the end of the page fault to complete the OOM handling.
1848  *
1849  * Returns %true if an ongoing memcg OOM situation was detected and
1850  * completed, %false otherwise.
1851  */
1852 bool mem_cgroup_oom_synchronize(bool handle)
1853 {
1854 	struct mem_cgroup *memcg = current->memcg_in_oom;
1855 	struct oom_wait_info owait;
1856 	bool locked;
1857 
1858 	/* OOM is global, do not handle */
1859 	if (!memcg)
1860 		return false;
1861 
1862 	if (!handle)
1863 		goto cleanup;
1864 
1865 	owait.memcg = memcg;
1866 	owait.wait.flags = 0;
1867 	owait.wait.func = memcg_oom_wake_function;
1868 	owait.wait.private = current;
1869 	INIT_LIST_HEAD(&owait.wait.entry);
1870 
1871 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1872 	mem_cgroup_mark_under_oom(memcg);
1873 
1874 	locked = mem_cgroup_oom_trylock(memcg);
1875 
1876 	if (locked)
1877 		mem_cgroup_oom_notify(memcg);
1878 
1879 	if (locked && !memcg->oom_kill_disable) {
1880 		mem_cgroup_unmark_under_oom(memcg);
1881 		finish_wait(&memcg_oom_waitq, &owait.wait);
1882 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1883 					 current->memcg_oom_order);
1884 	} else {
1885 		schedule();
1886 		mem_cgroup_unmark_under_oom(memcg);
1887 		finish_wait(&memcg_oom_waitq, &owait.wait);
1888 	}
1889 
1890 	if (locked) {
1891 		mem_cgroup_oom_unlock(memcg);
1892 		/*
1893 		 * There is no guarantee that an OOM-lock contender
1894 		 * sees the wakeups triggered by the OOM kill
1895 		 * uncharges.  Wake any sleepers explicitly.
1896 		 */
1897 		memcg_oom_recover(memcg);
1898 	}
1899 cleanup:
1900 	current->memcg_in_oom = NULL;
1901 	css_put(&memcg->css);
1902 	return true;
1903 }
1904 
1905 /**
1906  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1907  * @victim: task to be killed by the OOM killer
1908  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1909  *
1910  * Returns a pointer to a memory cgroup, which has to be cleaned up
1911  * by killing all belonging OOM-killable tasks.
1912  *
1913  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1914  */
1915 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1916 					    struct mem_cgroup *oom_domain)
1917 {
1918 	struct mem_cgroup *oom_group = NULL;
1919 	struct mem_cgroup *memcg;
1920 
1921 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1922 		return NULL;
1923 
1924 	if (!oom_domain)
1925 		oom_domain = root_mem_cgroup;
1926 
1927 	rcu_read_lock();
1928 
1929 	memcg = mem_cgroup_from_task(victim);
1930 	if (memcg == root_mem_cgroup)
1931 		goto out;
1932 
1933 	/*
1934 	 * If the victim task has been asynchronously moved to a different
1935 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1936 	 * In this case it's better to ignore memory.group.oom.
1937 	 */
1938 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1939 		goto out;
1940 
1941 	/*
1942 	 * Traverse the memory cgroup hierarchy from the victim task's
1943 	 * cgroup up to the OOMing cgroup (or root) to find the
1944 	 * highest-level memory cgroup with oom.group set.
1945 	 */
1946 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1947 		if (memcg->oom_group)
1948 			oom_group = memcg;
1949 
1950 		if (memcg == oom_domain)
1951 			break;
1952 	}
1953 
1954 	if (oom_group)
1955 		css_get(&oom_group->css);
1956 out:
1957 	rcu_read_unlock();
1958 
1959 	return oom_group;
1960 }
1961 
1962 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1963 {
1964 	pr_info("Tasks in ");
1965 	pr_cont_cgroup_path(memcg->css.cgroup);
1966 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1967 }
1968 
1969 /**
1970  * lock_page_memcg - lock a page->mem_cgroup binding
1971  * @page: the page
1972  *
1973  * This function protects unlocked LRU pages from being moved to
1974  * another cgroup.
1975  *
1976  * It ensures lifetime of the returned memcg. Caller is responsible
1977  * for the lifetime of the page; __unlock_page_memcg() is available
1978  * when @page might get freed inside the locked section.
1979  */
1980 struct mem_cgroup *lock_page_memcg(struct page *page)
1981 {
1982 	struct mem_cgroup *memcg;
1983 	unsigned long flags;
1984 
1985 	/*
1986 	 * The RCU lock is held throughout the transaction.  The fast
1987 	 * path can get away without acquiring the memcg->move_lock
1988 	 * because page moving starts with an RCU grace period.
1989 	 *
1990 	 * The RCU lock also protects the memcg from being freed when
1991 	 * the page state that is going to change is the only thing
1992 	 * preventing the page itself from being freed. E.g. writeback
1993 	 * doesn't hold a page reference and relies on PG_writeback to
1994 	 * keep off truncation, migration and so forth.
1995 	 */
1996 	rcu_read_lock();
1997 
1998 	if (mem_cgroup_disabled())
1999 		return NULL;
2000 again:
2001 	memcg = page->mem_cgroup;
2002 	if (unlikely(!memcg))
2003 		return NULL;
2004 
2005 	if (atomic_read(&memcg->moving_account) <= 0)
2006 		return memcg;
2007 
2008 	spin_lock_irqsave(&memcg->move_lock, flags);
2009 	if (memcg != page->mem_cgroup) {
2010 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2011 		goto again;
2012 	}
2013 
2014 	/*
2015 	 * When charge migration first begins, we can have locked and
2016 	 * unlocked page stat updates happening concurrently.  Track
2017 	 * the task who has the lock for unlock_page_memcg().
2018 	 */
2019 	memcg->move_lock_task = current;
2020 	memcg->move_lock_flags = flags;
2021 
2022 	return memcg;
2023 }
2024 EXPORT_SYMBOL(lock_page_memcg);
2025 
2026 /**
2027  * __unlock_page_memcg - unlock and unpin a memcg
2028  * @memcg: the memcg
2029  *
2030  * Unlock and unpin a memcg returned by lock_page_memcg().
2031  */
2032 void __unlock_page_memcg(struct mem_cgroup *memcg)
2033 {
2034 	if (memcg && memcg->move_lock_task == current) {
2035 		unsigned long flags = memcg->move_lock_flags;
2036 
2037 		memcg->move_lock_task = NULL;
2038 		memcg->move_lock_flags = 0;
2039 
2040 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2041 	}
2042 
2043 	rcu_read_unlock();
2044 }
2045 
2046 /**
2047  * unlock_page_memcg - unlock a page->mem_cgroup binding
2048  * @page: the page
2049  */
2050 void unlock_page_memcg(struct page *page)
2051 {
2052 	__unlock_page_memcg(page->mem_cgroup);
2053 }
2054 EXPORT_SYMBOL(unlock_page_memcg);
2055 
2056 struct memcg_stock_pcp {
2057 	struct mem_cgroup *cached; /* this is never the root cgroup */
2058 	unsigned int nr_pages;
2059 	struct work_struct work;
2060 	unsigned long flags;
2061 #define FLUSHING_CACHED_CHARGE	0
2062 };
2063 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2064 static DEFINE_MUTEX(percpu_charge_mutex);
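
/*
 * A rough summary of how the per-CPU stock below is used (see try_charge()):
 * charges are first satisfied from the local cache via consume_stock(); on
 * the slow path a full MEMCG_CHARGE_BATCH is charged to the page counters
 * and the surplus is parked locally via refill_stock(); drain_stock() and
 * drain_all_stock() hand cached surplus back to the page counters, e.g. when
 * a limit is lowered, a cgroup is emptied, or the charge slow path needs the
 * slack back.
 */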
2065 
2066 /**
2067  * consume_stock: Try to consume stocked charge on this cpu.
2068  * @memcg: memcg to consume from.
2069  * @nr_pages: how many pages to charge.
2070  *
2071  * The charges will only happen if @memcg matches the current cpu's memcg
2072  * stock, and at least @nr_pages are available in that stock.  Failure to
2073  * service an allocation will refill the stock.
2074  *
2075  * returns true if successful, false otherwise.
2076  */
2077 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2078 {
2079 	struct memcg_stock_pcp *stock;
2080 	unsigned long flags;
2081 	bool ret = false;
2082 
2083 	if (nr_pages > MEMCG_CHARGE_BATCH)
2084 		return ret;
2085 
2086 	local_irq_save(flags);
2087 
2088 	stock = this_cpu_ptr(&memcg_stock);
2089 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2090 		stock->nr_pages -= nr_pages;
2091 		ret = true;
2092 	}
2093 
2094 	local_irq_restore(flags);
2095 
2096 	return ret;
2097 }
2098 
2099 /*
2100  * Returns stocks cached in percpu and reset cached information.
2101  */
2102 static void drain_stock(struct memcg_stock_pcp *stock)
2103 {
2104 	struct mem_cgroup *old = stock->cached;
2105 
2106 	if (stock->nr_pages) {
2107 		page_counter_uncharge(&old->memory, stock->nr_pages);
2108 		if (do_memsw_account())
2109 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2110 		css_put_many(&old->css, stock->nr_pages);
2111 		stock->nr_pages = 0;
2112 	}
2113 	stock->cached = NULL;
2114 }
2115 
2116 static void drain_local_stock(struct work_struct *dummy)
2117 {
2118 	struct memcg_stock_pcp *stock;
2119 	unsigned long flags;
2120 
2121 	/*
2122 	 * The only protection from memory hotplug vs. drain_stock races is
2123 	 * that we always operate on local CPU stock here with IRQ disabled
2124 	 */
2125 	local_irq_save(flags);
2126 
2127 	stock = this_cpu_ptr(&memcg_stock);
2128 	drain_stock(stock);
2129 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2130 
2131 	local_irq_restore(flags);
2132 }
2133 
2134 /*
2135  * Cache @nr_pages worth of charges in the local per-CPU area.
2136  * They will be consumed by consume_stock() later.
2137  */
2138 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2139 {
2140 	struct memcg_stock_pcp *stock;
2141 	unsigned long flags;
2142 
2143 	local_irq_save(flags);
2144 
2145 	stock = this_cpu_ptr(&memcg_stock);
2146 	if (stock->cached != memcg) { /* reset if necessary */
2147 		drain_stock(stock);
2148 		stock->cached = memcg;
2149 	}
2150 	stock->nr_pages += nr_pages;
2151 
2152 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2153 		drain_stock(stock);
2154 
2155 	local_irq_restore(flags);
2156 }
2157 
2158 /*
2159  * Drain all per-CPU charge caches for the given root_memcg and the whole
2160  * subtree of the hierarchy under it.
2161  */
2162 static void drain_all_stock(struct mem_cgroup *root_memcg)
2163 {
2164 	int cpu, curcpu;
2165 
2166 	/* If someone's already draining, avoid adding more workers. */
2167 	if (!mutex_trylock(&percpu_charge_mutex))
2168 		return;
2169 	/*
2170 	 * Notify other cpus that system-wide "drain" is running
2171 	 * We do not care about races with the cpu hotplug because cpu down
2172 	 * as well as workers from this path always operate on the local
2173 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2174 	 */
2175 	curcpu = get_cpu();
2176 	for_each_online_cpu(cpu) {
2177 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2178 		struct mem_cgroup *memcg;
2179 		bool flush = false;
2180 
2181 		rcu_read_lock();
2182 		memcg = stock->cached;
2183 		if (memcg && stock->nr_pages &&
2184 		    mem_cgroup_is_descendant(memcg, root_memcg))
2185 			flush = true;
2186 		rcu_read_unlock();
2187 
2188 		if (flush &&
2189 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2190 			if (cpu == curcpu)
2191 				drain_local_stock(&stock->work);
2192 			else
2193 				schedule_work_on(cpu, &stock->work);
2194 		}
2195 	}
2196 	put_cpu();
2197 	mutex_unlock(&percpu_charge_mutex);
2198 }
2199 
2200 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2201 {
2202 	struct memcg_stock_pcp *stock;
2203 	struct mem_cgroup *memcg, *mi;
2204 
2205 	stock = &per_cpu(memcg_stock, cpu);
2206 	drain_stock(stock);
2207 
2208 	for_each_mem_cgroup(memcg) {
2209 		int i;
2210 
2211 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2212 			int nid;
2213 			long x;
2214 
2215 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2216 			if (x)
2217 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2218 					atomic_long_add(x, &memcg->vmstats[i]);
2219 
2220 			if (i >= NR_VM_NODE_STAT_ITEMS)
2221 				continue;
2222 
2223 			for_each_node(nid) {
2224 				struct mem_cgroup_per_node *pn;
2225 
2226 				pn = mem_cgroup_nodeinfo(memcg, nid);
2227 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2228 				if (x)
2229 					do {
2230 						atomic_long_add(x, &pn->lruvec_stat[i]);
2231 					} while ((pn = parent_nodeinfo(pn, nid)));
2232 			}
2233 		}
2234 
2235 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2236 			long x;
2237 
2238 			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2239 			if (x)
2240 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2241 					atomic_long_add(x, &memcg->vmevents[i]);
2242 		}
2243 	}
2244 
2245 	return 0;
2246 }
2247 
2248 static void reclaim_high(struct mem_cgroup *memcg,
2249 			 unsigned int nr_pages,
2250 			 gfp_t gfp_mask)
2251 {
2252 	do {
2253 		if (page_counter_read(&memcg->memory) <= READ_ONCE(memcg->high))
2254 			continue;
2255 		memcg_memory_event(memcg, MEMCG_HIGH);
2256 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2257 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2258 		 !mem_cgroup_is_root(memcg));
2259 }
2260 
2261 static void high_work_func(struct work_struct *work)
2262 {
2263 	struct mem_cgroup *memcg;
2264 
2265 	memcg = container_of(work, struct mem_cgroup, high_work);
2266 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2267 }
2268 
2269 /*
2270  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2271  * enough to still cause a significant slowdown in most cases, while still
2272  * allowing diagnostics and tracing to proceed without becoming stuck.
2273  */
2274 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2275 
2276 /*
2277  * When calculating the delay, we use these on either side of the exponentiation
2278  * to maintain precision and scale to a reasonable number of jiffies (see the
2279  * table below).
2280  *
2281  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2282  *   overage ratio to a delay.
2283  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2284  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2285  *   to produce a reasonable delay curve.
2286  *
2287  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2288  * reasonable delay curve compared to precision-adjusted overage, not
2289  * penalising heavily at first, but still making sure that growth beyond the
2290  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2291  * example, with a high of 100 megabytes:
2292  *
2293  *  +-------+------------------------+
2294  *  | usage | time to allocate in ms |
2295  *  +-------+------------------------+
2296  *  | 100M  |                      0 |
2297  *  | 101M  |                      6 |
2298  *  | 102M  |                     25 |
2299  *  | 103M  |                     57 |
2300  *  | 104M  |                    102 |
2301  *  | 105M  |                    159 |
2302  *  | 106M  |                    230 |
2303  *  | 107M  |                    313 |
2304  *  | 108M  |                    409 |
2305  *  | 109M  |                    518 |
2306  *  | 110M  |                    639 |
2307  *  | 111M  |                    774 |
2308  *  | 112M  |                    921 |
2309  *  | 113M  |                   1081 |
2310  *  | 114M  |                   1254 |
2311  *  | 115M  |                   1439 |
2312  *  | 116M  |                   1638 |
2313  *  | 117M  |                   1849 |
2314  *  | 118M  |                   2000 |
2315  *  | 119M  |                   2000 |
2316  *  | 120M  |                   2000 |
2317  *  +-------+------------------------+
2318  */
2319 #define MEMCG_DELAY_PRECISION_SHIFT 20
2320 #define MEMCG_DELAY_SCALING_SHIFT 14
2321 
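/*
 * A worked example against the table above (a sketch, assuming 4KiB pages,
 * HZ=1000 and nr_pages == MEMCG_CHARGE_BATCH so the later scaling is a
 * no-op): with memory.high at 100M (25600 pages) and usage at 110M (28160
 * pages), calculate_high_delay() computes
 *
 *   overage         = ((28160 - 25600) << MEMCG_DELAY_PRECISION_SHIFT) / 25600
 *                   = 104857
 *   penalty_jiffies = (104857 * 104857 * HZ)
 *                         >> MEMCG_DELAY_PRECISION_SHIFT
 *                         >> MEMCG_DELAY_SCALING_SHIFT
 *                   = 639
 *
 * i.e. the 639ms row in the table, before clamping to
 * MEMCG_MAX_HIGH_DELAY_JIFFIES.
 */
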
2322 /*
2323  * Calculate the number of jiffies for which a mischievous cgroup should be
2324  * penalised for exceeding its memory.high, checking both it and its ancestors.
2325  */
2326 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2327 					  unsigned int nr_pages)
2328 {
2329 	unsigned long penalty_jiffies;
2330 	u64 max_overage = 0;
2331 
2332 	do {
2333 		unsigned long usage, high;
2334 		u64 overage;
2335 
2336 		usage = page_counter_read(&memcg->memory);
2337 		high = READ_ONCE(memcg->high);
2338 
2339 		/*
2340 		 * Prevent division by 0 in overage calculation by acting as if
2341 		 * it was a threshold of 1 page
2342 		 */
2343 		high = max(high, 1UL);
2344 
2345 		overage = usage - high;
2346 		overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2347 		overage = div64_u64(overage, high);
2348 
2349 		if (overage > max_overage)
2350 			max_overage = overage;
2351 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2352 		 !mem_cgroup_is_root(memcg));
2353 
2354 	if (!max_overage)
2355 		return 0;
2356 
2357 	/*
2358 	 * We use overage compared to memory.high to calculate the number of
2359 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2360 	 * fairly lenient on small overages, and increasingly harsh when the
2361 	 * memcg in question makes it clear that it has no intention of stopping
2362 	 * its crazy behaviour, so we exponentially increase the delay based on
2363 	 * overage amount.
2364 	 */
2365 	penalty_jiffies = max_overage * max_overage * HZ;
2366 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2367 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2368 
2369 	/*
2370 	 * Factor in the task's own contribution to the overage, such that four
2371 	 * N-sized allocations are throttled approximately the same as one
2372 	 * 4N-sized allocation.
2373 	 *
2374 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2375 	 * larger the current charge batch is than that.
2376 	 */
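	/*
	 * A sketch of that scaling, assuming MEMCG_CHARGE_BATCH is 32 pages:
	 * a 64-page charge pays twice the penalty computed above, while a
	 * 16-page charge pays only half of it.
	 */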
2377 	penalty_jiffies = penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2378 
2379 	/*
2380 	 * Clamp the max delay per usermode return so as to still keep the
2381 	 * application moving forwards and also permit diagnostics, albeit
2382 	 * extremely slowly.
2383 	 */
2384 	return min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2385 }
2386 
2387 /*
2388  * Scheduled by try_charge() to be executed from the userland return path
2389  * and reclaims memory over the high limit.
2390  */
2391 void mem_cgroup_handle_over_high(void)
2392 {
2393 	unsigned long penalty_jiffies;
2394 	unsigned long pflags;
2395 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2396 	struct mem_cgroup *memcg;
2397 
2398 	if (likely(!nr_pages))
2399 		return;
2400 
2401 	memcg = get_mem_cgroup_from_mm(current->mm);
2402 	reclaim_high(memcg, nr_pages, GFP_KERNEL);
2403 	current->memcg_nr_pages_over_high = 0;
2404 
2405 	/*
2406 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2407 	 * allocators proactively to slow down excessive growth.
2408 	 */
2409 	penalty_jiffies = calculate_high_delay(memcg, nr_pages);
2410 
2411 	/*
2412 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2413 	 * that it's not even worth doing, in an attempt to be nice to those who
2414 	 * go only a small amount over their memory.high value and maybe haven't
2415 	 * been aggressively reclaimed enough yet.
2416 	 */
2417 	if (penalty_jiffies <= HZ / 100)
2418 		goto out;
2419 
2420 	/*
2421 	 * If we exit early, we're guaranteed to die (since
2422 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2423 	 * need to account for any ill-begotten jiffies to pay them off later.
2424 	 */
2425 	psi_memstall_enter(&pflags);
2426 	schedule_timeout_killable(penalty_jiffies);
2427 	psi_memstall_leave(&pflags);
2428 
2429 out:
2430 	css_put(&memcg->css);
2431 }
2432 
2433 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2434 		      unsigned int nr_pages)
2435 {
2436 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2437 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2438 	struct mem_cgroup *mem_over_limit;
2439 	struct page_counter *counter;
2440 	unsigned long nr_reclaimed;
2441 	bool may_swap = true;
2442 	bool drained = false;
2443 	enum oom_status oom_status;
2444 
2445 	if (mem_cgroup_is_root(memcg))
2446 		return 0;
2447 retry:
2448 	if (consume_stock(memcg, nr_pages))
2449 		return 0;
2450 
2451 	if (!do_memsw_account() ||
2452 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2453 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2454 			goto done_restock;
2455 		if (do_memsw_account())
2456 			page_counter_uncharge(&memcg->memsw, batch);
2457 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2458 	} else {
2459 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2460 		may_swap = false;
2461 	}
2462 
2463 	if (batch > nr_pages) {
2464 		batch = nr_pages;
2465 		goto retry;
2466 	}
2467 
2468 	/*
2469 	 * Memcg doesn't have a dedicated reserve for atomic
2470 	 * allocations. But like the global atomic pool, we need to
2471 	 * put the burden of reclaim on regular allocation requests
2472 	 * and let these go through as privileged allocations.
2473 	 */
2474 	if (gfp_mask & __GFP_ATOMIC)
2475 		goto force;
2476 
2477 	/*
2478 	 * Unlike in global OOM situations, memcg is not in a physical
2479 	 * memory shortage.  Allow dying and OOM-killed tasks to
2480 	 * bypass the last charges so that they can exit quickly and
2481 	 * free their memory.
2482 	 */
2483 	if (unlikely(should_force_charge()))
2484 		goto force;
2485 
2486 	/*
2487 	 * Prevent unbounded recursion when reclaim operations need to
2488 	 * allocate memory. This might exceed the limits temporarily,
2489 	 * but we prefer facilitating memory reclaim and getting back
2490 	 * under the limit over triggering OOM kills in these cases.
2491 	 */
2492 	if (unlikely(current->flags & PF_MEMALLOC))
2493 		goto force;
2494 
2495 	if (unlikely(task_in_memcg_oom(current)))
2496 		goto nomem;
2497 
2498 	if (!gfpflags_allow_blocking(gfp_mask))
2499 		goto nomem;
2500 
2501 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2502 
2503 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2504 						    gfp_mask, may_swap);
2505 
2506 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2507 		goto retry;
2508 
2509 	if (!drained) {
2510 		drain_all_stock(mem_over_limit);
2511 		drained = true;
2512 		goto retry;
2513 	}
2514 
2515 	if (gfp_mask & __GFP_NORETRY)
2516 		goto nomem;
2517 	/*
2518 	 * Even though the limit is exceeded at this point, reclaim
2519 	 * may have been able to free some pages.  Retry the charge
2520 	 * before killing the task.
2521 	 *
2522 	 * Only for regular pages, though: huge pages are rather
2523 	 * unlikely to succeed so close to the limit, and we fall back
2524 	 * to regular pages anyway in case of failure.
2525 	 */
2526 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2527 		goto retry;
2528 	/*
2529 	 * At task move, charge accounts can be doubly counted. So, it's
2530 	 * better to wait until the end of task_move if something is going on.
2531 	 */
2532 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2533 		goto retry;
2534 
2535 	if (nr_retries--)
2536 		goto retry;
2537 
2538 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2539 		goto nomem;
2540 
2541 	if (gfp_mask & __GFP_NOFAIL)
2542 		goto force;
2543 
2544 	if (fatal_signal_pending(current))
2545 		goto force;
2546 
2547 	/*
2548 	 * keep retrying as long as the memcg oom killer is able to make
2549 	 * a forward progress or bypass the charge if the oom killer
2550 	 * couldn't make any progress.
2551 	 */
2552 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2553 		       get_order(nr_pages * PAGE_SIZE));
2554 	switch (oom_status) {
2555 	case OOM_SUCCESS:
2556 		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2557 		goto retry;
2558 	case OOM_FAILED:
2559 		goto force;
2560 	default:
2561 		goto nomem;
2562 	}
2563 nomem:
2564 	if (!(gfp_mask & __GFP_NOFAIL))
2565 		return -ENOMEM;
2566 force:
2567 	/*
2568 	 * The allocation either can't fail or will lead to more memory
2569 	 * being freed very soon.  Allow memory usage go over the limit
2570 	 * temporarily by force charging it.
2571 	 */
2572 	page_counter_charge(&memcg->memory, nr_pages);
2573 	if (do_memsw_account())
2574 		page_counter_charge(&memcg->memsw, nr_pages);
2575 	css_get_many(&memcg->css, nr_pages);
2576 
2577 	return 0;
2578 
2579 done_restock:
2580 	css_get_many(&memcg->css, batch);
2581 	if (batch > nr_pages)
2582 		refill_stock(memcg, batch - nr_pages);
2583 
2584 	/*
2585 	 * If the hierarchy is above the normal consumption range, schedule
2586 	 * reclaim on returning to userland.  We can perform reclaim here
2587 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2588 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2589 	 * not recorded as it most likely matches current's and won't
2590 	 * change in the meantime.  As high limit is checked again before
2591 	 * reclaim, the cost of mismatch is negligible.
2592 	 */
2593 	do {
2594 		if (page_counter_read(&memcg->memory) > READ_ONCE(memcg->high)) {
2595 			/* Don't bother a random interrupted task */
2596 			if (in_interrupt()) {
2597 				schedule_work(&memcg->high_work);
2598 				break;
2599 			}
2600 			current->memcg_nr_pages_over_high += batch;
2601 			set_notify_resume(current);
2602 			break;
2603 		}
2604 	} while ((memcg = parent_mem_cgroup(memcg)));
2605 
2606 	return 0;
2607 }
2608 
2609 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2610 {
2611 	if (mem_cgroup_is_root(memcg))
2612 		return;
2613 
2614 	page_counter_uncharge(&memcg->memory, nr_pages);
2615 	if (do_memsw_account())
2616 		page_counter_uncharge(&memcg->memsw, nr_pages);
2617 
2618 	css_put_many(&memcg->css, nr_pages);
2619 }
2620 
2621 static void lock_page_lru(struct page *page, int *isolated)
2622 {
2623 	pg_data_t *pgdat = page_pgdat(page);
2624 
2625 	spin_lock_irq(&pgdat->lru_lock);
2626 	if (PageLRU(page)) {
2627 		struct lruvec *lruvec;
2628 
2629 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2630 		ClearPageLRU(page);
2631 		del_page_from_lru_list(page, lruvec, page_lru(page));
2632 		*isolated = 1;
2633 	} else
2634 		*isolated = 0;
2635 }
2636 
2637 static void unlock_page_lru(struct page *page, int isolated)
2638 {
2639 	pg_data_t *pgdat = page_pgdat(page);
2640 
2641 	if (isolated) {
2642 		struct lruvec *lruvec;
2643 
2644 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2645 		VM_BUG_ON_PAGE(PageLRU(page), page);
2646 		SetPageLRU(page);
2647 		add_page_to_lru_list(page, lruvec, page_lru(page));
2648 	}
2649 	spin_unlock_irq(&pgdat->lru_lock);
2650 }
2651 
2652 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2653 			  bool lrucare)
2654 {
2655 	int isolated;
2656 
2657 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2658 
2659 	/*
2660 	 * In some cases (SwapCache and FUSE's splice_buf->radixtree), the page
2661 	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2662 	 */
2663 	if (lrucare)
2664 		lock_page_lru(page, &isolated);
2665 
2666 	/*
2667 	 * Nobody should be changing or seriously looking at
2668 	 * page->mem_cgroup at this point:
2669 	 *
2670 	 * - the page is uncharged
2671 	 *
2672 	 * - the page is off-LRU
2673 	 *
2674 	 * - an anonymous fault has exclusive page access, except for
2675 	 *   a locked page table
2676 	 *
2677 	 * - a page cache insertion, a swapin fault, or a migration
2678 	 *   have the page locked
2679 	 */
2680 	page->mem_cgroup = memcg;
2681 
2682 	if (lrucare)
2683 		unlock_page_lru(page, isolated);
2684 }
2685 
2686 #ifdef CONFIG_MEMCG_KMEM
2687 /*
2688  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2689  *
2690  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2691  * cgroup_mutex, etc.
2692  */
2693 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2694 {
2695 	struct page *page;
2696 
2697 	if (mem_cgroup_disabled())
2698 		return NULL;
2699 
2700 	page = virt_to_head_page(p);
2701 
2702 	/*
2703 	 * Slab pages don't have page->mem_cgroup set because corresponding
2704 	 * kmem caches can be reparented during their lifetime. That's why
2705 	 * memcg_from_slab_page() should be used instead.
2706 	 */
2707 	if (PageSlab(page))
2708 		return memcg_from_slab_page(page);
2709 
2710 	/* All other pages use page->mem_cgroup */
2711 	return page->mem_cgroup;
2712 }
2713 
2714 static int memcg_alloc_cache_id(void)
2715 {
2716 	int id, size;
2717 	int err;
2718 
2719 	id = ida_simple_get(&memcg_cache_ida,
2720 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2721 	if (id < 0)
2722 		return id;
2723 
2724 	if (id < memcg_nr_cache_ids)
2725 		return id;
2726 
2727 	/*
2728 	 * There's no space for the new id in memcg_caches arrays,
2729 	 * so we have to grow them.
2730 	 */
2731 	down_write(&memcg_cache_ids_sem);
2732 
2733 	size = 2 * (id + 1);
2734 	if (size < MEMCG_CACHES_MIN_SIZE)
2735 		size = MEMCG_CACHES_MIN_SIZE;
2736 	else if (size > MEMCG_CACHES_MAX_SIZE)
2737 		size = MEMCG_CACHES_MAX_SIZE;
2738 
2739 	err = memcg_update_all_caches(size);
2740 	if (!err)
2741 		err = memcg_update_all_list_lrus(size);
2742 	if (!err)
2743 		memcg_nr_cache_ids = size;
2744 
2745 	up_write(&memcg_cache_ids_sem);
2746 
2747 	if (err) {
2748 		ida_simple_remove(&memcg_cache_ida, id);
2749 		return err;
2750 	}
2751 	return id;
2752 }
2753 
2754 static void memcg_free_cache_id(int id)
2755 {
2756 	ida_simple_remove(&memcg_cache_ida, id);
2757 }
2758 
2759 struct memcg_kmem_cache_create_work {
2760 	struct mem_cgroup *memcg;
2761 	struct kmem_cache *cachep;
2762 	struct work_struct work;
2763 };
2764 
2765 static void memcg_kmem_cache_create_func(struct work_struct *w)
2766 {
2767 	struct memcg_kmem_cache_create_work *cw =
2768 		container_of(w, struct memcg_kmem_cache_create_work, work);
2769 	struct mem_cgroup *memcg = cw->memcg;
2770 	struct kmem_cache *cachep = cw->cachep;
2771 
2772 	memcg_create_kmem_cache(memcg, cachep);
2773 
2774 	css_put(&memcg->css);
2775 	kfree(cw);
2776 }
2777 
2778 /*
2779  * Enqueue the creation of a per-memcg kmem_cache.
2780  */
2781 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2782 					       struct kmem_cache *cachep)
2783 {
2784 	struct memcg_kmem_cache_create_work *cw;
2785 
2786 	if (!css_tryget_online(&memcg->css))
2787 		return;
2788 
2789 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2790 	if (!cw)
2791 		return;
2792 
2793 	cw->memcg = memcg;
2794 	cw->cachep = cachep;
2795 	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2796 
2797 	queue_work(memcg_kmem_cache_wq, &cw->work);
2798 }
2799 
2800 static inline bool memcg_kmem_bypass(void)
2801 {
2802 	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2803 		return true;
2804 	return false;
2805 }
2806 
2807 /**
2808  * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2809  * @cachep: the original global kmem cache
2810  *
2811  * Return the kmem_cache we're supposed to use for a slab allocation.
2812  * We try to use the current memcg's version of the cache.
2813  *
2814  * If the cache does not exist yet and we are the first user of it, we
2815  * create it asynchronously in a workqueue and let the current allocation
2816  * go through with the original cache.
2817  *
2818  * This function takes a reference to the cache it returns to assure it
2819  * won't get destroyed while we are working with it. Once the caller is
2820  * done with it, memcg_kmem_put_cache() must be called to release the
2821  * reference.
2822  */
2823 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2824 {
2825 	struct mem_cgroup *memcg;
2826 	struct kmem_cache *memcg_cachep;
2827 	struct memcg_cache_array *arr;
2828 	int kmemcg_id;
2829 
2830 	VM_BUG_ON(!is_root_cache(cachep));
2831 
2832 	if (memcg_kmem_bypass())
2833 		return cachep;
2834 
2835 	rcu_read_lock();
2836 
2837 	if (unlikely(current->active_memcg))
2838 		memcg = current->active_memcg;
2839 	else
2840 		memcg = mem_cgroup_from_task(current);
2841 
2842 	if (!memcg || memcg == root_mem_cgroup)
2843 		goto out_unlock;
2844 
2845 	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2846 	if (kmemcg_id < 0)
2847 		goto out_unlock;
2848 
2849 	arr = rcu_dereference(cachep->memcg_params.memcg_caches);
2850 
2851 	/*
2852 	 * Make sure we will access the up-to-date value. The code updating
2853 	 * memcg_caches issues a write barrier to match the data dependency
2854 	 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
2855 	 */
2856 	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
2857 
2858 	/*
2859 	 * If we are in a safe context (can wait, and not in interrupt
2860 	 * context), we could be predictable and return right away.
2861 	 * This would guarantee that the allocation being performed
2862 	 * already belongs in the new cache.
2863 	 *
2864 	 * However, there are some clashes that can arise from locking.
2865 	 * For instance, because we acquire the slab_mutex while doing
2866 	 * memcg_create_kmem_cache, this means no further allocation
2867 	 * could happen with the slab_mutex held. So it's better to
2868 	 * defer everything.
2869 	 *
2870 	 * If the memcg is dying or memcg_cache is about to be released,
2871 	 * don't bother creating new kmem_caches. Because memcg_cachep
2872 	 * is ZEROed as the fist step of kmem offlining, we don't need
2873 	 * percpu_ref_tryget_live() here. css_tryget_online() check in
2874 	 * memcg_schedule_kmem_cache_create() will prevent us from
2875 	 * creation of a new kmem_cache.
2876 	 */
2877 	if (unlikely(!memcg_cachep))
2878 		memcg_schedule_kmem_cache_create(memcg, cachep);
2879 	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
2880 		cachep = memcg_cachep;
2881 out_unlock:
2882 	rcu_read_unlock();
2883 	return cachep;
2884 }
2885 
2886 /**
2887  * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2888  * @cachep: the cache returned by memcg_kmem_get_cache
2889  */
2890 void memcg_kmem_put_cache(struct kmem_cache *cachep)
2891 {
2892 	if (!is_root_cache(cachep))
2893 		percpu_ref_put(&cachep->memcg_params.refcnt);
2894 }
2895 
2896 /**
2897  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
2898  * @memcg: memory cgroup to charge
2899  * @gfp: reclaim mode
2900  * @nr_pages: number of pages to charge
2901  *
2902  * Returns 0 on success, an error code on failure.
2903  */
2904 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
2905 			unsigned int nr_pages)
2906 {
2907 	struct page_counter *counter;
2908 	int ret;
2909 
2910 	ret = try_charge(memcg, gfp, nr_pages);
2911 	if (ret)
2912 		return ret;
2913 
2914 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2915 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2916 
2917 		/*
2918 		 * Enforce __GFP_NOFAIL allocation because callers are not
2919 		 * prepared to see failures and likely do not have any failure
2920 		 * handling code.
2921 		 */
2922 		if (gfp & __GFP_NOFAIL) {
2923 			page_counter_charge(&memcg->kmem, nr_pages);
2924 			return 0;
2925 		}
2926 		cancel_charge(memcg, nr_pages);
2927 		return -ENOMEM;
2928 	}
2929 	return 0;
2930 }
2931 
2932 /**
2933  * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
2934  * @memcg: memcg to uncharge
2935  * @nr_pages: number of pages to uncharge
2936  */
2937 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
2938 {
2939 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2940 		page_counter_uncharge(&memcg->kmem, nr_pages);
2941 
2942 	page_counter_uncharge(&memcg->memory, nr_pages);
2943 	if (do_memsw_account())
2944 		page_counter_uncharge(&memcg->memsw, nr_pages);
2945 }
2946 
2947 /**
2948  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2949  * @page: page to charge
2950  * @gfp: reclaim mode
2951  * @order: allocation order
2952  *
2953  * Returns 0 on success, an error code on failure.
2954  */
2955 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2956 {
2957 	struct mem_cgroup *memcg;
2958 	int ret = 0;
2959 
2960 	if (memcg_kmem_bypass())
2961 		return 0;
2962 
2963 	memcg = get_mem_cgroup_from_current();
2964 	if (!mem_cgroup_is_root(memcg)) {
2965 		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
2966 		if (!ret) {
2967 			page->mem_cgroup = memcg;
2968 			__SetPageKmemcg(page);
2969 		}
2970 	}
2971 	css_put(&memcg->css);
2972 	return ret;
2973 }
2974 
2975 /**
2976  * __memcg_kmem_uncharge_page: uncharge a kmem page
2977  * @page: page to uncharge
2978  * @order: allocation order
2979  */
2980 void __memcg_kmem_uncharge_page(struct page *page, int order)
2981 {
2982 	struct mem_cgroup *memcg = page->mem_cgroup;
2983 	unsigned int nr_pages = 1 << order;
2984 
2985 	if (!memcg)
2986 		return;
2987 
2988 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2989 	__memcg_kmem_uncharge(memcg, nr_pages);
2990 	page->mem_cgroup = NULL;
2991 
2992 	/* slab pages do not have PageKmemcg flag set */
2993 	if (PageKmemcg(page))
2994 		__ClearPageKmemcg(page);
2995 
2996 	css_put_many(&memcg->css, nr_pages);
2997 }
2998 #endif /* CONFIG_MEMCG_KMEM */
2999 
3000 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3001 
3002 /*
3003  * Tail pages don't have page->mem_cgroup set yet; copy it from the head page.
3004  * We're under pgdat->lru_lock and migration entries are set up in all mappings.
3005  */
3006 void mem_cgroup_split_huge_fixup(struct page *head)
3007 {
3008 	int i;
3009 
3010 	if (mem_cgroup_disabled())
3011 		return;
3012 
3013 	for (i = 1; i < HPAGE_PMD_NR; i++)
3014 		head[i].mem_cgroup = head->mem_cgroup;
3015 
3016 	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
3017 }
3018 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3019 
3020 #ifdef CONFIG_MEMCG_SWAP
3021 /**
3022  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3023  * @entry: swap entry to be moved
3024  * @from:  mem_cgroup which the entry is moved from
3025  * @to:  mem_cgroup which the entry is moved to
3026  *
3027  * It succeeds only when the swap_cgroup's record for this entry is the same
3028  * as the mem_cgroup's id of @from.
3029  *
3030  * Returns 0 on success, -EINVAL on failure.
3031  *
3032  * The caller must have charged to @to, IOW, called page_counter_charge() on
3033  * both res and memsw, and called css_get().
3034  */
3035 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3036 				struct mem_cgroup *from, struct mem_cgroup *to)
3037 {
3038 	unsigned short old_id, new_id;
3039 
3040 	old_id = mem_cgroup_id(from);
3041 	new_id = mem_cgroup_id(to);
3042 
3043 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3044 		mod_memcg_state(from, MEMCG_SWAP, -1);
3045 		mod_memcg_state(to, MEMCG_SWAP, 1);
3046 		return 0;
3047 	}
3048 	return -EINVAL;
3049 }
3050 #else
3051 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3052 				struct mem_cgroup *from, struct mem_cgroup *to)
3053 {
3054 	return -EINVAL;
3055 }
3056 #endif
3057 
3058 static DEFINE_MUTEX(memcg_max_mutex);
3059 
3060 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3061 				 unsigned long max, bool memsw)
3062 {
3063 	bool enlarge = false;
3064 	bool drained = false;
3065 	int ret;
3066 	bool limits_invariant;
3067 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3068 
3069 	do {
3070 		if (signal_pending(current)) {
3071 			ret = -EINTR;
3072 			break;
3073 		}
3074 
3075 		mutex_lock(&memcg_max_mutex);
3076 		/*
3077 		 * Make sure that the new limit (memsw or memory limit) doesn't
3078 		 * break our basic invariant rule memory.max <= memsw.max.
3079 		 */
3080 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3081 					   max <= memcg->memsw.max;
3082 		if (!limits_invariant) {
3083 			mutex_unlock(&memcg_max_mutex);
3084 			ret = -EINVAL;
3085 			break;
3086 		}
3087 		if (max > counter->max)
3088 			enlarge = true;
3089 		ret = page_counter_set_max(counter, max);
3090 		mutex_unlock(&memcg_max_mutex);
3091 
3092 		if (!ret)
3093 			break;
3094 
3095 		if (!drained) {
3096 			drain_all_stock(memcg);
3097 			drained = true;
3098 			continue;
3099 		}
3100 
3101 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3102 					GFP_KERNEL, !memsw)) {
3103 			ret = -EBUSY;
3104 			break;
3105 		}
3106 	} while (true);
3107 
3108 	if (!ret && enlarge)
3109 		memcg_oom_recover(memcg);
3110 
3111 	return ret;
3112 }
3113 
3114 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3115 					    gfp_t gfp_mask,
3116 					    unsigned long *total_scanned)
3117 {
3118 	unsigned long nr_reclaimed = 0;
3119 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3120 	unsigned long reclaimed;
3121 	int loop = 0;
3122 	struct mem_cgroup_tree_per_node *mctz;
3123 	unsigned long excess;
3124 	unsigned long nr_scanned;
3125 
3126 	if (order > 0)
3127 		return 0;
3128 
3129 	mctz = soft_limit_tree_node(pgdat->node_id);
3130 
3131 	/*
3132 	 * Do not even bother to check the largest node if the root
3133 	 * is empty. Do it lockless to prevent lock bouncing. Races
3134 	 * are acceptable as soft limit is best effort anyway.
3135 	 */
3136 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3137 		return 0;
3138 
3139 	/*
3140 	 * This loop can run for a while, especially if mem_cgroups continuously
3141 	 * keep exceeding their soft limit and putting the system under
3142 	 * pressure.
3143 	 */
3144 	do {
3145 		if (next_mz)
3146 			mz = next_mz;
3147 		else
3148 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3149 		if (!mz)
3150 			break;
3151 
3152 		nr_scanned = 0;
3153 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3154 						    gfp_mask, &nr_scanned);
3155 		nr_reclaimed += reclaimed;
3156 		*total_scanned += nr_scanned;
3157 		spin_lock_irq(&mctz->lock);
3158 		__mem_cgroup_remove_exceeded(mz, mctz);
3159 
3160 		/*
3161 		 * If we failed to reclaim anything from this memory cgroup
3162 		 * it is time to move on to the next cgroup
3163 		 */
3164 		next_mz = NULL;
3165 		if (!reclaimed)
3166 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3167 
3168 		excess = soft_limit_excess(mz->memcg);
3169 		/*
3170 		 * One school of thought says that we should not add
3171 		 * back the node to the tree if reclaim returns 0.
3172 		 * But our reclaim could return 0, simply because due
3173 		 * to priority we are exposing a smaller subset of
3174 		 * memory to reclaim from. Consider this as a longer
3175 		 * term TODO.
3176 		 */
3177 		/* If excess == 0, no tree ops */
3178 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3179 		spin_unlock_irq(&mctz->lock);
3180 		css_put(&mz->memcg->css);
3181 		loop++;
3182 		/*
3183 		 * Could not reclaim anything and there are no more
3184 		 * mem cgroups to try or we seem to be looping without
3185 		 * reclaiming anything.
3186 		 */
3187 		if (!nr_reclaimed &&
3188 			(next_mz == NULL ||
3189 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3190 			break;
3191 	} while (!nr_reclaimed);
3192 	if (next_mz)
3193 		css_put(&next_mz->memcg->css);
3194 	return nr_reclaimed;
3195 }
3196 
3197 /*
3198  * Test whether @memcg has children, dead or alive.  Note that this
3199  * function doesn't care whether @memcg has use_hierarchy enabled and
3200  * returns %true if there are child csses according to the cgroup
3201  * hierarchy.  Testing use_hierarchy is the caller's responsiblity.
3202  */
3203 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3204 {
3205 	bool ret;
3206 
3207 	rcu_read_lock();
3208 	ret = css_next_child(NULL, &memcg->css);
3209 	rcu_read_unlock();
3210 	return ret;
3211 }
3212 
3213 /*
3214  * Reclaims as many pages from the given memcg as possible.
3215  *
3216  * Caller is responsible for holding css reference for memcg.
3217  */
3218 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3219 {
3220 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3221 
3222 	/* we call try-to-free pages for make this cgroup empty */
3223 	lru_add_drain_all();
3224 
3225 	drain_all_stock(memcg);
3226 
3227 	/* try to free all pages in this cgroup */
3228 	while (nr_retries && page_counter_read(&memcg->memory)) {
3229 		int progress;
3230 
3231 		if (signal_pending(current))
3232 			return -EINTR;
3233 
3234 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3235 							GFP_KERNEL, true);
3236 		if (!progress) {
3237 			nr_retries--;
3238 			/* maybe some writeback is necessary */
3239 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3240 		}
3241 
3242 	}
3243 
3244 	return 0;
3245 }
3246 
3247 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3248 					    char *buf, size_t nbytes,
3249 					    loff_t off)
3250 {
3251 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3252 
3253 	if (mem_cgroup_is_root(memcg))
3254 		return -EINVAL;
3255 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3256 }
3257 
3258 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3259 				     struct cftype *cft)
3260 {
3261 	return mem_cgroup_from_css(css)->use_hierarchy;
3262 }
3263 
3264 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3265 				      struct cftype *cft, u64 val)
3266 {
3267 	int retval = 0;
3268 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3269 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3270 
3271 	if (memcg->use_hierarchy == val)
3272 		return 0;
3273 
3274 	/*
3275 	 * If parent's use_hierarchy is set, we can't make any modifications
3276 	 * in the child subtrees. If it is unset, then the change can
3277 	 * occur, provided the current cgroup has no children.
3278 	 *
3279 	 * For the root cgroup, parent_mem is NULL, we allow value to be
3280 	 * set if there are no children.
3281 	 */
3282 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3283 				(val == 1 || val == 0)) {
3284 		if (!memcg_has_children(memcg))
3285 			memcg->use_hierarchy = val;
3286 		else
3287 			retval = -EBUSY;
3288 	} else
3289 		retval = -EINVAL;
3290 
3291 	return retval;
3292 }
3293 
3294 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3295 {
3296 	unsigned long val;
3297 
3298 	if (mem_cgroup_is_root(memcg)) {
3299 		val = memcg_page_state(memcg, MEMCG_CACHE) +
3300 			memcg_page_state(memcg, MEMCG_RSS);
3301 		if (swap)
3302 			val += memcg_page_state(memcg, MEMCG_SWAP);
3303 	} else {
3304 		if (!swap)
3305 			val = page_counter_read(&memcg->memory);
3306 		else
3307 			val = page_counter_read(&memcg->memsw);
3308 	}
3309 	return val;
3310 }
3311 
3312 enum {
3313 	RES_USAGE,
3314 	RES_LIMIT,
3315 	RES_MAX_USAGE,
3316 	RES_FAILCNT,
3317 	RES_SOFT_LIMIT,
3318 };
3319 
3320 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3321 			       struct cftype *cft)
3322 {
3323 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3324 	struct page_counter *counter;
3325 
3326 	switch (MEMFILE_TYPE(cft->private)) {
3327 	case _MEM:
3328 		counter = &memcg->memory;
3329 		break;
3330 	case _MEMSWAP:
3331 		counter = &memcg->memsw;
3332 		break;
3333 	case _KMEM:
3334 		counter = &memcg->kmem;
3335 		break;
3336 	case _TCP:
3337 		counter = &memcg->tcpmem;
3338 		break;
3339 	default:
3340 		BUG();
3341 	}
3342 
3343 	switch (MEMFILE_ATTR(cft->private)) {
3344 	case RES_USAGE:
3345 		if (counter == &memcg->memory)
3346 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3347 		if (counter == &memcg->memsw)
3348 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3349 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3350 	case RES_LIMIT:
3351 		return (u64)counter->max * PAGE_SIZE;
3352 	case RES_MAX_USAGE:
3353 		return (u64)counter->watermark * PAGE_SIZE;
3354 	case RES_FAILCNT:
3355 		return counter->failcnt;
3356 	case RES_SOFT_LIMIT:
3357 		return (u64)memcg->soft_limit * PAGE_SIZE;
3358 	default:
3359 		BUG();
3360 	}
3361 }
3362 
3363 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3364 {
3365 	unsigned long stat[MEMCG_NR_STAT] = {0};
3366 	struct mem_cgroup *mi;
3367 	int node, cpu, i;
3368 
3369 	for_each_online_cpu(cpu)
3370 		for (i = 0; i < MEMCG_NR_STAT; i++)
3371 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3372 
3373 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3374 		for (i = 0; i < MEMCG_NR_STAT; i++)
3375 			atomic_long_add(stat[i], &mi->vmstats[i]);
3376 
3377 	for_each_node(node) {
3378 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3379 		struct mem_cgroup_per_node *pi;
3380 
3381 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3382 			stat[i] = 0;
3383 
3384 		for_each_online_cpu(cpu)
3385 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3386 				stat[i] += per_cpu(
3387 					pn->lruvec_stat_cpu->count[i], cpu);
3388 
3389 		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3390 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3391 				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3392 	}
3393 }
3394 
3395 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3396 {
3397 	unsigned long events[NR_VM_EVENT_ITEMS];
3398 	struct mem_cgroup *mi;
3399 	int cpu, i;
3400 
3401 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3402 		events[i] = 0;
3403 
3404 	for_each_online_cpu(cpu)
3405 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3406 			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3407 					     cpu);
3408 
3409 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3410 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3411 			atomic_long_add(events[i], &mi->vmevents[i]);
3412 }
3413 
3414 #ifdef CONFIG_MEMCG_KMEM
3415 static int memcg_online_kmem(struct mem_cgroup *memcg)
3416 {
3417 	int memcg_id;
3418 
3419 	if (cgroup_memory_nokmem)
3420 		return 0;
3421 
3422 	BUG_ON(memcg->kmemcg_id >= 0);
3423 	BUG_ON(memcg->kmem_state);
3424 
3425 	memcg_id = memcg_alloc_cache_id();
3426 	if (memcg_id < 0)
3427 		return memcg_id;
3428 
3429 	static_branch_inc(&memcg_kmem_enabled_key);
3430 	/*
3431 	 * A memory cgroup is considered kmem-online as soon as it gets
3432 	 * kmemcg_id. Setting the id after enabling static branching will
3433 	 * guarantee no one starts accounting before all call sites are
3434 	 * patched.
3435 	 */
3436 	memcg->kmemcg_id = memcg_id;
3437 	memcg->kmem_state = KMEM_ONLINE;
3438 	INIT_LIST_HEAD(&memcg->kmem_caches);
3439 
3440 	return 0;
3441 }
3442 
3443 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3444 {
3445 	struct cgroup_subsys_state *css;
3446 	struct mem_cgroup *parent, *child;
3447 	int kmemcg_id;
3448 
3449 	if (memcg->kmem_state != KMEM_ONLINE)
3450 		return;
3451 	/*
3452 	 * Clear the online state before clearing memcg_caches array
3453 	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
3454 	 * guarantees that no cache will be created for this cgroup
3455 	 * after we are done (see memcg_create_kmem_cache()).
3456 	 */
3457 	memcg->kmem_state = KMEM_ALLOCATED;
3458 
3459 	parent = parent_mem_cgroup(memcg);
3460 	if (!parent)
3461 		parent = root_mem_cgroup;
3462 
3463 	/*
3464 	 * Deactivate and reparent kmem_caches.
3465 	 */
3466 	memcg_deactivate_kmem_caches(memcg, parent);
3467 
3468 	kmemcg_id = memcg->kmemcg_id;
3469 	BUG_ON(kmemcg_id < 0);
3470 
3471 	/*
3472 	 * Change kmemcg_id of this cgroup and all its descendants to the
3473 	 * parent's id, and then move all entries from this cgroup's list_lrus
3474 	 * to ones of the parent. After we have finished, all list_lrus
3475 	 * corresponding to this cgroup are guaranteed to remain empty. The
3476 	 * ordering is imposed by list_lru_node->lock taken by
3477 	 * memcg_drain_all_list_lrus().
3478 	 */
3479 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3480 	css_for_each_descendant_pre(css, &memcg->css) {
3481 		child = mem_cgroup_from_css(css);
3482 		BUG_ON(child->kmemcg_id != kmemcg_id);
3483 		child->kmemcg_id = parent->kmemcg_id;
3484 		if (!memcg->use_hierarchy)
3485 			break;
3486 	}
3487 	rcu_read_unlock();
3488 
3489 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3490 
3491 	memcg_free_cache_id(kmemcg_id);
3492 }
3493 
3494 static void memcg_free_kmem(struct mem_cgroup *memcg)
3495 {
3496 	/* css_alloc() failed, offlining didn't happen */
3497 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3498 		memcg_offline_kmem(memcg);
3499 
3500 	if (memcg->kmem_state == KMEM_ALLOCATED) {
3501 		WARN_ON(!list_empty(&memcg->kmem_caches));
3502 		static_branch_dec(&memcg_kmem_enabled_key);
3503 	}
3504 }
3505 #else
3506 static int memcg_online_kmem(struct mem_cgroup *memcg)
3507 {
3508 	return 0;
3509 }
3510 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3511 {
3512 }
3513 static void memcg_free_kmem(struct mem_cgroup *memcg)
3514 {
3515 }
3516 #endif /* CONFIG_MEMCG_KMEM */
3517 
3518 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3519 				 unsigned long max)
3520 {
3521 	int ret;
3522 
3523 	mutex_lock(&memcg_max_mutex);
3524 	ret = page_counter_set_max(&memcg->kmem, max);
3525 	mutex_unlock(&memcg_max_mutex);
3526 	return ret;
3527 }
3528 
3529 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3530 {
3531 	int ret;
3532 
3533 	mutex_lock(&memcg_max_mutex);
3534 
3535 	ret = page_counter_set_max(&memcg->tcpmem, max);
3536 	if (ret)
3537 		goto out;
3538 
3539 	if (!memcg->tcpmem_active) {
3540 		/*
3541 		 * The active flag needs to be written after the static_key
3542 		 * update. This is what guarantees that the socket activation
3543 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3544 		 * for details, and note that we don't mark any socket as
3545 		 * belonging to this memcg until that flag is up.
3546 		 *
3547 		 * We need to do this, because static_keys will span multiple
3548 		 * sites, but we can't control their order. If we mark a socket
3549 		 * as accounted, but the accounting functions are not patched in
3550 		 * yet, we'll lose accounting.
3551 		 *
3552 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3553 	 * because when this value changes, the code to process it is not
3554 		 * patched in yet.
3555 		 */
3556 		static_branch_inc(&memcg_sockets_enabled_key);
3557 		memcg->tcpmem_active = true;
3558 	}
3559 out:
3560 	mutex_unlock(&memcg_max_mutex);
3561 	return ret;
3562 }
3563 
3564 /*
3565  * The user of this function is...
3566  * RES_LIMIT.
3567  */
3568 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3569 				char *buf, size_t nbytes, loff_t off)
3570 {
3571 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3572 	unsigned long nr_pages;
3573 	int ret;
3574 
3575 	buf = strstrip(buf);
3576 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3577 	if (ret)
3578 		return ret;
3579 
3580 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3581 	case RES_LIMIT:
3582 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3583 			ret = -EINVAL;
3584 			break;
3585 		}
3586 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3587 		case _MEM:
3588 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3589 			break;
3590 		case _MEMSWAP:
3591 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3592 			break;
3593 		case _KMEM:
3594 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3595 				     "Please report your usecase to linux-mm@kvack.org if you "
3596 				     "depend on this functionality.\n");
3597 			ret = memcg_update_kmem_max(memcg, nr_pages);
3598 			break;
3599 		case _TCP:
3600 			ret = memcg_update_tcp_max(memcg, nr_pages);
3601 			break;
3602 		}
3603 		break;
3604 	case RES_SOFT_LIMIT:
3605 		memcg->soft_limit = nr_pages;
3606 		ret = 0;
3607 		break;
3608 	}
3609 	return ret ?: nbytes;
3610 }
3611 
3612 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3613 				size_t nbytes, loff_t off)
3614 {
3615 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3616 	struct page_counter *counter;
3617 
3618 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3619 	case _MEM:
3620 		counter = &memcg->memory;
3621 		break;
3622 	case _MEMSWAP:
3623 		counter = &memcg->memsw;
3624 		break;
3625 	case _KMEM:
3626 		counter = &memcg->kmem;
3627 		break;
3628 	case _TCP:
3629 		counter = &memcg->tcpmem;
3630 		break;
3631 	default:
3632 		BUG();
3633 	}
3634 
3635 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3636 	case RES_MAX_USAGE:
3637 		page_counter_reset_watermark(counter);
3638 		break;
3639 	case RES_FAILCNT:
3640 		counter->failcnt = 0;
3641 		break;
3642 	default:
3643 		BUG();
3644 	}
3645 
3646 	return nbytes;
3647 }
3648 
3649 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3650 					struct cftype *cft)
3651 {
3652 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3653 }
3654 
3655 #ifdef CONFIG_MMU
3656 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3657 					struct cftype *cft, u64 val)
3658 {
3659 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3660 
3661 	if (val & ~MOVE_MASK)
3662 		return -EINVAL;
3663 
3664 	/*
3665 	 * No kind of locking is needed in here, because ->can_attach() will
3666 	 * check this value once in the beginning of the process, and then carry
3667 	 * on with stale data. This means that changes to this value will only
3668 	 * affect task migrations starting after the change.
3669 	 */
3670 	memcg->move_charge_at_immigrate = val;
3671 	return 0;
3672 }
3673 #else
3674 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3675 					struct cftype *cft, u64 val)
3676 {
3677 	return -ENOSYS;
3678 }
3679 #endif
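/*
 * Illustrative usage (cgroup v1, path assumed): the value is a bitmask in
 * which bit 0 (MOVE_ANON) selects anonymous pages and bit 1 (MOVE_FILE)
 * selects file pages to be moved when a task migrates into the cgroup; per
 * the comment above, the new value only affects migrations started after
 * the write:
 *
 *   echo 3 > /sys/fs/cgroup/memory/foo/memory.move_charge_at_immigrate
 */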
3680 
3681 #ifdef CONFIG_NUMA
3682 
3683 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3684 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3685 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3686 
3687 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3688 					   int nid, unsigned int lru_mask)
3689 {
3690 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3691 	unsigned long nr = 0;
3692 	enum lru_list lru;
3693 
3694 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3695 
3696 	for_each_lru(lru) {
3697 		if (!(BIT(lru) & lru_mask))
3698 			continue;
3699 		nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3700 	}
3701 	return nr;
3702 }
3703 
3704 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3705 					     unsigned int lru_mask)
3706 {
3707 	unsigned long nr = 0;
3708 	enum lru_list lru;
3709 
3710 	for_each_lru(lru) {
3711 		if (!(BIT(lru) & lru_mask))
3712 			continue;
3713 		nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3714 	}
3715 	return nr;
3716 }
3717 
3718 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3719 {
3720 	struct numa_stat {
3721 		const char *name;
3722 		unsigned int lru_mask;
3723 	};
3724 
3725 	static const struct numa_stat stats[] = {
3726 		{ "total", LRU_ALL },
3727 		{ "file", LRU_ALL_FILE },
3728 		{ "anon", LRU_ALL_ANON },
3729 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3730 	};
3731 	const struct numa_stat *stat;
3732 	int nid;
3733 	unsigned long nr;
3734 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3735 
3736 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3737 		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3738 		seq_printf(m, "%s=%lu", stat->name, nr);
3739 		for_each_node_state(nid, N_MEMORY) {
3740 			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3741 							  stat->lru_mask);
3742 			seq_printf(m, " N%d=%lu", nid, nr);
3743 		}
3744 		seq_putc(m, '\n');
3745 	}
3746 
3747 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3748 		struct mem_cgroup *iter;
3749 
3750 		nr = 0;
3751 		for_each_mem_cgroup_tree(iter, memcg)
3752 			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3753 		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3754 		for_each_node_state(nid, N_MEMORY) {
3755 			nr = 0;
3756 			for_each_mem_cgroup_tree(iter, memcg)
3757 				nr += mem_cgroup_node_nr_lru_pages(
3758 					iter, nid, stat->lru_mask);
3759 			seq_printf(m, " N%d=%lu", nid, nr);
3760 		}
3761 		seq_putc(m, '\n');
3762 	}
3763 
3764 	return 0;
3765 }
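/*
 * Example of the memory.numa_stat output assembled above (values are
 * illustrative, from a hypothetical two-node machine):
 *
 *   total=2048 N0=1024 N1=1024
 *   file=1536 N0=768 N1=768
 *   anon=512 N0=256 N1=256
 *   unevictable=0 N0=0 N1=0
 *   hierarchical_total=4096 N0=2048 N1=2048
 *   ...
 */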
3766 #endif /* CONFIG_NUMA */
3767 
3768 static const unsigned int memcg1_stats[] = {
3769 	MEMCG_CACHE,
3770 	MEMCG_RSS,
3771 	MEMCG_RSS_HUGE,
3772 	NR_SHMEM,
3773 	NR_FILE_MAPPED,
3774 	NR_FILE_DIRTY,
3775 	NR_WRITEBACK,
3776 	MEMCG_SWAP,
3777 };
3778 
3779 static const char *const memcg1_stat_names[] = {
3780 	"cache",
3781 	"rss",
3782 	"rss_huge",
3783 	"shmem",
3784 	"mapped_file",
3785 	"dirty",
3786 	"writeback",
3787 	"swap",
3788 };
3789 
3790 /* Universal VM events cgroup1 shows, original sort order */
3791 static const unsigned int memcg1_events[] = {
3792 	PGPGIN,
3793 	PGPGOUT,
3794 	PGFAULT,
3795 	PGMAJFAULT,
3796 };
3797 
3798 static int memcg_stat_show(struct seq_file *m, void *v)
3799 {
3800 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3801 	unsigned long memory, memsw;
3802 	struct mem_cgroup *mi;
3803 	unsigned int i;
3804 
3805 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3806 
3807 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3808 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3809 			continue;
3810 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3811 			   memcg_page_state_local(memcg, memcg1_stats[i]) *
3812 			   PAGE_SIZE);
3813 	}
3814 
3815 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3816 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
3817 			   memcg_events_local(memcg, memcg1_events[i]));
3818 
3819 	for (i = 0; i < NR_LRU_LISTS; i++)
3820 		seq_printf(m, "%s %lu\n", lru_list_name(i),
3821 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3822 			   PAGE_SIZE);
3823 
3824 	/* Hierarchical information */
3825 	memory = memsw = PAGE_COUNTER_MAX;
3826 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3827 		memory = min(memory, READ_ONCE(mi->memory.max));
3828 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
3829 	}
3830 	seq_printf(m, "hierarchical_memory_limit %llu\n",
3831 		   (u64)memory * PAGE_SIZE);
3832 	if (do_memsw_account())
3833 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3834 			   (u64)memsw * PAGE_SIZE);
3835 
3836 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3837 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3838 			continue;
3839 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3840 			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
3841 			   PAGE_SIZE);
3842 	}
3843 
3844 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3845 		seq_printf(m, "total_%s %llu\n",
3846 			   vm_event_name(memcg1_events[i]),
3847 			   (u64)memcg_events(memcg, memcg1_events[i]));
3848 
3849 	for (i = 0; i < NR_LRU_LISTS; i++)
3850 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
3851 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3852 			   PAGE_SIZE);
3853 
3854 #ifdef CONFIG_DEBUG_VM
3855 	{
3856 		pg_data_t *pgdat;
3857 		struct mem_cgroup_per_node *mz;
3858 		struct zone_reclaim_stat *rstat;
3859 		unsigned long recent_rotated[2] = {0, 0};
3860 		unsigned long recent_scanned[2] = {0, 0};
3861 
3862 		for_each_online_pgdat(pgdat) {
3863 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3864 			rstat = &mz->lruvec.reclaim_stat;
3865 
3866 			recent_rotated[0] += rstat->recent_rotated[0];
3867 			recent_rotated[1] += rstat->recent_rotated[1];
3868 			recent_scanned[0] += rstat->recent_scanned[0];
3869 			recent_scanned[1] += rstat->recent_scanned[1];
3870 		}
3871 		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3872 		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3873 		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3874 		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3875 	}
3876 #endif
3877 
3878 	return 0;
3879 }
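/*
 * Example of the memory.stat output produced above (illustrative values,
 * only a subset of the lines shown; the total_* lines are hierarchical
 * sums, the plain lines are local to this memcg):
 *
 *   cache 1048576
 *   rss 2097152
 *   ...
 *   pgpgin 1024
 *   pgpgout 512
 *   ...
 *   hierarchical_memory_limit 536870912
 *   total_cache 1048576
 *   total_rss 2097152
 *   ...
 */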
3880 
3881 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3882 				      struct cftype *cft)
3883 {
3884 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3885 
3886 	return mem_cgroup_swappiness(memcg);
3887 }
3888 
3889 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3890 				       struct cftype *cft, u64 val)
3891 {
3892 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3893 
3894 	if (val > 100)
3895 		return -EINVAL;
3896 
3897 	if (css->parent)
3898 		memcg->swappiness = val;
3899 	else
3900 		vm_swappiness = val;
3901 
3902 	return 0;
3903 }
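/*
 * Illustrative usage (cgroup v1, path assumed): values in the range 0-100
 * are accepted, anything larger is rejected with -EINVAL; a write to the
 * root cgroup changes the global vm_swappiness instead:
 *
 *   echo 10 > /sys/fs/cgroup/memory/foo/memory.swappiness
 */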
3904 
3905 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3906 {
3907 	struct mem_cgroup_threshold_ary *t;
3908 	unsigned long usage;
3909 	int i;
3910 
3911 	rcu_read_lock();
3912 	if (!swap)
3913 		t = rcu_dereference(memcg->thresholds.primary);
3914 	else
3915 		t = rcu_dereference(memcg->memsw_thresholds.primary);
3916 
3917 	if (!t)
3918 		goto unlock;
3919 
3920 	usage = mem_cgroup_usage(memcg, swap);
3921 
3922 	/*
3923 	 * current_threshold points to the threshold just below or equal to usage.
3924 	 * If that's not true, a threshold was crossed after the last
3925 	 * call of __mem_cgroup_threshold().
3926 	 */
3927 	i = t->current_threshold;
3928 
3929 	/*
3930 	 * Iterate backward over array of thresholds starting from
3931 	 * current_threshold and check if a threshold is crossed.
3932 	 * If none of the thresholds below usage is crossed, we read
3933 	 * only one element of the array here.
3934 	 */
3935 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3936 		eventfd_signal(t->entries[i].eventfd, 1);
3937 
3938 	/* i = current_threshold + 1 */
3939 	i++;
3940 
3941 	/*
3942 	 * Iterate forward over array of thresholds starting from
3943 	 * current_threshold+1 and check if a threshold is crossed.
3944 	 * If none of the thresholds above usage is crossed, we read
3945 	 * only one element of the array here.
3946 	 */
3947 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3948 		eventfd_signal(t->entries[i].eventfd, 1);
3949 
3950 	/* Update current_threshold */
3951 	t->current_threshold = i - 1;
3952 unlock:
3953 	rcu_read_unlock();
3954 }
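/*
 * A small worked example of the invariant maintained above (numbers are
 * illustrative): with thresholds [4M, 8M, 16M] and usage just above 8M,
 * current_threshold is 1. If usage then drops to 3M, the backward loop
 * signals the 8M and 4M eventfds and current_threshold becomes -1; if
 * usage instead rises to 20M, the forward loop signals the 16M eventfd
 * and current_threshold becomes 2.
 */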
3955 
3956 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3957 {
3958 	while (memcg) {
3959 		__mem_cgroup_threshold(memcg, false);
3960 		if (do_memsw_account())
3961 			__mem_cgroup_threshold(memcg, true);
3962 
3963 		memcg = parent_mem_cgroup(memcg);
3964 	}
3965 }
3966 
3967 static int compare_thresholds(const void *a, const void *b)
3968 {
3969 	const struct mem_cgroup_threshold *_a = a;
3970 	const struct mem_cgroup_threshold *_b = b;
3971 
3972 	if (_a->threshold > _b->threshold)
3973 		return 1;
3974 
3975 	if (_a->threshold < _b->threshold)
3976 		return -1;
3977 
3978 	return 0;
3979 }
3980 
3981 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3982 {
3983 	struct mem_cgroup_eventfd_list *ev;
3984 
3985 	spin_lock(&memcg_oom_lock);
3986 
3987 	list_for_each_entry(ev, &memcg->oom_notify, list)
3988 		eventfd_signal(ev->eventfd, 1);
3989 
3990 	spin_unlock(&memcg_oom_lock);
3991 	return 0;
3992 }
3993 
3994 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3995 {
3996 	struct mem_cgroup *iter;
3997 
3998 	for_each_mem_cgroup_tree(iter, memcg)
3999 		mem_cgroup_oom_notify_cb(iter);
4000 }
4001 
4002 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4003 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4004 {
4005 	struct mem_cgroup_thresholds *thresholds;
4006 	struct mem_cgroup_threshold_ary *new;
4007 	unsigned long threshold;
4008 	unsigned long usage;
4009 	int i, size, ret;
4010 
4011 	ret = page_counter_memparse(args, "-1", &threshold);
4012 	if (ret)
4013 		return ret;
4014 
4015 	mutex_lock(&memcg->thresholds_lock);
4016 
4017 	if (type == _MEM) {
4018 		thresholds = &memcg->thresholds;
4019 		usage = mem_cgroup_usage(memcg, false);
4020 	} else if (type == _MEMSWAP) {
4021 		thresholds = &memcg->memsw_thresholds;
4022 		usage = mem_cgroup_usage(memcg, true);
4023 	} else
4024 		BUG();
4025 
4026 	/* Check if a threshold crossed before adding a new one */
4027 	if (thresholds->primary)
4028 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4029 
4030 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4031 
4032 	/* Allocate memory for new array of thresholds */
4033 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4034 	if (!new) {
4035 		ret = -ENOMEM;
4036 		goto unlock;
4037 	}
4038 	new->size = size;
4039 
4040 	/* Copy thresholds (if any) to new array */
4041 	if (thresholds->primary) {
4042 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4043 				sizeof(struct mem_cgroup_threshold));
4044 	}
4045 
4046 	/* Add new threshold */
4047 	new->entries[size - 1].eventfd = eventfd;
4048 	new->entries[size - 1].threshold = threshold;
4049 
4050 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4051 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4052 			compare_thresholds, NULL);
4053 
4054 	/* Find current threshold */
4055 	new->current_threshold = -1;
4056 	for (i = 0; i < size; i++) {
4057 		if (new->entries[i].threshold <= usage) {
4058 			/*
4059 			 * new->current_threshold will not be used until
4060 			 * rcu_assign_pointer(), so it's safe to increment
4061 			 * it here.
4062 			 */
4063 			++new->current_threshold;
4064 		} else
4065 			break;
4066 	}
4067 
4068 	/* Free old spare buffer and save old primary buffer as spare */
4069 	kfree(thresholds->spare);
4070 	thresholds->spare = thresholds->primary;
4071 
4072 	rcu_assign_pointer(thresholds->primary, new);
4073 
4074 	/* To be sure that nobody uses thresholds */
4075 	synchronize_rcu();
4076 
4077 unlock:
4078 	mutex_unlock(&memcg->thresholds_lock);
4079 
4080 	return ret;
4081 }
4082 
4083 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4084 	struct eventfd_ctx *eventfd, const char *args)
4085 {
4086 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4087 }
4088 
4089 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4090 	struct eventfd_ctx *eventfd, const char *args)
4091 {
4092 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4093 }
4094 
4095 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4096 	struct eventfd_ctx *eventfd, enum res_type type)
4097 {
4098 	struct mem_cgroup_thresholds *thresholds;
4099 	struct mem_cgroup_threshold_ary *new;
4100 	unsigned long usage;
4101 	int i, j, size, entries;
4102 
4103 	mutex_lock(&memcg->thresholds_lock);
4104 
4105 	if (type == _MEM) {
4106 		thresholds = &memcg->thresholds;
4107 		usage = mem_cgroup_usage(memcg, false);
4108 	} else if (type == _MEMSWAP) {
4109 		thresholds = &memcg->memsw_thresholds;
4110 		usage = mem_cgroup_usage(memcg, true);
4111 	} else
4112 		BUG();
4113 
4114 	if (!thresholds->primary)
4115 		goto unlock;
4116 
4117 	/* Check if a threshold crossed before removing */
4118 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4119 
4120 	/* Calculate the new number of thresholds */
4121 	size = entries = 0;
4122 	for (i = 0; i < thresholds->primary->size; i++) {
4123 		if (thresholds->primary->entries[i].eventfd != eventfd)
4124 			size++;
4125 		else
4126 			entries++;
4127 	}
4128 
4129 	new = thresholds->spare;
4130 
4131 	/* If no items related to eventfd have been cleared, nothing to do */
4132 	if (!entries)
4133 		goto unlock;
4134 
4135 	/* Set thresholds array to NULL if we don't have thresholds */
4136 	if (!size) {
4137 		kfree(new);
4138 		new = NULL;
4139 		goto swap_buffers;
4140 	}
4141 
4142 	new->size = size;
4143 
4144 	/* Copy thresholds and find current threshold */
4145 	new->current_threshold = -1;
4146 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4147 		if (thresholds->primary->entries[i].eventfd == eventfd)
4148 			continue;
4149 
4150 		new->entries[j] = thresholds->primary->entries[i];
4151 		if (new->entries[j].threshold <= usage) {
4152 			/*
4153 			 * new->current_threshold will not be used
4154 			 * until rcu_assign_pointer(), so it's safe to increment
4155 			 * it here.
4156 			 */
4157 			++new->current_threshold;
4158 		}
4159 		j++;
4160 	}
4161 
4162 swap_buffers:
4163 	/* Swap primary and spare array */
4164 	thresholds->spare = thresholds->primary;
4165 
4166 	rcu_assign_pointer(thresholds->primary, new);
4167 
4168 	/* To be sure that nobody uses thresholds */
4169 	synchronize_rcu();
4170 
4171 	/* If all events are unregistered, free the spare array */
4172 	if (!new) {
4173 		kfree(thresholds->spare);
4174 		thresholds->spare = NULL;
4175 	}
4176 unlock:
4177 	mutex_unlock(&memcg->thresholds_lock);
4178 }
4179 
4180 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4181 	struct eventfd_ctx *eventfd)
4182 {
4183 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4184 }
4185 
4186 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4187 	struct eventfd_ctx *eventfd)
4188 {
4189 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4190 }
4191 
4192 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4193 	struct eventfd_ctx *eventfd, const char *args)
4194 {
4195 	struct mem_cgroup_eventfd_list *event;
4196 
4197 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4198 	if (!event)
4199 		return -ENOMEM;
4200 
4201 	spin_lock(&memcg_oom_lock);
4202 
4203 	event->eventfd = eventfd;
4204 	list_add(&event->list, &memcg->oom_notify);
4205 
4206 	/* already in OOM? */
4207 	if (memcg->under_oom)
4208 		eventfd_signal(eventfd, 1);
4209 	spin_unlock(&memcg_oom_lock);
4210 
4211 	return 0;
4212 }
4213 
4214 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4215 	struct eventfd_ctx *eventfd)
4216 {
4217 	struct mem_cgroup_eventfd_list *ev, *tmp;
4218 
4219 	spin_lock(&memcg_oom_lock);
4220 
4221 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4222 		if (ev->eventfd == eventfd) {
4223 			list_del(&ev->list);
4224 			kfree(ev);
4225 		}
4226 	}
4227 
4228 	spin_unlock(&memcg_oom_lock);
4229 }
4230 
4231 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4232 {
4233 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4234 
4235 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4236 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4237 	seq_printf(sf, "oom_kill %lu\n",
4238 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4239 	return 0;
4240 }
4241 
4242 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4243 	struct cftype *cft, u64 val)
4244 {
4245 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4246 
4247 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4248 	if (!css->parent || !((val == 0) || (val == 1)))
4249 		return -EINVAL;
4250 
4251 	memcg->oom_kill_disable = val;
4252 	if (!val)
4253 		memcg_oom_recover(memcg);
4254 
4255 	return 0;
4256 }
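/*
 * Illustrative usage (cgroup v1, path assumed): writing 1 disables the OOM
 * killer for this memcg (tasks wait for memory to become available instead
 * of being killed), writing 0 re-enables it and wakes any waiters via
 * memcg_oom_recover(); reading memory.oom_control reports the three fields
 * printed above:
 *
 *   echo 1 > /sys/fs/cgroup/memory/foo/memory.oom_control
 *   cat /sys/fs/cgroup/memory/foo/memory.oom_control
 *   oom_kill_disable 1
 *   under_oom 0
 *   oom_kill 0
 */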
4257 
4258 #ifdef CONFIG_CGROUP_WRITEBACK
4259 
4260 #include <trace/events/writeback.h>
4261 
4262 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4263 {
4264 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4265 }
4266 
4267 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4268 {
4269 	wb_domain_exit(&memcg->cgwb_domain);
4270 }
4271 
4272 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4273 {
4274 	wb_domain_size_changed(&memcg->cgwb_domain);
4275 }
4276 
4277 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4278 {
4279 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4280 
4281 	if (!memcg->css.parent)
4282 		return NULL;
4283 
4284 	return &memcg->cgwb_domain;
4285 }
4286 
4287 /*
4288  * idx can be of type enum memcg_stat_item or node_stat_item.
4289  * Keep in sync with memcg_exact_page().
4290  */
4291 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4292 {
4293 	long x = atomic_long_read(&memcg->vmstats[idx]);
4294 	int cpu;
4295 
4296 	for_each_online_cpu(cpu)
4297 		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4298 	if (x < 0)
4299 		x = 0;
4300 	return x;
4301 }
4302 
4303 /**
4304  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4305  * @wb: bdi_writeback in question
4306  * @pfilepages: out parameter for number of file pages
4307  * @pheadroom: out parameter for number of allocatable pages according to memcg
4308  * @pdirty: out parameter for number of dirty pages
4309  * @pwriteback: out parameter for number of pages under writeback
4310  *
4311  * Determine the numbers of file, headroom, dirty, and writeback pages in
4312  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4313  * is a bit more involved.
4314  *
4315  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4316  * headroom is calculated as the lowest headroom of itself and the
4317  * ancestors.  Note that this doesn't consider the actual amount of
4318  * available memory in the system.  The caller should further cap
4319  * *@pheadroom accordingly.
4320  */
4321 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4322 			 unsigned long *pheadroom, unsigned long *pdirty,
4323 			 unsigned long *pwriteback)
4324 {
4325 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4326 	struct mem_cgroup *parent;
4327 
4328 	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4329 
4330 	/* this should eventually include NR_UNSTABLE_NFS */
4331 	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4332 	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4333 			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4334 	*pheadroom = PAGE_COUNTER_MAX;
4335 
4336 	while ((parent = parent_mem_cgroup(memcg))) {
4337 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4338 					    READ_ONCE(memcg->high));
4339 		unsigned long used = page_counter_read(&memcg->memory);
4340 
4341 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4342 		memcg = parent;
4343 	}
4344 }
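/*
 * A minimal worked example for the headroom calculation above (numbers
 * assumed): a memcg with max = 1G, high = 512M and 300M used contributes
 * min(1G, 512M) - 300M = 212M; if an ancestor only has 100M of headroom,
 * *pheadroom ends up as 100M, the minimum over the wb's memcg and each
 * ancestor below the root.
 */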
4345 
4346 /*
4347  * Foreign dirty flushing
4348  *
4349  * There's an inherent mismatch between memcg and writeback.  The former
4350  * tracks ownership per-page while the latter per-inode.  This was a
4351  * deliberate design decision because honoring per-page ownership in the
4352  * writeback path is complicated, may lead to higher CPU and IO overheads
4353  * and deemed unnecessary given that write-sharing an inode across
4354  * different cgroups isn't a common use-case.
4355  *
4356  * Combined with inode majority-writer ownership switching, this works well
4357  * enough in most cases but there are some pathological cases.  For
4358  * example, let's say there are two cgroups A and B which keep writing to
4359  * different but confined parts of the same inode.  B owns the inode and
4360  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4361  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4362  * triggering background writeback.  A will be slowed down without a way to
4363  * make writeback of the dirty pages happen.
4364  *
4365  * Conditions like the above can lead to a cgroup getting repeatedly and
4366  * severely throttled after making some progress after each
4367  * dirty_expire_interval while the underlying IO device is almost
4368  * completely idle.
4369  *
4370  * Solving this problem completely requires matching the ownership tracking
4371  * granularities between memcg and writeback in either direction.  However,
4372  * the more egregious behaviors can be avoided by simply remembering the
4373  * most recent foreign dirtying events and initiating remote flushes on
4374  * them when local writeback isn't enough to keep the memory clean enough.
4375  *
4376  * The following two functions implement such mechanism.  When a foreign
4377  * page - a page whose memcg and writeback ownerships don't match - is
4378  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4379  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4380  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4381  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4382  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4383  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4384  * limited to MEMCG_CGWB_FRN_CNT.
4385  *
4386  * The mechanism only remembers IDs and doesn't hold any object references.
4387  * As being wrong occasionally doesn't matter, updates and accesses to the
4388  * records are lockless and racy.
4389  */
4390 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4391 					     struct bdi_writeback *wb)
4392 {
4393 	struct mem_cgroup *memcg = page->mem_cgroup;
4394 	struct memcg_cgwb_frn *frn;
4395 	u64 now = get_jiffies_64();
4396 	u64 oldest_at = now;
4397 	int oldest = -1;
4398 	int i;
4399 
4400 	trace_track_foreign_dirty(page, wb);
4401 
4402 	/*
4403 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4404 	 * using it.  If not, replace the oldest one which isn't being
4405 	 * written out.
4406 	 */
4407 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4408 		frn = &memcg->cgwb_frn[i];
4409 		if (frn->bdi_id == wb->bdi->id &&
4410 		    frn->memcg_id == wb->memcg_css->id)
4411 			break;
4412 		if (time_before64(frn->at, oldest_at) &&
4413 		    atomic_read(&frn->done.cnt) == 1) {
4414 			oldest = i;
4415 			oldest_at = frn->at;
4416 		}
4417 	}
4418 
4419 	if (i < MEMCG_CGWB_FRN_CNT) {
4420 		/*
4421 		 * Re-using an existing one.  Update timestamp lazily to
4422 		 * avoid making the cacheline hot.  We want them to be
4423 		 * reasonably up-to-date and significantly shorter than
4424 		 * dirty_expire_interval as that's what expires the record.
4425 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4426 		 */
4427 		unsigned long update_intv =
4428 			min_t(unsigned long, HZ,
4429 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4430 
4431 		if (time_before64(frn->at, now - update_intv))
4432 			frn->at = now;
4433 	} else if (oldest >= 0) {
4434 		/* replace the oldest free one */
4435 		frn = &memcg->cgwb_frn[oldest];
4436 		frn->bdi_id = wb->bdi->id;
4437 		frn->memcg_id = wb->memcg_css->id;
4438 		frn->at = now;
4439 	}
4440 }
4441 
4442 /* issue foreign writeback flushes for recorded foreign dirtying events */
4443 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4444 {
4445 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4446 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4447 	u64 now = jiffies_64;
4448 	int i;
4449 
4450 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4451 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4452 
4453 		/*
4454 		 * If the record is older than dirty_expire_interval,
4455 		 * writeback on it has already started.  No need to kick it
4456 		 * off again.  Also, don't start a new one if there's
4457 		 * already one in flight.
4458 		 */
4459 		if (time_after64(frn->at, now - intv) &&
4460 		    atomic_read(&frn->done.cnt) == 1) {
4461 			frn->at = 0;
4462 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4463 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4464 					       WB_REASON_FOREIGN_FLUSH,
4465 					       &frn->done);
4466 		}
4467 	}
4468 }
4469 
4470 #else	/* CONFIG_CGROUP_WRITEBACK */
4471 
4472 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4473 {
4474 	return 0;
4475 }
4476 
4477 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4478 {
4479 }
4480 
4481 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4482 {
4483 }
4484 
4485 #endif	/* CONFIG_CGROUP_WRITEBACK */
4486 
4487 /*
4488  * DO NOT USE IN NEW FILES.
4489  *
4490  * "cgroup.event_control" implementation.
4491  *
4492  * This is way over-engineered.  It tries to support fully configurable
4493  * events for each user.  Such a level of flexibility is completely
4494  * unnecessary, especially in light of the planned unified hierarchy.
4495  *
4496  * Please deprecate this and replace with something simpler if at all
4497  * possible.
4498  */
4499 
4500 /*
4501  * Unregister event and free resources.
4502  *
4503  * Gets called from workqueue.
4504  */
4505 static void memcg_event_remove(struct work_struct *work)
4506 {
4507 	struct mem_cgroup_event *event =
4508 		container_of(work, struct mem_cgroup_event, remove);
4509 	struct mem_cgroup *memcg = event->memcg;
4510 
4511 	remove_wait_queue(event->wqh, &event->wait);
4512 
4513 	event->unregister_event(memcg, event->eventfd);
4514 
4515 	/* Notify userspace the event is going away. */
4516 	eventfd_signal(event->eventfd, 1);
4517 
4518 	eventfd_ctx_put(event->eventfd);
4519 	kfree(event);
4520 	css_put(&memcg->css);
4521 }
4522 
4523 /*
4524  * Gets called on EPOLLHUP on eventfd when user closes it.
4525  *
4526  * Called with wqh->lock held and interrupts disabled.
4527  */
4528 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4529 			    int sync, void *key)
4530 {
4531 	struct mem_cgroup_event *event =
4532 		container_of(wait, struct mem_cgroup_event, wait);
4533 	struct mem_cgroup *memcg = event->memcg;
4534 	__poll_t flags = key_to_poll(key);
4535 
4536 	if (flags & EPOLLHUP) {
4537 		/*
4538 		 * If the event has been detached at cgroup removal, we
4539 		 * can simply return knowing the other side will cleanup
4540 		 * for us.
4541 		 *
4542 		 * We can't race against event freeing since the other
4543 		 * side will require wqh->lock via remove_wait_queue(),
4544 		 * which we hold.
4545 		 */
4546 		spin_lock(&memcg->event_list_lock);
4547 		if (!list_empty(&event->list)) {
4548 			list_del_init(&event->list);
4549 			/*
4550 			 * We are in atomic context, but cgroup_event_remove()
4551 			 * may sleep, so we have to call it in workqueue.
4552 			 */
4553 			schedule_work(&event->remove);
4554 		}
4555 		spin_unlock(&memcg->event_list_lock);
4556 	}
4557 
4558 	return 0;
4559 }
4560 
4561 static void memcg_event_ptable_queue_proc(struct file *file,
4562 		wait_queue_head_t *wqh, poll_table *pt)
4563 {
4564 	struct mem_cgroup_event *event =
4565 		container_of(pt, struct mem_cgroup_event, pt);
4566 
4567 	event->wqh = wqh;
4568 	add_wait_queue(wqh, &event->wait);
4569 }
4570 
4571 /*
4572  * DO NOT USE IN NEW FILES.
4573  *
4574  * Parse input and register new cgroup event handler.
4575  *
4576  * Input must be in format '<event_fd> <control_fd> <args>'.
4577  * Interpretation of args is defined by control file implementation.
4578  */
4579 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4580 					 char *buf, size_t nbytes, loff_t off)
4581 {
4582 	struct cgroup_subsys_state *css = of_css(of);
4583 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4584 	struct mem_cgroup_event *event;
4585 	struct cgroup_subsys_state *cfile_css;
4586 	unsigned int efd, cfd;
4587 	struct fd efile;
4588 	struct fd cfile;
4589 	const char *name;
4590 	char *endp;
4591 	int ret;
4592 
4593 	buf = strstrip(buf);
4594 
4595 	efd = simple_strtoul(buf, &endp, 10);
4596 	if (*endp != ' ')
4597 		return -EINVAL;
4598 	buf = endp + 1;
4599 
4600 	cfd = simple_strtoul(buf, &endp, 10);
4601 	if ((*endp != ' ') && (*endp != '\0'))
4602 		return -EINVAL;
4603 	buf = endp + 1;
4604 
4605 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4606 	if (!event)
4607 		return -ENOMEM;
4608 
4609 	event->memcg = memcg;
4610 	INIT_LIST_HEAD(&event->list);
4611 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4612 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4613 	INIT_WORK(&event->remove, memcg_event_remove);
4614 
4615 	efile = fdget(efd);
4616 	if (!efile.file) {
4617 		ret = -EBADF;
4618 		goto out_kfree;
4619 	}
4620 
4621 	event->eventfd = eventfd_ctx_fileget(efile.file);
4622 	if (IS_ERR(event->eventfd)) {
4623 		ret = PTR_ERR(event->eventfd);
4624 		goto out_put_efile;
4625 	}
4626 
4627 	cfile = fdget(cfd);
4628 	if (!cfile.file) {
4629 		ret = -EBADF;
4630 		goto out_put_eventfd;
4631 	}
4632 
4633 	/* the process needs read permission on the control file */
4634 	/* AV: shouldn't we check that it's been opened for read instead? */
4635 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4636 	if (ret < 0)
4637 		goto out_put_cfile;
4638 
4639 	/*
4640 	 * Determine the event callbacks and set them in @event.  This used
4641 	 * to be done via struct cftype but cgroup core no longer knows
4642 	 * about these events.  The following is crude but the whole thing
4643 	 * is for compatibility anyway.
4644 	 *
4645 	 * DO NOT ADD NEW FILES.
4646 	 */
4647 	name = cfile.file->f_path.dentry->d_name.name;
4648 
4649 	if (!strcmp(name, "memory.usage_in_bytes")) {
4650 		event->register_event = mem_cgroup_usage_register_event;
4651 		event->unregister_event = mem_cgroup_usage_unregister_event;
4652 	} else if (!strcmp(name, "memory.oom_control")) {
4653 		event->register_event = mem_cgroup_oom_register_event;
4654 		event->unregister_event = mem_cgroup_oom_unregister_event;
4655 	} else if (!strcmp(name, "memory.pressure_level")) {
4656 		event->register_event = vmpressure_register_event;
4657 		event->unregister_event = vmpressure_unregister_event;
4658 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4659 		event->register_event = memsw_cgroup_usage_register_event;
4660 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4661 	} else {
4662 		ret = -EINVAL;
4663 		goto out_put_cfile;
4664 	}
4665 
4666 	/*
4667 	 * Verify @cfile should belong to @css.  Also, remaining events are
4668 	 * automatically removed on cgroup destruction but the removal is
4669 	 * asynchronous, so take an extra ref on @css.
4670 	 */
4671 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4672 					       &memory_cgrp_subsys);
4673 	ret = -EINVAL;
4674 	if (IS_ERR(cfile_css))
4675 		goto out_put_cfile;
4676 	if (cfile_css != css) {
4677 		css_put(cfile_css);
4678 		goto out_put_cfile;
4679 	}
4680 
4681 	ret = event->register_event(memcg, event->eventfd, buf);
4682 	if (ret)
4683 		goto out_put_css;
4684 
4685 	vfs_poll(efile.file, &event->pt);
4686 
4687 	spin_lock(&memcg->event_list_lock);
4688 	list_add(&event->list, &memcg->event_list);
4689 	spin_unlock(&memcg->event_list_lock);
4690 
4691 	fdput(cfile);
4692 	fdput(efile);
4693 
4694 	return nbytes;
4695 
4696 out_put_css:
4697 	css_put(css);
4698 out_put_cfile:
4699 	fdput(cfile);
4700 out_put_eventfd:
4701 	eventfd_ctx_put(event->eventfd);
4702 out_put_efile:
4703 	fdput(efile);
4704 out_kfree:
4705 	kfree(event);
4706 
4707 	return ret;
4708 }
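/*
 * Illustrative userspace sequence for the "<event_fd> <control_fd> <args>"
 * format parsed above (cgroup v1 only; the path and the 50M threshold are
 * assumptions, error handling omitted). A subsequent read() on efd blocks
 * until the usage threshold is crossed:
 *
 *   int efd = eventfd(0, 0);
 *   int cfd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes", O_RDONLY);
 *   int ecfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control", O_WRONLY);
 *   dprintf(ecfd, "%d %d 52428800", efd, cfd);
 */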
4709 
4710 static struct cftype mem_cgroup_legacy_files[] = {
4711 	{
4712 		.name = "usage_in_bytes",
4713 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4714 		.read_u64 = mem_cgroup_read_u64,
4715 	},
4716 	{
4717 		.name = "max_usage_in_bytes",
4718 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4719 		.write = mem_cgroup_reset,
4720 		.read_u64 = mem_cgroup_read_u64,
4721 	},
4722 	{
4723 		.name = "limit_in_bytes",
4724 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4725 		.write = mem_cgroup_write,
4726 		.read_u64 = mem_cgroup_read_u64,
4727 	},
4728 	{
4729 		.name = "soft_limit_in_bytes",
4730 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4731 		.write = mem_cgroup_write,
4732 		.read_u64 = mem_cgroup_read_u64,
4733 	},
4734 	{
4735 		.name = "failcnt",
4736 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4737 		.write = mem_cgroup_reset,
4738 		.read_u64 = mem_cgroup_read_u64,
4739 	},
4740 	{
4741 		.name = "stat",
4742 		.seq_show = memcg_stat_show,
4743 	},
4744 	{
4745 		.name = "force_empty",
4746 		.write = mem_cgroup_force_empty_write,
4747 	},
4748 	{
4749 		.name = "use_hierarchy",
4750 		.write_u64 = mem_cgroup_hierarchy_write,
4751 		.read_u64 = mem_cgroup_hierarchy_read,
4752 	},
4753 	{
4754 		.name = "cgroup.event_control",		/* XXX: for compat */
4755 		.write = memcg_write_event_control,
4756 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4757 	},
4758 	{
4759 		.name = "swappiness",
4760 		.read_u64 = mem_cgroup_swappiness_read,
4761 		.write_u64 = mem_cgroup_swappiness_write,
4762 	},
4763 	{
4764 		.name = "move_charge_at_immigrate",
4765 		.read_u64 = mem_cgroup_move_charge_read,
4766 		.write_u64 = mem_cgroup_move_charge_write,
4767 	},
4768 	{
4769 		.name = "oom_control",
4770 		.seq_show = mem_cgroup_oom_control_read,
4771 		.write_u64 = mem_cgroup_oom_control_write,
4772 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4773 	},
4774 	{
4775 		.name = "pressure_level",
4776 	},
4777 #ifdef CONFIG_NUMA
4778 	{
4779 		.name = "numa_stat",
4780 		.seq_show = memcg_numa_stat_show,
4781 	},
4782 #endif
4783 	{
4784 		.name = "kmem.limit_in_bytes",
4785 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4786 		.write = mem_cgroup_write,
4787 		.read_u64 = mem_cgroup_read_u64,
4788 	},
4789 	{
4790 		.name = "kmem.usage_in_bytes",
4791 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4792 		.read_u64 = mem_cgroup_read_u64,
4793 	},
4794 	{
4795 		.name = "kmem.failcnt",
4796 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4797 		.write = mem_cgroup_reset,
4798 		.read_u64 = mem_cgroup_read_u64,
4799 	},
4800 	{
4801 		.name = "kmem.max_usage_in_bytes",
4802 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4803 		.write = mem_cgroup_reset,
4804 		.read_u64 = mem_cgroup_read_u64,
4805 	},
4806 #if defined(CONFIG_MEMCG_KMEM) && \
4807 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4808 	{
4809 		.name = "kmem.slabinfo",
4810 		.seq_start = memcg_slab_start,
4811 		.seq_next = memcg_slab_next,
4812 		.seq_stop = memcg_slab_stop,
4813 		.seq_show = memcg_slab_show,
4814 	},
4815 #endif
4816 	{
4817 		.name = "kmem.tcp.limit_in_bytes",
4818 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4819 		.write = mem_cgroup_write,
4820 		.read_u64 = mem_cgroup_read_u64,
4821 	},
4822 	{
4823 		.name = "kmem.tcp.usage_in_bytes",
4824 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4825 		.read_u64 = mem_cgroup_read_u64,
4826 	},
4827 	{
4828 		.name = "kmem.tcp.failcnt",
4829 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4830 		.write = mem_cgroup_reset,
4831 		.read_u64 = mem_cgroup_read_u64,
4832 	},
4833 	{
4834 		.name = "kmem.tcp.max_usage_in_bytes",
4835 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4836 		.write = mem_cgroup_reset,
4837 		.read_u64 = mem_cgroup_read_u64,
4838 	},
4839 	{ },	/* terminate */
4840 };
4841 
4842 /*
4843  * Private memory cgroup IDR
4844  *
4845  * Swap-out records and page cache shadow entries need to store memcg
4846  * references in constrained space, so we maintain an ID space that is
4847  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4848  * memory-controlled cgroups to 64k.
4849  *
4850  * However, there are usually many references to the offline CSS after
4851  * the cgroup has been destroyed, such as page cache or reclaimable
4852  * slab objects, that don't need to hang on to the ID. We want to keep
4853  * those dead CSS from occupying IDs, or we might quickly exhaust the
4854  * relatively small ID space and prevent the creation of new cgroups
4855  * even when there are much fewer than 64k cgroups - possibly none.
4856  *
4857  * Maintain a private 16-bit ID space for memcg, and allow the ID to
4858  * be freed and recycled when it's no longer needed, which is usually
4859  * when the CSS is offlined.
4860  *
4861  * The only exception to that are records of swapped out tmpfs/shmem
4862  * pages that need to be attributed to live ancestors on swapin. But
4863  * those references are manageable from userspace.
4864  */
4865 
4866 static DEFINE_IDR(mem_cgroup_idr);
4867 
4868 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4869 {
4870 	if (memcg->id.id > 0) {
4871 		idr_remove(&mem_cgroup_idr, memcg->id.id);
4872 		memcg->id.id = 0;
4873 	}
4874 }
4875 
4876 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
4877 						  unsigned int n)
4878 {
4879 	refcount_add(n, &memcg->id.ref);
4880 }
4881 
4882 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4883 {
4884 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
4885 		mem_cgroup_id_remove(memcg);
4886 
4887 		/* Memcg ID pins CSS */
4888 		css_put(&memcg->css);
4889 	}
4890 }
4891 
4892 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4893 {
4894 	mem_cgroup_id_put_many(memcg, 1);
4895 }
4896 
4897 /**
4898  * mem_cgroup_from_id - look up a memcg from a memcg id
4899  * @id: the memcg id to look up
4900  *
4901  * Caller must hold rcu_read_lock().
4902  */
4903 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4904 {
4905 	WARN_ON_ONCE(!rcu_read_lock_held());
4906 	return idr_find(&mem_cgroup_idr, id);
4907 }
4908 
4909 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4910 {
4911 	struct mem_cgroup_per_node *pn;
4912 	int tmp = node;
4913 	/*
4914 	 * This routine is called against possible nodes.
4915 	 * But it's a BUG to call kmalloc() against an offline node.
4916 	 *
4917 	 * TODO: this routine can waste much memory for nodes which will
4918 	 *       never be onlined. It's better to use memory hotplug callback
4919 	 *       function.
4920 	 */
4921 	if (!node_state(node, N_NORMAL_MEMORY))
4922 		tmp = -1;
4923 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4924 	if (!pn)
4925 		return 1;
4926 
4927 	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
4928 	if (!pn->lruvec_stat_local) {
4929 		kfree(pn);
4930 		return 1;
4931 	}
4932 
4933 	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4934 	if (!pn->lruvec_stat_cpu) {
4935 		free_percpu(pn->lruvec_stat_local);
4936 		kfree(pn);
4937 		return 1;
4938 	}
4939 
4940 	lruvec_init(&pn->lruvec);
4941 	pn->usage_in_excess = 0;
4942 	pn->on_tree = false;
4943 	pn->memcg = memcg;
4944 
4945 	memcg->nodeinfo[node] = pn;
4946 	return 0;
4947 }
4948 
4949 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4950 {
4951 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4952 
4953 	if (!pn)
4954 		return;
4955 
4956 	free_percpu(pn->lruvec_stat_cpu);
4957 	free_percpu(pn->lruvec_stat_local);
4958 	kfree(pn);
4959 }
4960 
4961 static void __mem_cgroup_free(struct mem_cgroup *memcg)
4962 {
4963 	int node;
4964 
4965 	for_each_node(node)
4966 		free_mem_cgroup_per_node_info(memcg, node);
4967 	free_percpu(memcg->vmstats_percpu);
4968 	free_percpu(memcg->vmstats_local);
4969 	kfree(memcg);
4970 }
4971 
4972 static void mem_cgroup_free(struct mem_cgroup *memcg)
4973 {
4974 	memcg_wb_domain_exit(memcg);
4975 	/*
4976 	 * Flush percpu vmstats and vmevents to guarantee the value correctness
4977 	 * on parent's and all ancestor levels.
4978 	 */
4979 	memcg_flush_percpu_vmstats(memcg);
4980 	memcg_flush_percpu_vmevents(memcg);
4981 	__mem_cgroup_free(memcg);
4982 }
4983 
4984 static struct mem_cgroup *mem_cgroup_alloc(void)
4985 {
4986 	struct mem_cgroup *memcg;
4987 	unsigned int size;
4988 	int node;
4989 	int __maybe_unused i;
4990 
4991 	size = sizeof(struct mem_cgroup);
4992 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4993 
4994 	memcg = kzalloc(size, GFP_KERNEL);
4995 	if (!memcg)
4996 		return NULL;
4997 
4998 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4999 				 1, MEM_CGROUP_ID_MAX,
5000 				 GFP_KERNEL);
5001 	if (memcg->id.id < 0)
5002 		goto fail;
5003 
5004 	memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
5005 	if (!memcg->vmstats_local)
5006 		goto fail;
5007 
5008 	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
5009 	if (!memcg->vmstats_percpu)
5010 		goto fail;
5011 
5012 	for_each_node(node)
5013 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5014 			goto fail;
5015 
5016 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5017 		goto fail;
5018 
5019 	INIT_WORK(&memcg->high_work, high_work_func);
5020 	INIT_LIST_HEAD(&memcg->oom_notify);
5021 	mutex_init(&memcg->thresholds_lock);
5022 	spin_lock_init(&memcg->move_lock);
5023 	vmpressure_init(&memcg->vmpressure);
5024 	INIT_LIST_HEAD(&memcg->event_list);
5025 	spin_lock_init(&memcg->event_list_lock);
5026 	memcg->socket_pressure = jiffies;
5027 #ifdef CONFIG_MEMCG_KMEM
5028 	memcg->kmemcg_id = -1;
5029 #endif
5030 #ifdef CONFIG_CGROUP_WRITEBACK
5031 	INIT_LIST_HEAD(&memcg->cgwb_list);
5032 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5033 		memcg->cgwb_frn[i].done =
5034 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5035 #endif
5036 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5037 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5038 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5039 	memcg->deferred_split_queue.split_queue_len = 0;
5040 #endif
5041 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5042 	return memcg;
5043 fail:
5044 	mem_cgroup_id_remove(memcg);
5045 	__mem_cgroup_free(memcg);
5046 	return NULL;
5047 }
5048 
5049 static struct cgroup_subsys_state * __ref
5050 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5051 {
5052 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5053 	struct mem_cgroup *memcg;
5054 	long error = -ENOMEM;
5055 
5056 	memcg = mem_cgroup_alloc();
5057 	if (!memcg)
5058 		return ERR_PTR(error);
5059 
5060 	WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
5061 	memcg->soft_limit = PAGE_COUNTER_MAX;
5062 	if (parent) {
5063 		memcg->swappiness = mem_cgroup_swappiness(parent);
5064 		memcg->oom_kill_disable = parent->oom_kill_disable;
5065 	}
5066 	if (parent && parent->use_hierarchy) {
5067 		memcg->use_hierarchy = true;
5068 		page_counter_init(&memcg->memory, &parent->memory);
5069 		page_counter_init(&memcg->swap, &parent->swap);
5070 		page_counter_init(&memcg->memsw, &parent->memsw);
5071 		page_counter_init(&memcg->kmem, &parent->kmem);
5072 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5073 	} else {
5074 		page_counter_init(&memcg->memory, NULL);
5075 		page_counter_init(&memcg->swap, NULL);
5076 		page_counter_init(&memcg->memsw, NULL);
5077 		page_counter_init(&memcg->kmem, NULL);
5078 		page_counter_init(&memcg->tcpmem, NULL);
5079 		/*
5080 		 * A deeper hierarchy with use_hierarchy == false doesn't make
5081 		 * much sense, so let the cgroup subsystem know about this
5082 		 * unfortunate state in our controller.
5083 		 */
5084 		if (parent != root_mem_cgroup)
5085 			memory_cgrp_subsys.broken_hierarchy = true;
5086 	}
5087 
5088 	/* The following stuff does not apply to the root */
5089 	if (!parent) {
5090 #ifdef CONFIG_MEMCG_KMEM
5091 		INIT_LIST_HEAD(&memcg->kmem_caches);
5092 #endif
5093 		root_mem_cgroup = memcg;
5094 		return &memcg->css;
5095 	}
5096 
5097 	error = memcg_online_kmem(memcg);
5098 	if (error)
5099 		goto fail;
5100 
5101 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5102 		static_branch_inc(&memcg_sockets_enabled_key);
5103 
5104 	return &memcg->css;
5105 fail:
5106 	mem_cgroup_id_remove(memcg);
5107 	mem_cgroup_free(memcg);
5108 	return ERR_PTR(-ENOMEM);
5109 }
5110 
5111 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5112 {
5113 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5114 
5115 	/*
5116 	 * A memcg must be visible for memcg_expand_shrinker_maps()
5117 	 * by the time the maps are allocated. So, we allocate maps
5118 	 * here, when for_each_mem_cgroup() can't skip it.
5119 	 */
5120 	if (memcg_alloc_shrinker_maps(memcg)) {
5121 		mem_cgroup_id_remove(memcg);
5122 		return -ENOMEM;
5123 	}
5124 
5125 	/* Online state pins memcg ID, memcg ID pins CSS */
5126 	refcount_set(&memcg->id.ref, 1);
5127 	css_get(css);
5128 	return 0;
5129 }
5130 
5131 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5132 {
5133 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5134 	struct mem_cgroup_event *event, *tmp;
5135 
5136 	/*
5137 	 * Unregister events and notify userspace.
5138 	 * Notify userspace about cgroup removing only after rmdir of cgroup
5139 	 * directory to avoid race between userspace and kernelspace.
5140 	 */
5141 	spin_lock(&memcg->event_list_lock);
5142 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5143 		list_del_init(&event->list);
5144 		schedule_work(&event->remove);
5145 	}
5146 	spin_unlock(&memcg->event_list_lock);
5147 
5148 	page_counter_set_min(&memcg->memory, 0);
5149 	page_counter_set_low(&memcg->memory, 0);
5150 
5151 	memcg_offline_kmem(memcg);
5152 	wb_memcg_offline(memcg);
5153 
5154 	drain_all_stock(memcg);
5155 
5156 	mem_cgroup_id_put(memcg);
5157 }
5158 
5159 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5160 {
5161 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5162 
5163 	invalidate_reclaim_iterators(memcg);
5164 }
5165 
5166 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5167 {
5168 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5169 	int __maybe_unused i;
5170 
5171 #ifdef CONFIG_CGROUP_WRITEBACK
5172 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5173 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5174 #endif
5175 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5176 		static_branch_dec(&memcg_sockets_enabled_key);
5177 
5178 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5179 		static_branch_dec(&memcg_sockets_enabled_key);
5180 
5181 	vmpressure_cleanup(&memcg->vmpressure);
5182 	cancel_work_sync(&memcg->high_work);
5183 	mem_cgroup_remove_from_trees(memcg);
5184 	memcg_free_shrinker_maps(memcg);
5185 	memcg_free_kmem(memcg);
5186 	mem_cgroup_free(memcg);
5187 }
5188 
5189 /**
5190  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5191  * @css: the target css
5192  *
5193  * Reset the states of the mem_cgroup associated with @css.  This is
5194  * invoked when the userland requests disabling on the default hierarchy
5195  * but the memcg is pinned through dependency.  The memcg should stop
5196  * applying policies and should revert to the vanilla state as it may be
5197  * made visible again.
5198  *
5199  * The current implementation only resets the essential configurations.
5200  * This needs to be expanded to cover all the visible parts.
5201  */
5202 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5203 {
5204 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5205 
5206 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5207 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5208 	page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
5209 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5210 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5211 	page_counter_set_min(&memcg->memory, 0);
5212 	page_counter_set_low(&memcg->memory, 0);
5213 	WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
5214 	memcg->soft_limit = PAGE_COUNTER_MAX;
5215 	memcg_wb_domain_size_changed(memcg);
5216 }
5217 
5218 #ifdef CONFIG_MMU
5219 /* Handlers for move charge at task migration. */
5220 static int mem_cgroup_do_precharge(unsigned long count)
5221 {
5222 	int ret;
5223 
5224 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5225 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5226 	if (!ret) {
5227 		mc.precharge += count;
5228 		return ret;
5229 	}
5230 
5231 	/* Try charges one by one with reclaim, but do not retry */
5232 	while (count--) {
5233 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5234 		if (ret)
5235 			return ret;
5236 		mc.precharge++;
5237 		cond_resched();
5238 	}
5239 	return 0;
5240 }
5241 
5242 union mc_target {
5243 	struct page	*page;
5244 	swp_entry_t	ent;
5245 };
5246 
5247 enum mc_target_type {
5248 	MC_TARGET_NONE = 0,
5249 	MC_TARGET_PAGE,
5250 	MC_TARGET_SWAP,
5251 	MC_TARGET_DEVICE,
5252 };
5253 
5254 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5255 						unsigned long addr, pte_t ptent)
5256 {
5257 	struct page *page = vm_normal_page(vma, addr, ptent);
5258 
5259 	if (!page || !page_mapped(page))
5260 		return NULL;
5261 	if (PageAnon(page)) {
5262 		if (!(mc.flags & MOVE_ANON))
5263 			return NULL;
5264 	} else {
5265 		if (!(mc.flags & MOVE_FILE))
5266 			return NULL;
5267 	}
5268 	if (!get_page_unless_zero(page))
5269 		return NULL;
5270 
5271 	return page;
5272 }
5273 
5274 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5275 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5276 			pte_t ptent, swp_entry_t *entry)
5277 {
5278 	struct page *page = NULL;
5279 	swp_entry_t ent = pte_to_swp_entry(ptent);
5280 
5281 	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
5282 		return NULL;
5283 
5284 	/*
5285 	 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging to
5286 	 * a device; because they are not accessible by the CPU, they are stored
5287 	 * as special swap entries in the CPU page table.
5288 	 */
5289 	if (is_device_private_entry(ent)) {
5290 		page = device_private_entry_to_page(ent);
5291 		/*
5292 		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5293 		 * a refcount of 1 when free (unlike a normal page).
5294 		 */
5295 		if (!page_ref_add_unless(page, 1, 1))
5296 			return NULL;
5297 		return page;
5298 	}
5299 
5300 	/*
5301 	 * Because lookup_swap_cache() updates some statistics counters,
5302 	 * we call find_get_page() on the swap address space directly.
5303 	 */
5304 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5305 	if (do_memsw_account())
5306 		entry->val = ent.val;
5307 
5308 	return page;
5309 }
5310 #else
5311 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5312 			pte_t ptent, swp_entry_t *entry)
5313 {
5314 	return NULL;
5315 }
5316 #endif
5317 
5318 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5319 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5320 {
5321 	struct page *page = NULL;
5322 	struct address_space *mapping;
5323 	pgoff_t pgoff;
5324 
5325 	if (!vma->vm_file) /* anonymous vma */
5326 		return NULL;
5327 	if (!(mc.flags & MOVE_FILE))
5328 		return NULL;
5329 
5330 	mapping = vma->vm_file->f_mapping;
5331 	pgoff = linear_page_index(vma, addr);
5332 
5333 	/* The page is moved even if it's not RSS of this task (page-faulted). */
5334 #ifdef CONFIG_SWAP
5335 	/* shmem/tmpfs may report page out on swap: account for that too. */
5336 	if (shmem_mapping(mapping)) {
5337 		page = find_get_entry(mapping, pgoff);
5338 		if (xa_is_value(page)) {
5339 			swp_entry_t swp = radix_to_swp_entry(page);
5340 			if (do_memsw_account())
5341 				*entry = swp;
5342 			page = find_get_page(swap_address_space(swp),
5343 					     swp_offset(swp));
5344 		}
5345 	} else
5346 		page = find_get_page(mapping, pgoff);
5347 #else
5348 	page = find_get_page(mapping, pgoff);
5349 #endif
5350 	return page;
5351 }
5352 
5353 /**
5354  * mem_cgroup_move_account - move account of the page
5355  * @page: the page
5356  * @compound: charge the page as compound or small page
5357  * @from: mem_cgroup which the page is moved from.
5358  * @to:	mem_cgroup which the page is moved to. @from != @to.
5359  *
5360  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5361  *
5362  * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
5363  * from the old cgroup.
5364  */
5365 static int mem_cgroup_move_account(struct page *page,
5366 				   bool compound,
5367 				   struct mem_cgroup *from,
5368 				   struct mem_cgroup *to)
5369 {
5370 	struct lruvec *from_vec, *to_vec;
5371 	struct pglist_data *pgdat;
5372 	unsigned long flags;
5373 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5374 	int ret;
5375 	bool anon;
5376 
5377 	VM_BUG_ON(from == to);
5378 	VM_BUG_ON_PAGE(PageLRU(page), page);
5379 	VM_BUG_ON(compound && !PageTransHuge(page));
5380 
5381 	/*
5382 	 * Prevent mem_cgroup_migrate() from looking at
5383 	 * page->mem_cgroup of its source page while we change it.
5384 	 */
5385 	ret = -EBUSY;
5386 	if (!trylock_page(page))
5387 		goto out;
5388 
5389 	ret = -EINVAL;
5390 	if (page->mem_cgroup != from)
5391 		goto out_unlock;
5392 
5393 	anon = PageAnon(page);
5394 
5395 	pgdat = page_pgdat(page);
5396 	from_vec = mem_cgroup_lruvec(from, pgdat);
5397 	to_vec = mem_cgroup_lruvec(to, pgdat);
5398 
5399 	spin_lock_irqsave(&from->move_lock, flags);
5400 
5401 	if (!anon && page_mapped(page)) {
5402 		__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5403 		__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5404 	}
5405 
5406 	/*
5407 	 * move_lock was grabbed above and the caller set from->moving_account, so
5408 	 * mod_memcg_page_state() will serialize updates to PageDirty.
5409 	 * The mapping should therefore be stable for dirty pages.
5410 	 */
5411 	if (!anon && PageDirty(page)) {
5412 		struct address_space *mapping = page_mapping(page);
5413 
5414 		if (mapping_cap_account_dirty(mapping)) {
5415 			__mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
5416 			__mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
5417 		}
5418 	}
5419 
5420 	if (PageWriteback(page)) {
5421 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5422 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5423 	}
5424 
5425 	/*
5426 	 * It is safe to change page->mem_cgroup here because the page
5427 	 * is referenced, charged, and isolated - we can't race with
5428 	 * uncharging, charging, migration, or LRU putback.
5429 	 */
5430 
5431 	/* caller should have done css_get */
5432 	page->mem_cgroup = to;
5433 
5434 	spin_unlock_irqrestore(&from->move_lock, flags);
5435 
5436 	ret = 0;
5437 
5438 	local_irq_disable();
5439 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
5440 	memcg_check_events(to, page);
5441 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
5442 	memcg_check_events(from, page);
5443 	local_irq_enable();
5444 out_unlock:
5445 	unlock_page(page);
5446 out:
5447 	return ret;
5448 }
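
/*
 * Editorial note (illustrative, not part of the original source): the
 * expected calling sequence is the one used by
 * mem_cgroup_move_charge_pte_range() later in this file:
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, compound, mc.from, mc.to))
 *			;	// update precharge/moved_charge bookkeeping
 *		putback_lru_page(page);
 *	}
 *	put_page(page);
 */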
5449 
5450 /**
5451  * get_mctgt_type - get target type of moving charge
5452  * @vma: the vma the pte to be checked belongs to
5453  * @addr: the address corresponding to the pte to be checked
5454  * @ptent: the pte to be checked
5455  * @target: pointer where the target page or swap entry is stored (can be NULL)
5456  *
5457  * Returns
5458  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5459  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5460  *     move charge. If @target is not NULL, the page is stored in target->page
5461  *     with an extra refcount taken (callers should handle it).
5462  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5463  *     target for charge migration. If @target is not NULL, the entry is stored
5464  *     in target->ent.
5465  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5466  *     (so a ZONE_DEVICE page and thus not on the LRU).
5467  *     For now such a page is charged like a regular page would be, as for all
5468  *     intents and purposes it is just special memory taking the place of a
5469  *     regular page.
5470  *
5471  *     See Documentation/vm/hmm.rst and include/linux/hmm.h
5472  *
5473  * Called with pte lock held.
5474  */
5475 
5476 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5477 		unsigned long addr, pte_t ptent, union mc_target *target)
5478 {
5479 	struct page *page = NULL;
5480 	enum mc_target_type ret = MC_TARGET_NONE;
5481 	swp_entry_t ent = { .val = 0 };
5482 
5483 	if (pte_present(ptent))
5484 		page = mc_handle_present_pte(vma, addr, ptent);
5485 	else if (is_swap_pte(ptent))
5486 		page = mc_handle_swap_pte(vma, ptent, &ent);
5487 	else if (pte_none(ptent))
5488 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5489 
5490 	if (!page && !ent.val)
5491 		return ret;
5492 	if (page) {
5493 		/*
5494 		 * Do only loose check w/o serialization.
5495 		 * mem_cgroup_move_account() checks the page is valid or
5496 		 * not under LRU exclusion.
5497 		 */
5498 		if (page->mem_cgroup == mc.from) {
5499 			ret = MC_TARGET_PAGE;
5500 			if (is_device_private_page(page))
5501 				ret = MC_TARGET_DEVICE;
5502 			if (target)
5503 				target->page = page;
5504 		}
5505 		if (!ret || !target)
5506 			put_page(page);
5507 	}
5508 	/*
5509 	 * There is a swap entry and a page doesn't exist or isn't charged.
5510 	 * But we cannot move a tail-page in a THP.
5511 	 */
5512 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5513 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5514 		ret = MC_TARGET_SWAP;
5515 		if (target)
5516 			target->ent = ent;
5517 	}
5518 	return ret;
5519 }
5520 
5521 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5522 /*
5523  * We don't consider PMD mapped swapping or file mapped pages because THP does
5524  * not support them for now.
5525  * Caller should make sure that pmd_trans_huge(pmd) is true.
5526  */
5527 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5528 		unsigned long addr, pmd_t pmd, union mc_target *target)
5529 {
5530 	struct page *page = NULL;
5531 	enum mc_target_type ret = MC_TARGET_NONE;
5532 
5533 	if (unlikely(is_swap_pmd(pmd))) {
5534 		VM_BUG_ON(thp_migration_supported() &&
5535 				  !is_pmd_migration_entry(pmd));
5536 		return ret;
5537 	}
5538 	page = pmd_page(pmd);
5539 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5540 	if (!(mc.flags & MOVE_ANON))
5541 		return ret;
5542 	if (page->mem_cgroup == mc.from) {
5543 		ret = MC_TARGET_PAGE;
5544 		if (target) {
5545 			get_page(page);
5546 			target->page = page;
5547 		}
5548 	}
5549 	return ret;
5550 }
5551 #else
5552 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5553 		unsigned long addr, pmd_t pmd, union mc_target *target)
5554 {
5555 	return MC_TARGET_NONE;
5556 }
5557 #endif
5558 
5559 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5560 					unsigned long addr, unsigned long end,
5561 					struct mm_walk *walk)
5562 {
5563 	struct vm_area_struct *vma = walk->vma;
5564 	pte_t *pte;
5565 	spinlock_t *ptl;
5566 
5567 	ptl = pmd_trans_huge_lock(pmd, vma);
5568 	if (ptl) {
5569 		/*
5570 		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
5571 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5572 		 * this might change.
5573 		 */
5574 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5575 			mc.precharge += HPAGE_PMD_NR;
5576 		spin_unlock(ptl);
5577 		return 0;
5578 	}
5579 
5580 	if (pmd_trans_unstable(pmd))
5581 		return 0;
5582 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5583 	for (; addr != end; pte++, addr += PAGE_SIZE)
5584 		if (get_mctgt_type(vma, addr, *pte, NULL))
5585 			mc.precharge++;	/* increment precharge temporarily */
5586 	pte_unmap_unlock(pte - 1, ptl);
5587 	cond_resched();
5588 
5589 	return 0;
5590 }
5591 
5592 static const struct mm_walk_ops precharge_walk_ops = {
5593 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5594 };
5595 
5596 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5597 {
5598 	unsigned long precharge;
5599 
5600 	down_read(&mm->mmap_sem);
5601 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5602 	up_read(&mm->mmap_sem);
5603 
5604 	precharge = mc.precharge;
5605 	mc.precharge = 0;
5606 
5607 	return precharge;
5608 }
5609 
5610 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5611 {
5612 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5613 
5614 	VM_BUG_ON(mc.moving_task);
5615 	mc.moving_task = current;
5616 	return mem_cgroup_do_precharge(precharge);
5617 }
5618 
5619 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5620 static void __mem_cgroup_clear_mc(void)
5621 {
5622 	struct mem_cgroup *from = mc.from;
5623 	struct mem_cgroup *to = mc.to;
5624 
5625 	/* we must uncharge all the leftover precharges from mc.to */
5626 	if (mc.precharge) {
5627 		cancel_charge(mc.to, mc.precharge);
5628 		mc.precharge = 0;
5629 	}
5630 	/*
5631 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5632 	 * we must uncharge here.
5633 	 */
5634 	if (mc.moved_charge) {
5635 		cancel_charge(mc.from, mc.moved_charge);
5636 		mc.moved_charge = 0;
5637 	}
5638 	/* we must fixup refcnts and charges */
5639 	if (mc.moved_swap) {
5640 		/* uncharge swap account from the old cgroup */
5641 		if (!mem_cgroup_is_root(mc.from))
5642 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5643 
5644 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5645 
5646 		/*
5647 		 * we charged both to->memory and to->memsw, so we
5648 		 * should uncharge to->memory.
5649 		 */
5650 		if (!mem_cgroup_is_root(mc.to))
5651 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5652 
5653 		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
5654 		css_put_many(&mc.to->css, mc.moved_swap);
5655 
5656 		mc.moved_swap = 0;
5657 	}
5658 	memcg_oom_recover(from);
5659 	memcg_oom_recover(to);
5660 	wake_up_all(&mc.waitq);
5661 }
5662 
5663 static void mem_cgroup_clear_mc(void)
5664 {
5665 	struct mm_struct *mm = mc.mm;
5666 
5667 	/*
5668 	 * we must clear moving_task before waking up waiters at the end of
5669 	 * task migration.
5670 	 */
5671 	mc.moving_task = NULL;
5672 	__mem_cgroup_clear_mc();
5673 	spin_lock(&mc.lock);
5674 	mc.from = NULL;
5675 	mc.to = NULL;
5676 	mc.mm = NULL;
5677 	spin_unlock(&mc.lock);
5678 
5679 	mmput(mm);
5680 }
5681 
5682 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5683 {
5684 	struct cgroup_subsys_state *css;
5685 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5686 	struct mem_cgroup *from;
5687 	struct task_struct *leader, *p;
5688 	struct mm_struct *mm;
5689 	unsigned long move_flags;
5690 	int ret = 0;
5691 
5692 	/* charge immigration isn't supported on the default hierarchy */
5693 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5694 		return 0;
5695 
5696 	/*
5697 	 * Multi-process migrations only happen on the default hierarchy
5698 	 * where charge immigration is not used.  Perform charge
5699 	 * immigration if @tset contains a leader and whine if there are
5700 	 * multiple.
5701 	 */
5702 	p = NULL;
5703 	cgroup_taskset_for_each_leader(leader, css, tset) {
5704 		WARN_ON_ONCE(p);
5705 		p = leader;
5706 		memcg = mem_cgroup_from_css(css);
5707 	}
5708 	if (!p)
5709 		return 0;
5710 
5711 	/*
5712 	 * We are now committed to this value, whatever it is. Changes in this
5713 	 * tunable will only affect upcoming migrations, not the current one.
5714 	 * So we need to save it, and keep it going.
5715 	 */
5716 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5717 	if (!move_flags)
5718 		return 0;
5719 
5720 	from = mem_cgroup_from_task(p);
5721 
5722 	VM_BUG_ON(from == memcg);
5723 
5724 	mm = get_task_mm(p);
5725 	if (!mm)
5726 		return 0;
5727 	/* We move charges only when we move the owner of the mm */
5728 	if (mm->owner == p) {
5729 		VM_BUG_ON(mc.from);
5730 		VM_BUG_ON(mc.to);
5731 		VM_BUG_ON(mc.precharge);
5732 		VM_BUG_ON(mc.moved_charge);
5733 		VM_BUG_ON(mc.moved_swap);
5734 
5735 		spin_lock(&mc.lock);
5736 		mc.mm = mm;
5737 		mc.from = from;
5738 		mc.to = memcg;
5739 		mc.flags = move_flags;
5740 		spin_unlock(&mc.lock);
5741 		/* We set mc.moving_task later */
5742 
5743 		ret = mem_cgroup_precharge_mc(mm);
5744 		if (ret)
5745 			mem_cgroup_clear_mc();
5746 	} else {
5747 		mmput(mm);
5748 	}
5749 	return ret;
5750 }
5751 
5752 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5753 {
5754 	if (mc.to)
5755 		mem_cgroup_clear_mc();
5756 }
5757 
5758 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5759 				unsigned long addr, unsigned long end,
5760 				struct mm_walk *walk)
5761 {
5762 	int ret = 0;
5763 	struct vm_area_struct *vma = walk->vma;
5764 	pte_t *pte;
5765 	spinlock_t *ptl;
5766 	enum mc_target_type target_type;
5767 	union mc_target target;
5768 	struct page *page;
5769 
5770 	ptl = pmd_trans_huge_lock(pmd, vma);
5771 	if (ptl) {
5772 		if (mc.precharge < HPAGE_PMD_NR) {
5773 			spin_unlock(ptl);
5774 			return 0;
5775 		}
5776 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5777 		if (target_type == MC_TARGET_PAGE) {
5778 			page = target.page;
5779 			if (!isolate_lru_page(page)) {
5780 				if (!mem_cgroup_move_account(page, true,
5781 							     mc.from, mc.to)) {
5782 					mc.precharge -= HPAGE_PMD_NR;
5783 					mc.moved_charge += HPAGE_PMD_NR;
5784 				}
5785 				putback_lru_page(page);
5786 			}
5787 			put_page(page);
5788 		} else if (target_type == MC_TARGET_DEVICE) {
5789 			page = target.page;
5790 			if (!mem_cgroup_move_account(page, true,
5791 						     mc.from, mc.to)) {
5792 				mc.precharge -= HPAGE_PMD_NR;
5793 				mc.moved_charge += HPAGE_PMD_NR;
5794 			}
5795 			put_page(page);
5796 		}
5797 		spin_unlock(ptl);
5798 		return 0;
5799 	}
5800 
5801 	if (pmd_trans_unstable(pmd))
5802 		return 0;
5803 retry:
5804 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5805 	for (; addr != end; addr += PAGE_SIZE) {
5806 		pte_t ptent = *(pte++);
5807 		bool device = false;
5808 		swp_entry_t ent;
5809 
5810 		if (!mc.precharge)
5811 			break;
5812 
5813 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5814 		case MC_TARGET_DEVICE:
5815 			device = true;
5816 			fallthrough;
5817 		case MC_TARGET_PAGE:
5818 			page = target.page;
5819 			/*
5820 			 * We can have a part of the split pmd here. Moving it
5821 			 * can be done but it would be too convoluted so simply
5822 			 * ignore such a partial THP and keep it in original
5823 			 * memcg. There should be somebody mapping the head.
5824 			 */
5825 			if (PageTransCompound(page))
5826 				goto put;
5827 			if (!device && isolate_lru_page(page))
5828 				goto put;
5829 			if (!mem_cgroup_move_account(page, false,
5830 						mc.from, mc.to)) {
5831 				mc.precharge--;
5832 				/* we uncharge from mc.from later. */
5833 				mc.moved_charge++;
5834 			}
5835 			if (!device)
5836 				putback_lru_page(page);
5837 put:			/* get_mctgt_type() gets the page */
5838 			put_page(page);
5839 			break;
5840 		case MC_TARGET_SWAP:
5841 			ent = target.ent;
5842 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5843 				mc.precharge--;
5844 				/* we fixup refcnts and charges later. */
5845 				mc.moved_swap++;
5846 			}
5847 			break;
5848 		default:
5849 			break;
5850 		}
5851 	}
5852 	pte_unmap_unlock(pte - 1, ptl);
5853 	cond_resched();
5854 
5855 	if (addr != end) {
5856 		/*
5857 		 * We have consumed all precharges we got in can_attach().
5858 		 * We try charge one by one, but don't do any additional
5859 		 * charges to mc.to if we have failed in charge once in attach()
5860 		 * phase.
5861 		 */
5862 		ret = mem_cgroup_do_precharge(1);
5863 		if (!ret)
5864 			goto retry;
5865 	}
5866 
5867 	return ret;
5868 }
5869 
5870 static const struct mm_walk_ops charge_walk_ops = {
5871 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
5872 };
5873 
5874 static void mem_cgroup_move_charge(void)
5875 {
5876 	lru_add_drain_all();
5877 	/*
5878 	 * Signal lock_page_memcg() to take the memcg's move_lock
5879 	 * while we're moving its pages to another memcg. Then wait
5880 	 * for already started RCU-only updates to finish.
5881 	 */
5882 	atomic_inc(&mc.from->moving_account);
5883 	synchronize_rcu();
5884 retry:
5885 	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5886 		/*
5887 		 * Someone who is holding the mmap_sem might be waiting on the
5888 		 * waitq. So we cancel all extra charges, wake up all waiters,
5889 		 * and retry. Because we cancel precharges, we might not be able
5890 		 * to move enough charges, but moving charge is a best-effort
5891 		 * feature anyway, so it wouldn't be a big problem.
5892 		 */
5893 		__mem_cgroup_clear_mc();
5894 		cond_resched();
5895 		goto retry;
5896 	}
5897 	/*
5898 	 * When we have consumed all precharges and failed in doing
5899 	 * additional charge, the page walk just aborts.
5900 	 */
5901 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
5902 			NULL);
5903 
5904 	up_read(&mc.mm->mmap_sem);
5905 	atomic_dec(&mc.from->moving_account);
5906 }
5907 
5908 static void mem_cgroup_move_task(void)
5909 {
5910 	if (mc.to) {
5911 		mem_cgroup_move_charge();
5912 		mem_cgroup_clear_mc();
5913 	}
5914 }
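
/*
 * Editorial summary of the charge-moving protocol (derived from the code
 * above, illustrative only):
 *   1. mem_cgroup_can_attach() records mc.from/mc.to and precharges mc.to
 *      for every movable pte found in the mm (mem_cgroup_precharge_mc()).
 *   2. post_attach, i.e. mem_cgroup_move_task(), walks the page tables
 *      again and moves accounting page by page, consuming the precharge.
 *   3. mem_cgroup_clear_mc() cancels any leftovers and wakes up waiters.
 */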
5915 #else	/* !CONFIG_MMU */
5916 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5917 {
5918 	return 0;
5919 }
5920 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5921 {
5922 }
5923 static void mem_cgroup_move_task(void)
5924 {
5925 }
5926 #endif
5927 
5928 /*
5929  * Cgroup retains root cgroups across [un]mount cycles making it necessary
5930  * to verify whether we're attached to the default hierarchy on each mount
5931  * attempt.
5932  */
5933 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5934 {
5935 	/*
5936 	 * use_hierarchy is forced on the default hierarchy.  cgroup core
5937 	 * guarantees that @root doesn't have any children, so turning it
5938 	 * on for the root memcg is enough.
5939 	 */
5940 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5941 		root_mem_cgroup->use_hierarchy = true;
5942 	else
5943 		root_mem_cgroup->use_hierarchy = false;
5944 }
5945 
5946 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
5947 {
5948 	if (value == PAGE_COUNTER_MAX)
5949 		seq_puts(m, "max\n");
5950 	else
5951 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
5952 
5953 	return 0;
5954 }
5955 
5956 static u64 memory_current_read(struct cgroup_subsys_state *css,
5957 			       struct cftype *cft)
5958 {
5959 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5960 
5961 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5962 }
5963 
5964 static int memory_min_show(struct seq_file *m, void *v)
5965 {
5966 	return seq_puts_memcg_tunable(m,
5967 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
5968 }
5969 
5970 static ssize_t memory_min_write(struct kernfs_open_file *of,
5971 				char *buf, size_t nbytes, loff_t off)
5972 {
5973 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5974 	unsigned long min;
5975 	int err;
5976 
5977 	buf = strstrip(buf);
5978 	err = page_counter_memparse(buf, "max", &min);
5979 	if (err)
5980 		return err;
5981 
5982 	page_counter_set_min(&memcg->memory, min);
5983 
5984 	return nbytes;
5985 }
5986 
5987 static int memory_low_show(struct seq_file *m, void *v)
5988 {
5989 	return seq_puts_memcg_tunable(m,
5990 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
5991 }
5992 
5993 static ssize_t memory_low_write(struct kernfs_open_file *of,
5994 				char *buf, size_t nbytes, loff_t off)
5995 {
5996 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5997 	unsigned long low;
5998 	int err;
5999 
6000 	buf = strstrip(buf);
6001 	err = page_counter_memparse(buf, "max", &low);
6002 	if (err)
6003 		return err;
6004 
6005 	page_counter_set_low(&memcg->memory, low);
6006 
6007 	return nbytes;
6008 }
6009 
6010 static int memory_high_show(struct seq_file *m, void *v)
6011 {
6012 	return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
6013 }
6014 
6015 static ssize_t memory_high_write(struct kernfs_open_file *of,
6016 				 char *buf, size_t nbytes, loff_t off)
6017 {
6018 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6019 	unsigned int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
6020 	bool drained = false;
6021 	unsigned long high;
6022 	int err;
6023 
6024 	buf = strstrip(buf);
6025 	err = page_counter_memparse(buf, "max", &high);
6026 	if (err)
6027 		return err;
6028 
6029 	WRITE_ONCE(memcg->high, high);
6030 
6031 	for (;;) {
6032 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6033 		unsigned long reclaimed;
6034 
6035 		if (nr_pages <= high)
6036 			break;
6037 
6038 		if (signal_pending(current))
6039 			break;
6040 
6041 		if (!drained) {
6042 			drain_all_stock(memcg);
6043 			drained = true;
6044 			continue;
6045 		}
6046 
6047 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6048 							 GFP_KERNEL, true);
6049 
6050 		if (!reclaimed && !nr_retries--)
6051 			break;
6052 	}
6053 
6054 	return nbytes;
6055 }
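
/*
 * Editorial note (illustrative): lowering memory.high reclaims
 * synchronously in the writer's context, e.g.
 *	echo 256M > memory.high
 * returns only once usage has been pushed below the new high, the retry
 * budget is exhausted, or a signal is pending.
 */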
6056 
6057 static int memory_max_show(struct seq_file *m, void *v)
6058 {
6059 	return seq_puts_memcg_tunable(m,
6060 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6061 }
6062 
6063 static ssize_t memory_max_write(struct kernfs_open_file *of,
6064 				char *buf, size_t nbytes, loff_t off)
6065 {
6066 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6067 	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
6068 	bool drained = false;
6069 	unsigned long max;
6070 	int err;
6071 
6072 	buf = strstrip(buf);
6073 	err = page_counter_memparse(buf, "max", &max);
6074 	if (err)
6075 		return err;
6076 
6077 	xchg(&memcg->memory.max, max);
6078 
6079 	for (;;) {
6080 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6081 
6082 		if (nr_pages <= max)
6083 			break;
6084 
6085 		if (signal_pending(current))
6086 			break;
6087 
6088 		if (!drained) {
6089 			drain_all_stock(memcg);
6090 			drained = true;
6091 			continue;
6092 		}
6093 
6094 		if (nr_reclaims) {
6095 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6096 							  GFP_KERNEL, true))
6097 				nr_reclaims--;
6098 			continue;
6099 		}
6100 
6101 		memcg_memory_event(memcg, MEMCG_OOM);
6102 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6103 			break;
6104 	}
6105 
6106 	memcg_wb_domain_size_changed(memcg);
6107 	return nbytes;
6108 }
6109 
6110 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6111 {
6112 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6113 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6114 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6115 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6116 	seq_printf(m, "oom_kill %lu\n",
6117 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6118 }
6119 
6120 static int memory_events_show(struct seq_file *m, void *v)
6121 {
6122 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6123 
6124 	__memory_events_show(m, memcg->memory_events);
6125 	return 0;
6126 }
6127 
6128 static int memory_events_local_show(struct seq_file *m, void *v)
6129 {
6130 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6131 
6132 	__memory_events_show(m, memcg->memory_events_local);
6133 	return 0;
6134 }
6135 
6136 static int memory_stat_show(struct seq_file *m, void *v)
6137 {
6138 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6139 	char *buf;
6140 
6141 	buf = memory_stat_format(memcg);
6142 	if (!buf)
6143 		return -ENOMEM;
6144 	seq_puts(m, buf);
6145 	kfree(buf);
6146 	return 0;
6147 }
6148 
6149 static int memory_oom_group_show(struct seq_file *m, void *v)
6150 {
6151 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6152 
6153 	seq_printf(m, "%d\n", memcg->oom_group);
6154 
6155 	return 0;
6156 }
6157 
6158 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6159 				      char *buf, size_t nbytes, loff_t off)
6160 {
6161 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6162 	int ret, oom_group;
6163 
6164 	buf = strstrip(buf);
6165 	if (!buf)
6166 		return -EINVAL;
6167 
6168 	ret = kstrtoint(buf, 0, &oom_group);
6169 	if (ret)
6170 		return ret;
6171 
6172 	if (oom_group != 0 && oom_group != 1)
6173 		return -EINVAL;
6174 
6175 	memcg->oom_group = oom_group;
6176 
6177 	return nbytes;
6178 }
6179 
6180 static struct cftype memory_files[] = {
6181 	{
6182 		.name = "current",
6183 		.flags = CFTYPE_NOT_ON_ROOT,
6184 		.read_u64 = memory_current_read,
6185 	},
6186 	{
6187 		.name = "min",
6188 		.flags = CFTYPE_NOT_ON_ROOT,
6189 		.seq_show = memory_min_show,
6190 		.write = memory_min_write,
6191 	},
6192 	{
6193 		.name = "low",
6194 		.flags = CFTYPE_NOT_ON_ROOT,
6195 		.seq_show = memory_low_show,
6196 		.write = memory_low_write,
6197 	},
6198 	{
6199 		.name = "high",
6200 		.flags = CFTYPE_NOT_ON_ROOT,
6201 		.seq_show = memory_high_show,
6202 		.write = memory_high_write,
6203 	},
6204 	{
6205 		.name = "max",
6206 		.flags = CFTYPE_NOT_ON_ROOT,
6207 		.seq_show = memory_max_show,
6208 		.write = memory_max_write,
6209 	},
6210 	{
6211 		.name = "events",
6212 		.flags = CFTYPE_NOT_ON_ROOT,
6213 		.file_offset = offsetof(struct mem_cgroup, events_file),
6214 		.seq_show = memory_events_show,
6215 	},
6216 	{
6217 		.name = "events.local",
6218 		.flags = CFTYPE_NOT_ON_ROOT,
6219 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6220 		.seq_show = memory_events_local_show,
6221 	},
6222 	{
6223 		.name = "stat",
6224 		.flags = CFTYPE_NOT_ON_ROOT,
6225 		.seq_show = memory_stat_show,
6226 	},
6227 	{
6228 		.name = "oom.group",
6229 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6230 		.seq_show = memory_oom_group_show,
6231 		.write = memory_oom_group_write,
6232 	},
6233 	{ }	/* terminate */
6234 };
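
/*
 * Editorial note: on the cgroup v2 hierarchy these files appear in each
 * cgroup directory, e.g. (illustrative; the group path is an example):
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.max
 *	cat /sys/fs/cgroup/mygroup/memory.events
 */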
6235 
6236 struct cgroup_subsys memory_cgrp_subsys = {
6237 	.css_alloc = mem_cgroup_css_alloc,
6238 	.css_online = mem_cgroup_css_online,
6239 	.css_offline = mem_cgroup_css_offline,
6240 	.css_released = mem_cgroup_css_released,
6241 	.css_free = mem_cgroup_css_free,
6242 	.css_reset = mem_cgroup_css_reset,
6243 	.can_attach = mem_cgroup_can_attach,
6244 	.cancel_attach = mem_cgroup_cancel_attach,
6245 	.post_attach = mem_cgroup_move_task,
6246 	.bind = mem_cgroup_bind,
6247 	.dfl_cftypes = memory_files,
6248 	.legacy_cftypes = mem_cgroup_legacy_files,
6249 	.early_init = 0,
6250 };
6251 
6252 /*
6253  * This function calculates an individual cgroup's effective
6254  * protection which is derived from its own memory.min/low, its
6255  * parent's and siblings' settings, as well as the actual memory
6256  * distribution in the tree.
6257  *
6258  * The following rules apply to the effective protection values:
6259  *
6260  * 1. At the first level of reclaim, effective protection is equal to
6261  *    the declared protection in memory.min and memory.low.
6262  *
6263  * 2. To enable safe delegation of the protection configuration, at
6264  *    subsequent levels the effective protection is capped to the
6265  *    parent's effective protection.
6266  *
6267  * 3. To make complex and dynamic subtrees easier to configure, the
6268  *    user is allowed to overcommit the declared protection at a given
6269  *    level. If that is the case, the parent's effective protection is
6270  *    distributed to the children in proportion to how much protection
6271  *    they have declared and how much of it they are utilizing.
6272  *
6273  *    This makes distribution proportional, but also work-conserving:
6274  *    if one cgroup claims much more protection than it uses memory,
6275  *    the unused remainder is available to its siblings.
6276  *
6277  * 4. Conversely, when the declared protection is undercommitted at a
6278  *    given level, the distribution of the larger parental protection
6279  *    budget is NOT proportional. A cgroup's protection from a sibling
6280  *    is capped to its own memory.min/low setting.
6281  *
6282  * 5. However, to allow protecting recursive subtrees from each other
6283  *    without having to declare each individual cgroup's fixed share
6284  *    of the ancestor's claim to protection, any unutilized -
6285  *    "floating" - protection from up the tree is distributed in
6286  *    proportion to each cgroup's *usage*. This makes the protection
6287  *    neutral wrt sibling cgroups and lets them compete freely over
6288  *    the shared parental protection budget, but it protects the
6289  *    subtree as a whole from neighboring subtrees.
6290  *
6291  * Note that 4. and 5. are not in conflict: 4. is about protecting
6292  * against immediate siblings whereas 5. is about protecting against
6293  * neighboring subtrees.
6294  */
6295 static unsigned long effective_protection(unsigned long usage,
6296 					  unsigned long parent_usage,
6297 					  unsigned long setting,
6298 					  unsigned long parent_effective,
6299 					  unsigned long siblings_protected)
6300 {
6301 	unsigned long protected;
6302 	unsigned long ep;
6303 
6304 	protected = min(usage, setting);
6305 	/*
6306 	 * If all cgroups at this level combined claim and use more
6307 	 * protection than what the parent affords them, distribute
6308 	 * shares in proportion to utilization.
6309 	 *
6310 	 * We are using actual utilization rather than the statically
6311 	 * claimed protection in order to be work-conserving: claimed
6312 	 * but unused protection is available to siblings that would
6313 	 * otherwise get a smaller chunk than what they claimed.
6314 	 */
6315 	if (siblings_protected > parent_effective)
6316 		return protected * parent_effective / siblings_protected;
6317 
6318 	/*
6319 	 * Ok, utilized protection of all children is within what the
6320 	 * parent affords them, so we know whatever this child claims
6321 	 * and utilizes is effectively protected.
6322 	 *
6323 	 * If there is unprotected usage beyond this value, reclaim
6324 	 * will apply pressure in proportion to that amount.
6325 	 *
6326 	 * If there is unutilized protection, the cgroup will be fully
6327 	 * shielded from reclaim, but we do return a smaller value for
6328 	 * protection than what the group could enjoy in theory. This
6329 	 * is okay. With the overcommit distribution above, effective
6330 	 * protection is always dependent on how memory is actually
6331 	 * consumed among the siblings anyway.
6332 	 */
6333 	ep = protected;
6334 
6335 	/*
6336 	 * If the children aren't claiming (all of) the protection
6337 	 * afforded to them by the parent, distribute the remainder in
6338 	 * proportion to the (unprotected) memory of each cgroup. That
6339 	 * way, cgroups that aren't explicitly prioritized wrt each
6340 	 * other compete freely over the allowance, but they are
6341 	 * collectively protected from neighboring trees.
6342 	 *
6343 	 * We're using unprotected memory for the weight so that if
6344 	 * some cgroups DO claim explicit protection, we don't protect
6345 	 * the same bytes twice.
6346 	 */
6347 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6348 		return ep;
6349 
6350 	if (parent_effective > siblings_protected && usage > protected) {
6351 		unsigned long unclaimed;
6352 
6353 		unclaimed = parent_effective - siblings_protected;
6354 		unclaimed *= usage - protected;
6355 		unclaimed /= parent_usage - siblings_protected;
6356 
6357 		ep += unclaimed;
6358 	}
6359 
6360 	return ep;
6361 }
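
/*
 * Editorial worked example (illustrative numbers only): suppose the
 * parent's effective protection is 100 pages, parent_usage is 200 pages,
 * this cgroup declares memory.low = 80 and uses 60 pages, and the
 * children combined claim-and-use 150 pages of protection.  Because
 * siblings_protected (150) exceeds parent_effective (100), the share is
 * scaled proportionally:
 *
 *	ep = min(60, 80) * 100 / 150 = 40 pages of effective low.
 *
 * Had the children undercommitted (siblings_protected <= 100), ep would
 * simply be min(usage, setting) = 60, plus a usage-proportional share of
 * any unclaimed budget when recursive protection is enabled.
 */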
6362 
6363 /**
6364  * mem_cgroup_protected - check if memory consumption is in the normal range
6365  * @root: the top ancestor of the sub-tree being checked
6366  * @memcg: the memory cgroup to check
6367  *
6368  * WARNING: This function is not stateless! It can only be used as part
6369  *          of a top-down tree iteration, not for isolated queries.
6370  *
6371  * Returns one of the following:
6372  *   MEMCG_PROT_NONE: cgroup memory is not protected
6373  *   MEMCG_PROT_LOW: cgroup memory is protected as long as there is
6374  *     an unprotected supply of reclaimable memory from other cgroups.
6375  *   MEMCG_PROT_MIN: cgroup memory is protected
6376  */
6377 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
6378 						struct mem_cgroup *memcg)
6379 {
6380 	unsigned long usage, parent_usage;
6381 	struct mem_cgroup *parent;
6382 
6383 	if (mem_cgroup_disabled())
6384 		return MEMCG_PROT_NONE;
6385 
6386 	if (!root)
6387 		root = root_mem_cgroup;
6388 	if (memcg == root)
6389 		return MEMCG_PROT_NONE;
6390 
6391 	usage = page_counter_read(&memcg->memory);
6392 	if (!usage)
6393 		return MEMCG_PROT_NONE;
6394 
6395 	parent = parent_mem_cgroup(memcg);
6396 	/* No parent means a non-hierarchical mode on v1 memcg */
6397 	if (!parent)
6398 		return MEMCG_PROT_NONE;
6399 
6400 	if (parent == root) {
6401 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6402 		memcg->memory.elow = memcg->memory.low;
6403 		goto out;
6404 	}
6405 
6406 	parent_usage = page_counter_read(&parent->memory);
6407 
6408 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6409 			READ_ONCE(memcg->memory.min),
6410 			READ_ONCE(parent->memory.emin),
6411 			atomic_long_read(&parent->memory.children_min_usage)));
6412 
6413 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6414 			memcg->memory.low, READ_ONCE(parent->memory.elow),
6415 			atomic_long_read(&parent->memory.children_low_usage)));
6416 
6417 out:
6418 	if (usage <= memcg->memory.emin)
6419 		return MEMCG_PROT_MIN;
6420 	else if (usage <= memcg->memory.elow)
6421 		return MEMCG_PROT_LOW;
6422 	else
6423 		return MEMCG_PROT_NONE;
6424 }
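
/*
 * Editorial sketch of a typical caller (not the exact vmscan code; field
 * names such as sc->memcg_low_reclaim are illustrative): a reclaimer
 * iterating the tree consumes the return value like this:
 *
 *	switch (mem_cgroup_protected(target_memcg, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;	// hard-protected, skip entirely
 *	case MEMCG_PROT_LOW:
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;
 *			continue;	// respect low unless desperate
 *		}
 *		break;
 *	case MEMCG_PROT_NONE:
 *		break;
 *	}
 */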
6425 
6426 /**
6427  * mem_cgroup_try_charge - try charging a page
6428  * @page: page to charge
6429  * @mm: mm context of the victim
6430  * @gfp_mask: reclaim mode
6431  * @memcgp: charged memcg return
6432  * @compound: charge the page as compound or small page
6433  *
6434  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6435  * pages according to @gfp_mask if necessary.
6436  *
6437  * Returns 0 on success, with *@memcgp pointing to the charged memcg.
6438  * Otherwise, an error code is returned.
6439  *
6440  * After page->mapping has been set up, the caller must finalize the
6441  * charge with mem_cgroup_commit_charge().  Or abort the transaction
6442  * with mem_cgroup_cancel_charge() in case page instantiation fails.
6443  */
6444 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
6445 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6446 			  bool compound)
6447 {
6448 	struct mem_cgroup *memcg = NULL;
6449 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6450 	int ret = 0;
6451 
6452 	if (mem_cgroup_disabled())
6453 		goto out;
6454 
6455 	if (PageSwapCache(page)) {
6456 		/*
6457 		 * Every swap fault against a single page tries to charge the
6458 		 * page, bail as early as possible.  shmem_unuse() encounters
6459 		 * already charged pages, too.  The USED bit is protected by
6460 		 * the page lock, which serializes swap cache removal, which
6461 		 * in turn serializes uncharging.
6462 		 */
6463 		VM_BUG_ON_PAGE(!PageLocked(page), page);
6464 		if (compound_head(page)->mem_cgroup)
6465 			goto out;
6466 
6467 		if (do_swap_account) {
6468 			swp_entry_t ent = { .val = page_private(page), };
6469 			unsigned short id = lookup_swap_cgroup_id(ent);
6470 
6471 			rcu_read_lock();
6472 			memcg = mem_cgroup_from_id(id);
6473 			if (memcg && !css_tryget_online(&memcg->css))
6474 				memcg = NULL;
6475 			rcu_read_unlock();
6476 		}
6477 	}
6478 
6479 	if (!memcg)
6480 		memcg = get_mem_cgroup_from_mm(mm);
6481 
6482 	ret = try_charge(memcg, gfp_mask, nr_pages);
6483 
6484 	css_put(&memcg->css);
6485 out:
6486 	*memcgp = memcg;
6487 	return ret;
6488 }
6489 
6490 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
6491 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6492 			  bool compound)
6493 {
6494 	struct mem_cgroup *memcg;
6495 	int ret;
6496 
6497 	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
6498 	memcg = *memcgp;
6499 	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
6500 	return ret;
6501 }
6502 
6503 /**
6504  * mem_cgroup_commit_charge - commit a page charge
6505  * @page: page to charge
6506  * @memcg: memcg to charge the page to
6507  * @lrucare: page might be on LRU already
6508  * @compound: charge the page as compound or small page
6509  *
6510  * Finalize a charge transaction started by mem_cgroup_try_charge(),
6511  * after page->mapping has been set up.  This must happen atomically
6512  * as part of the page instantiation, i.e. under the page table lock
6513  * for anonymous pages, under the page lock for page and swap cache.
6514  *
6515  * In addition, the page must not be on the LRU during the commit, to
6516  * prevent racing with task migration.  If it might be, use @lrucare.
6517  *
6518  * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
6519  */
6520 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
6521 			      bool lrucare, bool compound)
6522 {
6523 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6524 
6525 	VM_BUG_ON_PAGE(!page->mapping, page);
6526 	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
6527 
6528 	if (mem_cgroup_disabled())
6529 		return;
6530 	/*
6531 	 * Swap faults will attempt to charge the same page multiple
6532 	 * times.  But reuse_swap_page() might have removed the page
6533 	 * from swapcache already, so we can't check PageSwapCache().
6534 	 */
6535 	if (!memcg)
6536 		return;
6537 
6538 	commit_charge(page, memcg, lrucare);
6539 
6540 	local_irq_disable();
6541 	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
6542 	memcg_check_events(memcg, page);
6543 	local_irq_enable();
6544 
6545 	if (do_memsw_account() && PageSwapCache(page)) {
6546 		swp_entry_t entry = { .val = page_private(page) };
6547 		/*
6548 		 * The swap entry might not get freed for a long time,
6549 		 * let's not wait for it.  The page already received a
6550 		 * memory+swap charge, drop the swap entry duplicate.
6551 		 */
6552 		mem_cgroup_uncharge_swap(entry, nr_pages);
6553 	}
6554 }
6555 
6556 /**
6557  * mem_cgroup_cancel_charge - cancel a page charge
6558  * @page: page to charge
6559  * @memcg: memcg to charge the page to
6560  * @compound: charge the page as compound or small page
6561  *
6562  * Cancel a charge transaction started by mem_cgroup_try_charge().
6563  */
6564 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
6565 		bool compound)
6566 {
6567 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6568 
6569 	if (mem_cgroup_disabled())
6570 		return;
6571 	/*
6572 	 * Swap faults will attempt to charge the same page multiple
6573 	 * times.  But reuse_swap_page() might have removed the page
6574 	 * from swapcache already, so we can't check PageSwapCache().
6575 	 */
6576 	if (!memcg)
6577 		return;
6578 
6579 	cancel_charge(memcg, nr_pages);
6580 }
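
/*
 * Editorial sketch of the charge transaction (illustrative only; the real
 * callers live in mm/memory.c, mm/filemap.c and friends):
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		goto oom;
 *	// ... install the page into the page tables or page cache ...
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	// or, if instantiation failed:
 *	mem_cgroup_cancel_charge(page, memcg, false);
 */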
6581 
6582 struct uncharge_gather {
6583 	struct mem_cgroup *memcg;
6584 	unsigned long pgpgout;
6585 	unsigned long nr_anon;
6586 	unsigned long nr_file;
6587 	unsigned long nr_kmem;
6588 	unsigned long nr_huge;
6589 	unsigned long nr_shmem;
6590 	struct page *dummy_page;
6591 };
6592 
6593 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6594 {
6595 	memset(ug, 0, sizeof(*ug));
6596 }
6597 
6598 static void uncharge_batch(const struct uncharge_gather *ug)
6599 {
6600 	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
6601 	unsigned long flags;
6602 
6603 	if (!mem_cgroup_is_root(ug->memcg)) {
6604 		page_counter_uncharge(&ug->memcg->memory, nr_pages);
6605 		if (do_memsw_account())
6606 			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
6607 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6608 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6609 		memcg_oom_recover(ug->memcg);
6610 	}
6611 
6612 	local_irq_save(flags);
6613 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
6614 	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
6615 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
6616 	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
6617 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6618 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
6619 	memcg_check_events(ug->memcg, ug->dummy_page);
6620 	local_irq_restore(flags);
6621 
6622 	if (!mem_cgroup_is_root(ug->memcg))
6623 		css_put_many(&ug->memcg->css, nr_pages);
6624 }
6625 
6626 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6627 {
6628 	VM_BUG_ON_PAGE(PageLRU(page), page);
6629 	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
6630 			!PageHWPoison(page) , page);
6631 
6632 	if (!page->mem_cgroup)
6633 		return;
6634 
6635 	/*
6636 	 * Nobody should be changing or seriously looking at
6637 	 * page->mem_cgroup at this point; we have fully
6638 	 * exclusive access to the page.
6639 	 */
6640 
6641 	if (ug->memcg != page->mem_cgroup) {
6642 		if (ug->memcg) {
6643 			uncharge_batch(ug);
6644 			uncharge_gather_clear(ug);
6645 		}
6646 		ug->memcg = page->mem_cgroup;
6647 	}
6648 
6649 	if (!PageKmemcg(page)) {
6650 		unsigned int nr_pages = 1;
6651 
6652 		if (PageTransHuge(page)) {
6653 			nr_pages = compound_nr(page);
6654 			ug->nr_huge += nr_pages;
6655 		}
6656 		if (PageAnon(page))
6657 			ug->nr_anon += nr_pages;
6658 		else {
6659 			ug->nr_file += nr_pages;
6660 			if (PageSwapBacked(page))
6661 				ug->nr_shmem += nr_pages;
6662 		}
6663 		ug->pgpgout++;
6664 	} else {
6665 		ug->nr_kmem += compound_nr(page);
6666 		__ClearPageKmemcg(page);
6667 	}
6668 
6669 	ug->dummy_page = page;
6670 	page->mem_cgroup = NULL;
6671 }
6672 
6673 static void uncharge_list(struct list_head *page_list)
6674 {
6675 	struct uncharge_gather ug;
6676 	struct list_head *next;
6677 
6678 	uncharge_gather_clear(&ug);
6679 
6680 	/*
6681 	 * Note that the list can be a single page->lru; hence the
6682 	 * do-while loop instead of a simple list_for_each_entry().
6683 	 */
6684 	next = page_list->next;
6685 	do {
6686 		struct page *page;
6687 
6688 		page = list_entry(next, struct page, lru);
6689 		next = page->lru.next;
6690 
6691 		uncharge_page(page, &ug);
6692 	} while (next != page_list);
6693 
6694 	if (ug.memcg)
6695 		uncharge_batch(&ug);
6696 }
6697 
6698 /**
6699  * mem_cgroup_uncharge - uncharge a page
6700  * @page: page to uncharge
6701  *
6702  * Uncharge a page previously charged with mem_cgroup_try_charge() and
6703  * mem_cgroup_commit_charge().
6704  */
6705 void mem_cgroup_uncharge(struct page *page)
6706 {
6707 	struct uncharge_gather ug;
6708 
6709 	if (mem_cgroup_disabled())
6710 		return;
6711 
6712 	/* Don't touch page->lru of any random page, pre-check: */
6713 	if (!page->mem_cgroup)
6714 		return;
6715 
6716 	uncharge_gather_clear(&ug);
6717 	uncharge_page(page, &ug);
6718 	uncharge_batch(&ug);
6719 }
6720 
6721 /**
6722  * mem_cgroup_uncharge_list - uncharge a list of pages
6723  * @page_list: list of pages to uncharge
6724  *
6725  * Uncharge a list of pages previously charged with
6726  * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6727  */
6728 void mem_cgroup_uncharge_list(struct list_head *page_list)
6729 {
6730 	if (mem_cgroup_disabled())
6731 		return;
6732 
6733 	if (!list_empty(page_list))
6734 		uncharge_list(page_list);
6735 }
6736 
6737 /**
6738  * mem_cgroup_migrate - charge a page's replacement
6739  * @oldpage: currently circulating page
6740  * @newpage: replacement page
6741  *
6742  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6743  * be uncharged upon free.
6744  *
6745  * Both pages must be locked, @newpage->mapping must be set up.
6746  */
6747 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6748 {
6749 	struct mem_cgroup *memcg;
6750 	unsigned int nr_pages;
6751 	unsigned long flags;
6752 
6753 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6754 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6755 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6756 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6757 		       newpage);
6758 
6759 	if (mem_cgroup_disabled())
6760 		return;
6761 
6762 	/* Page cache replacement: new page already charged? */
6763 	if (newpage->mem_cgroup)
6764 		return;
6765 
6766 	/* Swapcache readahead pages can get replaced before being charged */
6767 	memcg = oldpage->mem_cgroup;
6768 	if (!memcg)
6769 		return;
6770 
6771 	/* Force-charge the new page. The old one will be freed soon */
6772 	nr_pages = hpage_nr_pages(newpage);
6773 
6774 	page_counter_charge(&memcg->memory, nr_pages);
6775 	if (do_memsw_account())
6776 		page_counter_charge(&memcg->memsw, nr_pages);
6777 	css_get_many(&memcg->css, nr_pages);
6778 
6779 	commit_charge(newpage, memcg, false);
6780 
6781 	local_irq_save(flags);
6782 	mem_cgroup_charge_statistics(memcg, newpage, PageTransHuge(newpage),
6783 			nr_pages);
6784 	memcg_check_events(memcg, newpage);
6785 	local_irq_restore(flags);
6786 }
6787 
6788 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6789 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6790 
6791 void mem_cgroup_sk_alloc(struct sock *sk)
6792 {
6793 	struct mem_cgroup *memcg;
6794 
6795 	if (!mem_cgroup_sockets_enabled)
6796 		return;
6797 
6798 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
6799 	if (in_interrupt())
6800 		return;
6801 
6802 	rcu_read_lock();
6803 	memcg = mem_cgroup_from_task(current);
6804 	if (memcg == root_mem_cgroup)
6805 		goto out;
6806 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6807 		goto out;
6808 	if (css_tryget(&memcg->css))
6809 		sk->sk_memcg = memcg;
6810 out:
6811 	rcu_read_unlock();
6812 }
6813 
6814 void mem_cgroup_sk_free(struct sock *sk)
6815 {
6816 	if (sk->sk_memcg)
6817 		css_put(&sk->sk_memcg->css);
6818 }
6819 
6820 /**
6821  * mem_cgroup_charge_skmem - charge socket memory
6822  * @memcg: memcg to charge
6823  * @nr_pages: number of pages to charge
6824  *
6825  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6826  * @memcg's configured limit, %false if the charge had to be forced.
6827  */
6828 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6829 {
6830 	gfp_t gfp_mask = GFP_KERNEL;
6831 
6832 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6833 		struct page_counter *fail;
6834 
6835 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6836 			memcg->tcpmem_pressure = 0;
6837 			return true;
6838 		}
6839 		page_counter_charge(&memcg->tcpmem, nr_pages);
6840 		memcg->tcpmem_pressure = 1;
6841 		return false;
6842 	}
6843 
6844 	/* Don't block in the packet receive path */
6845 	if (in_softirq())
6846 		gfp_mask = GFP_NOWAIT;
6847 
6848 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6849 
6850 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6851 		return true;
6852 
6853 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
6854 	return false;
6855 }
6856 
6857 /**
6858  * mem_cgroup_uncharge_skmem - uncharge socket memory
6859  * @memcg: memcg to uncharge
6860  * @nr_pages: number of pages to uncharge
6861  */
6862 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6863 {
6864 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6865 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
6866 		return;
6867 	}
6868 
6869 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6870 
6871 	refill_stock(memcg, nr_pages);
6872 }
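
/*
 * Editorial note (illustrative caller pattern, not verbatim kernel code):
 * the socket layer charges sk->sk_memcg when a socket buffer grows and
 * uncharges it when the buffer shrinks:
 *
 *	if (sk->sk_memcg)
 *		charged = mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);
 *	...
 *	if (sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */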
6873 
6874 static int __init cgroup_memory(char *s)
6875 {
6876 	char *token;
6877 
6878 	while ((token = strsep(&s, ",")) != NULL) {
6879 		if (!*token)
6880 			continue;
6881 		if (!strcmp(token, "nosocket"))
6882 			cgroup_memory_nosocket = true;
6883 		if (!strcmp(token, "nokmem"))
6884 			cgroup_memory_nokmem = true;
6885 	}
6886 	return 0;
6887 }
6888 __setup("cgroup.memory=", cgroup_memory);
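
/*
 * Editorial example: booting with "cgroup.memory=nosocket,nokmem" on the
 * kernel command line disables both socket and kernel memory accounting.
 */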
6889 
6890 /*
6891  * subsys_initcall() for memory controller.
6892  *
6893  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
6894  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
6895  * basically everything that doesn't depend on a specific mem_cgroup structure
6896  * should be initialized from here.
6897  */
6898 static int __init mem_cgroup_init(void)
6899 {
6900 	int cpu, node;
6901 
6902 #ifdef CONFIG_MEMCG_KMEM
6903 	/*
6904 	 * Kmem cache creation is mostly done with the slab_mutex held,
6905 	 * so use a workqueue with limited concurrency to avoid stalling
6906 	 * all worker threads in case lots of cgroups are created and
6907 	 * destroyed simultaneously.
6908 	 */
6909 	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
6910 	BUG_ON(!memcg_kmem_cache_wq);
6911 #endif
6912 
6913 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
6914 				  memcg_hotplug_cpu_dead);
6915 
6916 	for_each_possible_cpu(cpu)
6917 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
6918 			  drain_local_stock);
6919 
6920 	for_each_node(node) {
6921 		struct mem_cgroup_tree_per_node *rtpn;
6922 
6923 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
6924 				    node_online(node) ? node : NUMA_NO_NODE);
6925 
6926 		rtpn->rb_root = RB_ROOT;
6927 		rtpn->rb_rightmost = NULL;
6928 		spin_lock_init(&rtpn->lock);
6929 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6930 	}
6931 
6932 	return 0;
6933 }
6934 subsys_initcall(mem_cgroup_init);
6935 
6936 #ifdef CONFIG_MEMCG_SWAP
6937 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
6938 {
6939 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
6940 		/*
6941 		 * The root cgroup cannot be destroyed, so its refcount must
6942 		 * always be >= 1.
6943 		 */
6944 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
6945 			VM_BUG_ON(1);
6946 			break;
6947 		}
6948 		memcg = parent_mem_cgroup(memcg);
6949 		if (!memcg)
6950 			memcg = root_mem_cgroup;
6951 	}
6952 	return memcg;
6953 }
6954 
6955 /**
6956  * mem_cgroup_swapout - transfer a memsw charge to swap
6957  * @page: page whose memsw charge to transfer
6958  * @entry: swap entry to move the charge to
6959  *
6960  * Transfer the memsw charge of @page to @entry.
6961  */
6962 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
6963 {
6964 	struct mem_cgroup *memcg, *swap_memcg;
6965 	unsigned int nr_entries;
6966 	unsigned short oldid;
6967 
6968 	VM_BUG_ON_PAGE(PageLRU(page), page);
6969 	VM_BUG_ON_PAGE(page_count(page), page);
6970 
6971 	if (!do_memsw_account())
6972 		return;
6973 
6974 	memcg = page->mem_cgroup;
6975 
6976 	/* Readahead page, never charged */
6977 	if (!memcg)
6978 		return;
6979 
6980 	/*
6981 	 * In case the memcg owning these pages has been offlined and doesn't
6982 	 * have an ID allocated to it anymore, charge the closest online
6983 	 * ancestor for the swap instead and transfer the memory+swap charge.
6984 	 */
6985 	swap_memcg = mem_cgroup_id_get_online(memcg);
6986 	nr_entries = hpage_nr_pages(page);
6987 	/* Get references for the tail pages, too */
6988 	if (nr_entries > 1)
6989 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
6990 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
6991 				   nr_entries);
6992 	VM_BUG_ON_PAGE(oldid, page);
6993 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
6994 
6995 	page->mem_cgroup = NULL;
6996 
6997 	if (!mem_cgroup_is_root(memcg))
6998 		page_counter_uncharge(&memcg->memory, nr_entries);
6999 
7000 	if (memcg != swap_memcg) {
7001 		if (!mem_cgroup_is_root(swap_memcg))
7002 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7003 		page_counter_uncharge(&memcg->memsw, nr_entries);
7004 	}
7005 
7006 	/*
7007 	 * Interrupts should be disabled here because the caller holds the
7008 	 * i_pages lock which is taken with interrupts-off. It is
7009 	 * important here to have the interrupts disabled because it is the
7010 	 * only synchronisation we have for updating the per-CPU variables.
7011 	 */
7012 	VM_BUG_ON(!irqs_disabled());
7013 	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
7014 				     -nr_entries);
7015 	memcg_check_events(memcg, page);
7016 
7017 	if (!mem_cgroup_is_root(memcg))
7018 		css_put_many(&memcg->css, nr_entries);
7019 }
7020 
7021 /**
7022  * mem_cgroup_try_charge_swap - try charging swap space for a page
7023  * @page: page being added to swap
7024  * @entry: swap entry to charge
7025  *
7026  * Try to charge @page's memcg for the swap space at @entry.
7027  *
7028  * Returns 0 on success, -ENOMEM on failure.
7029  */
7030 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7031 {
7032 	unsigned int nr_pages = hpage_nr_pages(page);
7033 	struct page_counter *counter;
7034 	struct mem_cgroup *memcg;
7035 	unsigned short oldid;
7036 
7037 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
7038 		return 0;
7039 
7040 	memcg = page->mem_cgroup;
7041 
7042 	/* Readahead page, never charged */
7043 	if (!memcg)
7044 		return 0;
7045 
7046 	if (!entry.val) {
7047 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7048 		return 0;
7049 	}
7050 
7051 	memcg = mem_cgroup_id_get_online(memcg);
7052 
7053 	if (!mem_cgroup_is_root(memcg) &&
7054 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7055 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7056 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7057 		mem_cgroup_id_put(memcg);
7058 		return -ENOMEM;
7059 	}
7060 
7061 	/* Get references for the tail pages, too */
7062 	if (nr_pages > 1)
7063 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7064 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7065 	VM_BUG_ON_PAGE(oldid, page);
7066 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7067 
7068 	return 0;
7069 }
7070 
7071 /**
7072  * mem_cgroup_uncharge_swap - uncharge swap space
7073  * @entry: swap entry to uncharge
7074  * @nr_pages: the amount of swap space to uncharge
7075  */
7076 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7077 {
7078 	struct mem_cgroup *memcg;
7079 	unsigned short id;
7080 
7081 	if (!do_swap_account)
7082 		return;
7083 
7084 	id = swap_cgroup_record(entry, 0, nr_pages);
7085 	rcu_read_lock();
7086 	memcg = mem_cgroup_from_id(id);
7087 	if (memcg) {
7088 		if (!mem_cgroup_is_root(memcg)) {
7089 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7090 				page_counter_uncharge(&memcg->swap, nr_pages);
7091 			else
7092 				page_counter_uncharge(&memcg->memsw, nr_pages);
7093 		}
7094 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7095 		mem_cgroup_id_put_many(memcg, nr_pages);
7096 	}
7097 	rcu_read_unlock();
7098 }
7099 
7100 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7101 {
7102 	long nr_swap_pages = get_nr_swap_pages();
7103 
7104 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7105 		return nr_swap_pages;
7106 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7107 		nr_swap_pages = min_t(long, nr_swap_pages,
7108 				      READ_ONCE(memcg->swap.max) -
7109 				      page_counter_read(&memcg->swap));
7110 	return nr_swap_pages;
7111 }
7112 
7113 bool mem_cgroup_swap_full(struct page *page)
7114 {
7115 	struct mem_cgroup *memcg;
7116 
7117 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7118 
7119 	if (vm_swap_full())
7120 		return true;
7121 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7122 		return false;
7123 
7124 	memcg = page->mem_cgroup;
7125 	if (!memcg)
7126 		return false;
7127 
7128 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7129 		if (page_counter_read(&memcg->swap) * 2 >=
7130 		    READ_ONCE(memcg->swap.max))
7131 			return true;
7132 
7133 	return false;
7134 }
7135 
7136 /* for remembering the boot option */
7137 #ifdef CONFIG_MEMCG_SWAP_ENABLED
7138 static int really_do_swap_account __initdata = 1;
7139 #else
7140 static int really_do_swap_account __initdata;
7141 #endif
7142 
7143 static int __init enable_swap_account(char *s)
7144 {
7145 	if (!strcmp(s, "1"))
7146 		really_do_swap_account = 1;
7147 	else if (!strcmp(s, "0"))
7148 		really_do_swap_account = 0;
7149 	return 1;
7150 }
7151 __setup("swapaccount=", enable_swap_account);
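
/*
 * Editorial example: passing "swapaccount=0" on the kernel command line
 * disables swap accounting even when CONFIG_MEMCG_SWAP_ENABLED is set;
 * "swapaccount=1" enables it on kernels built without that default.
 */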
7152 
7153 static u64 swap_current_read(struct cgroup_subsys_state *css,
7154 			     struct cftype *cft)
7155 {
7156 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7157 
7158 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7159 }
7160 
7161 static int swap_max_show(struct seq_file *m, void *v)
7162 {
7163 	return seq_puts_memcg_tunable(m,
7164 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7165 }
7166 
7167 static ssize_t swap_max_write(struct kernfs_open_file *of,
7168 			      char *buf, size_t nbytes, loff_t off)
7169 {
7170 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7171 	unsigned long max;
7172 	int err;
7173 
7174 	buf = strstrip(buf);
7175 	err = page_counter_memparse(buf, "max", &max);
7176 	if (err)
7177 		return err;
7178 
7179 	xchg(&memcg->swap.max, max);
7180 
7181 	return nbytes;
7182 }
7183 
7184 static int swap_events_show(struct seq_file *m, void *v)
7185 {
7186 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7187 
7188 	seq_printf(m, "max %lu\n",
7189 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7190 	seq_printf(m, "fail %lu\n",
7191 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7192 
7193 	return 0;
7194 }
7195 
7196 static struct cftype swap_files[] = {
7197 	{
7198 		.name = "swap.current",
7199 		.flags = CFTYPE_NOT_ON_ROOT,
7200 		.read_u64 = swap_current_read,
7201 	},
7202 	{
7203 		.name = "swap.max",
7204 		.flags = CFTYPE_NOT_ON_ROOT,
7205 		.seq_show = swap_max_show,
7206 		.write = swap_max_write,
7207 	},
7208 	{
7209 		.name = "swap.events",
7210 		.flags = CFTYPE_NOT_ON_ROOT,
7211 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7212 		.seq_show = swap_events_show,
7213 	},
7214 	{ }	/* terminate */
7215 };
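
/*
 * Editorial note: on cgroup v2 these appear as memory.swap.current,
 * memory.swap.max and memory.swap.events in each cgroup directory, e.g.
 * (illustrative path):
 *	echo 1G > /sys/fs/cgroup/mygroup/memory.swap.max
 */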
7216 
7217 static struct cftype memsw_cgroup_files[] = {
7218 	{
7219 		.name = "memsw.usage_in_bytes",
7220 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7221 		.read_u64 = mem_cgroup_read_u64,
7222 	},
7223 	{
7224 		.name = "memsw.max_usage_in_bytes",
7225 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7226 		.write = mem_cgroup_reset,
7227 		.read_u64 = mem_cgroup_read_u64,
7228 	},
7229 	{
7230 		.name = "memsw.limit_in_bytes",
7231 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7232 		.write = mem_cgroup_write,
7233 		.read_u64 = mem_cgroup_read_u64,
7234 	},
7235 	{
7236 		.name = "memsw.failcnt",
7237 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7238 		.write = mem_cgroup_reset,
7239 		.read_u64 = mem_cgroup_read_u64,
7240 	},
7241 	{ },	/* terminate */
7242 };
7243 
7244 static int __init mem_cgroup_swap_init(void)
7245 {
7246 	if (!mem_cgroup_disabled() && really_do_swap_account) {
7247 		do_swap_account = 1;
7248 		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
7249 					       swap_files));
7250 		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
7251 						  memsw_cgroup_files));
7252 	}
7253 	return 0;
7254 }
7255 subsys_initcall(mem_cgroup_swap_init);
7256 
7257 #endif /* CONFIG_MEMCG_SWAP */
7258