xref: /linux/mm/memcontrol.c (revision 7a309195d11cde854eb75559fbd6b48f9e518f25)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  */
24 
25 #include <linux/page_counter.h>
26 #include <linux/memcontrol.h>
27 #include <linux/cgroup.h>
28 #include <linux/pagewalk.h>
29 #include <linux/sched/mm.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/hugetlb.h>
32 #include <linux/pagemap.h>
33 #include <linux/vm_event_item.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/swap_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include <linux/tracehook.h>
60 #include <linux/psi.h>
61 #include <linux/seq_buf.h>
62 #include "internal.h"
63 #include <net/sock.h>
64 #include <net/ip.h>
65 #include "slab.h"
66 
67 #include <linux/uaccess.h>
68 
69 #include <trace/events/vmscan.h>
70 
71 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
72 EXPORT_SYMBOL(memory_cgrp_subsys);
73 
74 struct mem_cgroup *root_mem_cgroup __read_mostly;
75 
76 /* Socket memory accounting disabled? */
77 static bool cgroup_memory_nosocket;
78 
79 /* Kernel memory accounting disabled? */
80 static bool cgroup_memory_nokmem;
81 
82 /* Whether the swap controller is active */
83 #ifdef CONFIG_MEMCG_SWAP
84 bool cgroup_memory_noswap __read_mostly;
85 #else
86 #define cgroup_memory_noswap		1
87 #endif
88 
89 #ifdef CONFIG_CGROUP_WRITEBACK
90 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
91 #endif
92 
93 /* Whether legacy memory+swap accounting is active */
94 static bool do_memsw_account(void)
95 {
96 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
97 }
98 
99 #define THRESHOLDS_EVENTS_TARGET 128
100 #define SOFTLIMIT_EVENTS_TARGET 1024
101 
102 /*
103  * Cgroups above their limits are maintained in an RB-tree, independent of
104  * their hierarchy representation.
105  */
106 
107 struct mem_cgroup_tree_per_node {
108 	struct rb_root rb_root;
109 	struct rb_node *rb_rightmost;
110 	spinlock_t lock;
111 };
112 
113 struct mem_cgroup_tree {
114 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
115 };
116 
117 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
118 
119 /* for OOM */
120 struct mem_cgroup_eventfd_list {
121 	struct list_head list;
122 	struct eventfd_ctx *eventfd;
123 };
124 
125 /*
126  * cgroup_event represents events which userspace wants to receive.
127  */
128 struct mem_cgroup_event {
129 	/*
130 	 * memcg which the event belongs to.
131 	 */
132 	struct mem_cgroup *memcg;
133 	/*
134 	 * eventfd to signal userspace about the event.
135 	 */
136 	struct eventfd_ctx *eventfd;
137 	/*
138 	 * Each of these is stored in a list by the cgroup.
139 	 */
140 	struct list_head list;
141 	/*
142 	 * The register_event() callback will be used to add a new userspace
143 	 * waiter for changes related to this event.  Use eventfd_signal()
144 	 * on the eventfd to send a notification to userspace.
145 	 */
146 	int (*register_event)(struct mem_cgroup *memcg,
147 			      struct eventfd_ctx *eventfd, const char *args);
148 	/*
149 	 * The unregister_event() callback will be called when userspace closes
150 	 * the eventfd or on cgroup removal.  This callback must be set
151 	 * if you want to provide notification functionality.
152 	 */
153 	void (*unregister_event)(struct mem_cgroup *memcg,
154 				 struct eventfd_ctx *eventfd);
155 	/*
156 	 * All fields below are needed to unregister the event when
157 	 * userspace closes the eventfd.
158 	 */
159 	poll_table pt;
160 	wait_queue_head_t *wqh;
161 	wait_queue_entry_t wait;
162 	struct work_struct remove;
163 };
164 
165 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
166 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
167 
168 /* Stuff for moving charges at task migration. */
169 /*
170  * Types of charges to be moved.
171  */
172 #define MOVE_ANON	0x1U
173 #define MOVE_FILE	0x2U
174 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
175 
176 /* "mc" and its members are protected by cgroup_mutex */
177 static struct move_charge_struct {
178 	spinlock_t	  lock; /* for from, to */
179 	struct mm_struct  *mm;
180 	struct mem_cgroup *from;
181 	struct mem_cgroup *to;
182 	unsigned long flags;
183 	unsigned long precharge;
184 	unsigned long moved_charge;
185 	unsigned long moved_swap;
186 	struct task_struct *moving_task;	/* a task moving charges */
187 	wait_queue_head_t waitq;		/* a waitq for other context */
188 } mc = {
189 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
190 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
191 };
192 
193 /*
194  * Maximum loops in soft limit reclaim (see mem_cgroup_soft_reclaim()),
195  * used to prevent infinite loops, if they ever occur.
196  */
197 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
198 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
199 
200 enum charge_type {
201 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
202 	MEM_CGROUP_CHARGE_TYPE_ANON,
203 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
204 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
205 	NR_CHARGE_TYPE,
206 };
207 
208 /* for encoding cft->private value on file */
209 enum res_type {
210 	_MEM,
211 	_MEMSWAP,
212 	_OOM_TYPE,
213 	_KMEM,
214 	_TCP,
215 };
216 
217 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
218 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
219 #define MEMFILE_ATTR(val)	((val) & 0xffff)
220 /* Used for OOM notifier */
221 #define OOM_CONTROL		(0)
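
/*
 * For illustration: the cft->private value for the cgroup1
 * "memory.oom_control" file can be built and decoded as
 *
 *	val = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL);
 *	MEMFILE_TYPE(val) == _OOM_TYPE;		// type in the upper 16 bits
 *	MEMFILE_ATTR(val) == OOM_CONTROL;	// attribute in the lower 16 bits
 */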
222 
223 /*
224  * Iteration constructs for visiting all cgroups (under a tree).  If
225  * loops are exited prematurely (break), mem_cgroup_iter_break() must
226  * be used for reference counting.
227  */
228 #define for_each_mem_cgroup_tree(iter, root)		\
229 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
230 	     iter != NULL;				\
231 	     iter = mem_cgroup_iter(root, iter, NULL))
232 
233 #define for_each_mem_cgroup(iter)			\
234 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
235 	     iter != NULL;				\
236 	     iter = mem_cgroup_iter(NULL, iter, NULL))
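
/*
 * Usage sketch (stop_condition() is a hypothetical predicate): a walk
 * that exits early must hand the last visited memcg back to
 * mem_cgroup_iter_break() so that its css reference is dropped:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (stop_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */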
237 
238 static inline bool should_force_charge(void)
239 {
240 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
241 		(current->flags & PF_EXITING);
242 }
243 
244 /* Some nice accessors for the vmpressure. */
245 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
246 {
247 	if (!memcg)
248 		memcg = root_mem_cgroup;
249 	return &memcg->vmpressure;
250 }
251 
252 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
253 {
254 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
255 }
256 
257 #ifdef CONFIG_MEMCG_KMEM
258 extern spinlock_t css_set_lock;
259 
260 static void obj_cgroup_release(struct percpu_ref *ref)
261 {
262 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
263 	struct mem_cgroup *memcg;
264 	unsigned int nr_bytes;
265 	unsigned int nr_pages;
266 	unsigned long flags;
267 
268 	/*
269 	 * At this point all allocated objects are freed, and
270 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
271 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
272 	 *
273 	 * The following sequence can lead to it:
274 	 * 1) CPU0: objcg == stock->cached_objcg
275 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
276 	 *          PAGE_SIZE bytes are charged
277 	 * 3) CPU1: a process from another memcg is allocating something,
278 	 *          the stock is flushed,
279 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
280 	 * 4) CPU0: we release this object,
281 	 *          92 bytes are added to stock->nr_bytes
282 	 * 5) CPU0: the stock is flushed,
283 	 *          92 bytes are added to objcg->nr_charged_bytes
284 	 *
285 	 * As a result, nr_charged_bytes == PAGE_SIZE.
286 	 * This page will be uncharged in obj_cgroup_release().
287 	 */
288 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
289 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
290 	nr_pages = nr_bytes >> PAGE_SHIFT;
291 
292 	spin_lock_irqsave(&css_set_lock, flags);
293 	memcg = obj_cgroup_memcg(objcg);
294 	if (nr_pages)
295 		__memcg_kmem_uncharge(memcg, nr_pages);
296 	list_del(&objcg->list);
297 	mem_cgroup_put(memcg);
298 	spin_unlock_irqrestore(&css_set_lock, flags);
299 
300 	percpu_ref_exit(ref);
301 	kfree_rcu(objcg, rcu);
302 }
303 
304 static struct obj_cgroup *obj_cgroup_alloc(void)
305 {
306 	struct obj_cgroup *objcg;
307 	int ret;
308 
309 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
310 	if (!objcg)
311 		return NULL;
312 
313 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
314 			      GFP_KERNEL);
315 	if (ret) {
316 		kfree(objcg);
317 		return NULL;
318 	}
319 	INIT_LIST_HEAD(&objcg->list);
320 	return objcg;
321 }
322 
323 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
324 				  struct mem_cgroup *parent)
325 {
326 	struct obj_cgroup *objcg, *iter;
327 
328 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
329 
330 	spin_lock_irq(&css_set_lock);
331 
332 	/* Move active objcg to the parent's list */
333 	xchg(&objcg->memcg, parent);
334 	css_get(&parent->css);
335 	list_add(&objcg->list, &parent->objcg_list);
336 
337 	/* Move already reparented objcgs to the parent's list */
338 	list_for_each_entry(iter, &memcg->objcg_list, list) {
339 		css_get(&parent->css);
340 		xchg(&iter->memcg, parent);
341 		css_put(&memcg->css);
342 	}
343 	list_splice(&memcg->objcg_list, &parent->objcg_list);
344 
345 	spin_unlock_irq(&css_set_lock);
346 
347 	percpu_ref_kill(&objcg->refcnt);
348 }
349 
350 /*
351  * This will be used as a shrinker list's index.
352  * The main reason for not using the cgroup id for this:
353  *  it works better in sparse environments, where we have a lot of memcgs
354  *  but only a few of them are kmem-limited. For instance, if we had 200
355  *  memcgs and none but the 200th were kmem-limited, we'd need a
356  *  200-entry array for that.
357  *
358  * The current size of the caches array is stored in memcg_nr_cache_ids. It
359  * will double each time we have to increase it.
360  */
361 static DEFINE_IDA(memcg_cache_ida);
362 int memcg_nr_cache_ids;
363 
364 /* Protects memcg_nr_cache_ids */
365 static DECLARE_RWSEM(memcg_cache_ids_sem);
366 
367 void memcg_get_cache_ids(void)
368 {
369 	down_read(&memcg_cache_ids_sem);
370 }
371 
372 void memcg_put_cache_ids(void)
373 {
374 	up_read(&memcg_cache_ids_sem);
375 }
376 
377 /*
378  * MIN_SIZE is different from 1, because we would like to avoid going through
379  * the alloc/free process all the time. In a small machine, 4 kmem-limited
380  * cgroups is a reasonable guess. In the future, it could be a parameter or
381  * tunable, but that is strictly not necessary.
382  *
383  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
384  * this constant directly from cgroup, but it is understandable that this is
385  * better kept as an internal representation in cgroup.c. In any case, the
386  * cgrp_id space is not getting any smaller, and we don't have to necessarily
387  * increase ours as well if it increases.
388  */
389 #define MEMCG_CACHES_MIN_SIZE 4
390 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
391 
392 /*
393  * A lot of the calls to the cache allocation functions are expected to be
394  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
395  * conditional on this static branch, we have to allow modules that do
396  * kmem_cache_alloc and the like to see this symbol as well.
397  */
398 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
399 EXPORT_SYMBOL(memcg_kmem_enabled_key);
400 #endif
401 
402 static int memcg_shrinker_map_size;
403 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
404 
405 static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
406 {
407 	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
408 }
409 
410 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
411 					 int size, int old_size)
412 {
413 	struct memcg_shrinker_map *new, *old;
414 	int nid;
415 
416 	lockdep_assert_held(&memcg_shrinker_map_mutex);
417 
418 	for_each_node(nid) {
419 		old = rcu_dereference_protected(
420 			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
421 		/* Not yet online memcg */
422 		if (!old)
423 			return 0;
424 
425 		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
426 		if (!new)
427 			return -ENOMEM;
428 
429 		/* Set all old bits, clear all new bits */
430 		memset(new->map, (int)0xff, old_size);
431 		memset((void *)new->map + old_size, 0, size - old_size);
432 
433 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
434 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
435 	}
436 
437 	return 0;
438 }
439 
440 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
441 {
442 	struct mem_cgroup_per_node *pn;
443 	struct memcg_shrinker_map *map;
444 	int nid;
445 
446 	if (mem_cgroup_is_root(memcg))
447 		return;
448 
449 	for_each_node(nid) {
450 		pn = mem_cgroup_nodeinfo(memcg, nid);
451 		map = rcu_dereference_protected(pn->shrinker_map, true);
452 		if (map)
453 			kvfree(map);
454 		rcu_assign_pointer(pn->shrinker_map, NULL);
455 	}
456 }
457 
458 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
459 {
460 	struct memcg_shrinker_map *map;
461 	int nid, size, ret = 0;
462 
463 	if (mem_cgroup_is_root(memcg))
464 		return 0;
465 
466 	mutex_lock(&memcg_shrinker_map_mutex);
467 	size = memcg_shrinker_map_size;
468 	for_each_node(nid) {
469 		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
470 		if (!map) {
471 			memcg_free_shrinker_maps(memcg);
472 			ret = -ENOMEM;
473 			break;
474 		}
475 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
476 	}
477 	mutex_unlock(&memcg_shrinker_map_mutex);
478 
479 	return ret;
480 }
481 
482 int memcg_expand_shrinker_maps(int new_id)
483 {
484 	int size, old_size, ret = 0;
485 	struct mem_cgroup *memcg;
486 
487 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
488 	old_size = memcg_shrinker_map_size;
489 	if (size <= old_size)
490 		return 0;
491 
492 	mutex_lock(&memcg_shrinker_map_mutex);
493 	if (!root_mem_cgroup)
494 		goto unlock;
495 
496 	for_each_mem_cgroup(memcg) {
497 		if (mem_cgroup_is_root(memcg))
498 			continue;
499 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
500 		if (ret) {
501 			mem_cgroup_iter_break(NULL, memcg);
502 			goto unlock;
503 		}
504 	}
505 unlock:
506 	if (!ret)
507 		memcg_shrinker_map_size = size;
508 	mutex_unlock(&memcg_shrinker_map_mutex);
509 	return ret;
510 }
511 
512 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
513 {
514 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
515 		struct memcg_shrinker_map *map;
516 
517 		rcu_read_lock();
518 		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
519 		/* Pairs with smp mb in shrink_slab() */
520 		smp_mb__before_atomic();
521 		set_bit(shrinker_id, map->map);
522 		rcu_read_unlock();
523 	}
524 }
525 
526 /**
527  * mem_cgroup_css_from_page - css of the memcg associated with a page
528  * @page: page of interest
529  *
530  * If memcg is bound to the default hierarchy, css of the memcg associated
531  * with @page is returned.  The returned css remains associated with @page
532  * until it is released.
533  *
534  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
535  * is returned.
536  */
537 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
538 {
539 	struct mem_cgroup *memcg;
540 
541 	memcg = page->mem_cgroup;
542 
543 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
544 		memcg = root_mem_cgroup;
545 
546 	return &memcg->css;
547 }
548 
549 /**
550  * page_cgroup_ino - return inode number of the memcg a page is charged to
551  * @page: the page
552  *
553  * Look up the closest online ancestor of the memory cgroup @page is charged to
554  * and return its inode number or 0 if @page is not charged to any cgroup. It
555  * is safe to call this function without holding a reference to @page.
556  *
557  * Note, this function is inherently racy, because there is nothing to prevent
558  * the cgroup inode from getting torn down and potentially reallocated a moment
559  * after page_cgroup_ino() returns, so it should only be used by callers that
560  * do not care (such as procfs interfaces).
561  */
562 ino_t page_cgroup_ino(struct page *page)
563 {
564 	struct mem_cgroup *memcg;
565 	unsigned long ino = 0;
566 
567 	rcu_read_lock();
568 	memcg = page->mem_cgroup;
569 
570 	/*
571 	 * The lowest bit set means that memcg isn't a valid
572 	 * memcg pointer, but an obj_cgroups pointer.
573 	 * In this case the page is shared and doesn't belong
574 	 * to any specific memory cgroup.
575 	 */
576 	if ((unsigned long) memcg & 0x1UL)
577 		memcg = NULL;
578 
579 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
580 		memcg = parent_mem_cgroup(memcg);
581 	if (memcg)
582 		ino = cgroup_ino(memcg->css.cgroup);
583 	rcu_read_unlock();
584 	return ino;
585 }
586 
587 static struct mem_cgroup_per_node *
588 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
589 {
590 	int nid = page_to_nid(page);
591 
592 	return memcg->nodeinfo[nid];
593 }
594 
595 static struct mem_cgroup_tree_per_node *
596 soft_limit_tree_node(int nid)
597 {
598 	return soft_limit_tree.rb_tree_per_node[nid];
599 }
600 
601 static struct mem_cgroup_tree_per_node *
602 soft_limit_tree_from_page(struct page *page)
603 {
604 	int nid = page_to_nid(page);
605 
606 	return soft_limit_tree.rb_tree_per_node[nid];
607 }
608 
609 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
610 					 struct mem_cgroup_tree_per_node *mctz,
611 					 unsigned long new_usage_in_excess)
612 {
613 	struct rb_node **p = &mctz->rb_root.rb_node;
614 	struct rb_node *parent = NULL;
615 	struct mem_cgroup_per_node *mz_node;
616 	bool rightmost = true;
617 
618 	if (mz->on_tree)
619 		return;
620 
621 	mz->usage_in_excess = new_usage_in_excess;
622 	if (!mz->usage_in_excess)
623 		return;
624 	while (*p) {
625 		parent = *p;
626 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
627 					tree_node);
628 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
629 			p = &(*p)->rb_left;
630 			rightmost = false;
631 		}
632 
633 		/*
634 		 * We can't avoid mem cgroups that are over their soft
635 		 * limit by the same amount
636 		 */
637 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
638 			p = &(*p)->rb_right;
639 	}
640 
641 	if (rightmost)
642 		mctz->rb_rightmost = &mz->tree_node;
643 
644 	rb_link_node(&mz->tree_node, parent, p);
645 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
646 	mz->on_tree = true;
647 }
648 
649 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
650 					 struct mem_cgroup_tree_per_node *mctz)
651 {
652 	if (!mz->on_tree)
653 		return;
654 
655 	if (&mz->tree_node == mctz->rb_rightmost)
656 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
657 
658 	rb_erase(&mz->tree_node, &mctz->rb_root);
659 	mz->on_tree = false;
660 }
661 
662 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
663 				       struct mem_cgroup_tree_per_node *mctz)
664 {
665 	unsigned long flags;
666 
667 	spin_lock_irqsave(&mctz->lock, flags);
668 	__mem_cgroup_remove_exceeded(mz, mctz);
669 	spin_unlock_irqrestore(&mctz->lock, flags);
670 }
671 
672 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
673 {
674 	unsigned long nr_pages = page_counter_read(&memcg->memory);
675 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
676 	unsigned long excess = 0;
677 
678 	if (nr_pages > soft_limit)
679 		excess = nr_pages - soft_limit;
680 
681 	return excess;
682 }
683 
684 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
685 {
686 	unsigned long excess;
687 	struct mem_cgroup_per_node *mz;
688 	struct mem_cgroup_tree_per_node *mctz;
689 
690 	mctz = soft_limit_tree_from_page(page);
691 	if (!mctz)
692 		return;
693 	/*
694 	 * Necessary to update all ancestors when hierarchy is used,
695 	 * because their event counters are not touched.
696 	 */
697 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
698 		mz = mem_cgroup_page_nodeinfo(memcg, page);
699 		excess = soft_limit_excess(memcg);
700 		/*
701 		 * We have to update the tree if mz is on RB-tree or
702 		 * mem is over its softlimit.
703 		 */
704 		if (excess || mz->on_tree) {
705 			unsigned long flags;
706 
707 			spin_lock_irqsave(&mctz->lock, flags);
708 			/* if on-tree, remove it */
709 			if (mz->on_tree)
710 				__mem_cgroup_remove_exceeded(mz, mctz);
711 			/*
712 			 * Insert again. mz->usage_in_excess will be updated.
713 			 * If excess is 0, no tree ops.
714 			 */
715 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
716 			spin_unlock_irqrestore(&mctz->lock, flags);
717 		}
718 	}
719 }
720 
721 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
722 {
723 	struct mem_cgroup_tree_per_node *mctz;
724 	struct mem_cgroup_per_node *mz;
725 	int nid;
726 
727 	for_each_node(nid) {
728 		mz = mem_cgroup_nodeinfo(memcg, nid);
729 		mctz = soft_limit_tree_node(nid);
730 		if (mctz)
731 			mem_cgroup_remove_exceeded(mz, mctz);
732 	}
733 }
734 
735 static struct mem_cgroup_per_node *
736 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
737 {
738 	struct mem_cgroup_per_node *mz;
739 
740 retry:
741 	mz = NULL;
742 	if (!mctz->rb_rightmost)
743 		goto done;		/* Nothing to reclaim from */
744 
745 	mz = rb_entry(mctz->rb_rightmost,
746 		      struct mem_cgroup_per_node, tree_node);
747 	/*
748 	 * Remove the node now but someone else can add it back,
749 	 * we will add it back at the end of reclaim to its correct
750 	 * position in the tree.
751 	 */
752 	__mem_cgroup_remove_exceeded(mz, mctz);
753 	if (!soft_limit_excess(mz->memcg) ||
754 	    !css_tryget(&mz->memcg->css))
755 		goto retry;
756 done:
757 	return mz;
758 }
759 
760 static struct mem_cgroup_per_node *
761 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
762 {
763 	struct mem_cgroup_per_node *mz;
764 
765 	spin_lock_irq(&mctz->lock);
766 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
767 	spin_unlock_irq(&mctz->lock);
768 	return mz;
769 }
770 
771 /**
772  * __mod_memcg_state - update cgroup memory statistics
773  * @memcg: the memory cgroup
774  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
775  * @val: delta to add to the counter, can be negative
776  */
777 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
778 {
779 	long x, threshold = MEMCG_CHARGE_BATCH;
780 
781 	if (mem_cgroup_disabled())
782 		return;
783 
784 	if (vmstat_item_in_bytes(idx))
785 		threshold <<= PAGE_SHIFT;
786 
787 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
788 	if (unlikely(abs(x) > threshold)) {
789 		struct mem_cgroup *mi;
790 
791 		/*
792 		 * Batch local counters to keep them in sync with
793 		 * the hierarchical ones.
794 		 */
795 		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
796 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
797 			atomic_long_add(x, &mi->vmstats[idx]);
798 		x = 0;
799 	}
800 	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
801 }
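
/*
 * Worked example (assuming MEMCG_CHARGE_BATCH is 32 pages): repeated
 * __mod_memcg_state(memcg, NR_FILE_PAGES, 1) calls only touch the
 * per-cpu counter until the pending delta exceeds the batch size; at
 * that point the accumulated delta is added to the atomic counters of
 * @memcg and all of its ancestors, and the per-cpu counter is reset
 * to 0.
 */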
802 
803 static struct mem_cgroup_per_node *
804 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
805 {
806 	struct mem_cgroup *parent;
807 
808 	parent = parent_mem_cgroup(pn->memcg);
809 	if (!parent)
810 		return NULL;
811 	return mem_cgroup_nodeinfo(parent, nid);
812 }
813 
814 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
815 			      int val)
816 {
817 	struct mem_cgroup_per_node *pn;
818 	struct mem_cgroup *memcg;
819 	long x, threshold = MEMCG_CHARGE_BATCH;
820 
821 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
822 	memcg = pn->memcg;
823 
824 	/* Update memcg */
825 	__mod_memcg_state(memcg, idx, val);
826 
827 	/* Update lruvec */
828 	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
829 
830 	if (vmstat_item_in_bytes(idx))
831 		threshold <<= PAGE_SHIFT;
832 
833 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
834 	if (unlikely(abs(x) > threshold)) {
835 		pg_data_t *pgdat = lruvec_pgdat(lruvec);
836 		struct mem_cgroup_per_node *pi;
837 
838 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
839 			atomic_long_add(x, &pi->lruvec_stat[idx]);
840 		x = 0;
841 	}
842 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
843 }
844 
845 /**
846  * __mod_lruvec_state - update lruvec memory statistics
847  * @lruvec: the lruvec
848  * @idx: the stat item
849  * @val: delta to add to the counter, can be negative
850  *
851  * The lruvec is the intersection of the NUMA node and a cgroup. This
852  * function updates all three counters that are affected by a
853  * change of state at this level: per-node, per-cgroup, per-lruvec.
854  */
855 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
856 			int val)
857 {
858 	/* Update node */
859 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
860 
861 	/* Update memcg and lruvec */
862 	if (!mem_cgroup_disabled())
863 		__mod_memcg_lruvec_state(lruvec, idx, val);
864 }
865 
866 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
867 {
868 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
869 	struct mem_cgroup *memcg;
870 	struct lruvec *lruvec;
871 
872 	rcu_read_lock();
873 	memcg = mem_cgroup_from_obj(p);
874 
875 	/* Untracked pages have no memcg, no lruvec. Update only the node */
876 	if (!memcg || memcg == root_mem_cgroup) {
877 		__mod_node_page_state(pgdat, idx, val);
878 	} else {
879 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
880 		__mod_lruvec_state(lruvec, idx, val);
881 	}
882 	rcu_read_unlock();
883 }
884 
885 void mod_memcg_obj_state(void *p, int idx, int val)
886 {
887 	struct mem_cgroup *memcg;
888 
889 	rcu_read_lock();
890 	memcg = mem_cgroup_from_obj(p);
891 	if (memcg)
892 		mod_memcg_state(memcg, idx, val);
893 	rcu_read_unlock();
894 }
895 
896 /**
897  * __count_memcg_events - account VM events in a cgroup
898  * @memcg: the memory cgroup
899  * @idx: the event item
900  * @count: the number of events that occurred
901  */
902 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
903 			  unsigned long count)
904 {
905 	unsigned long x;
906 
907 	if (mem_cgroup_disabled())
908 		return;
909 
910 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
911 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
912 		struct mem_cgroup *mi;
913 
914 		/*
915 		 * Batch local counters to keep them in sync with
916 		 * the hierarchical ones.
917 		 */
918 		__this_cpu_add(memcg->vmstats_local->events[idx], x);
919 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
920 			atomic_long_add(x, &mi->vmevents[idx]);
921 		x = 0;
922 	}
923 	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
924 }
925 
926 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
927 {
928 	return atomic_long_read(&memcg->vmevents[event]);
929 }
930 
931 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
932 {
933 	long x = 0;
934 	int cpu;
935 
936 	for_each_possible_cpu(cpu)
937 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
938 	return x;
939 }
940 
941 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
942 					 struct page *page,
943 					 int nr_pages)
944 {
945 	/* pagein of a big page is an event. So, ignore page size */
946 	if (nr_pages > 0)
947 		__count_memcg_events(memcg, PGPGIN, 1);
948 	else {
949 		__count_memcg_events(memcg, PGPGOUT, 1);
950 		nr_pages = -nr_pages; /* for event */
951 	}
952 
953 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
954 }
955 
956 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
957 				       enum mem_cgroup_events_target target)
958 {
959 	unsigned long val, next;
960 
961 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
962 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
963 	/* from time_after() in jiffies.h */
964 	if ((long)(next - val) < 0) {
965 		switch (target) {
966 		case MEM_CGROUP_TARGET_THRESH:
967 			next = val + THRESHOLDS_EVENTS_TARGET;
968 			break;
969 		case MEM_CGROUP_TARGET_SOFTLIMIT:
970 			next = val + SOFTLIMIT_EVENTS_TARGET;
971 			break;
972 		default:
973 			break;
974 		}
975 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
976 		return true;
977 	}
978 	return false;
979 }
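
/*
 * Worked example: the "(long)(next - val) < 0" test is a
 * wraparound-safe "val > next".  If val has reached 1000 page events
 * and the stored target next is 900, then (long)(900 - 1000) == -100,
 * so the target fired; for MEM_CGROUP_TARGET_THRESH, next is advanced
 * to 1000 + THRESHOLDS_EVENTS_TARGET == 1128.  This keeps working even
 * after nr_page_events wraps around ULONG_MAX.
 */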
980 
981 /*
982  * Check events in order: thresholds are checked first, then the
983  * soft limit tree is updated.
984  */
985 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
986 {
987 	/* the threshold event is triggered at a finer grain than the soft limit */
988 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
989 						MEM_CGROUP_TARGET_THRESH))) {
990 		bool do_softlimit;
991 
992 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
993 						MEM_CGROUP_TARGET_SOFTLIMIT);
994 		mem_cgroup_threshold(memcg);
995 		if (unlikely(do_softlimit))
996 			mem_cgroup_update_tree(memcg, page);
997 	}
998 }
999 
1000 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1001 {
1002 	/*
1003 	 * mm_update_next_owner() may clear mm->owner to NULL
1004 	 * if it races with swapoff, page migration, etc.
1005 	 * So this can be called with p == NULL.
1006 	 */
1007 	if (unlikely(!p))
1008 		return NULL;
1009 
1010 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1011 }
1012 EXPORT_SYMBOL(mem_cgroup_from_task);
1013 
1014 /**
1015  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1016  * @mm: mm from which memcg should be extracted. It can be NULL.
1017  *
1018  * Obtain a reference on mm->memcg and return it if successful. Otherwise
1019  * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
1020  * returned.
1021  */
1022 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1023 {
1024 	struct mem_cgroup *memcg;
1025 
1026 	if (mem_cgroup_disabled())
1027 		return NULL;
1028 
1029 	rcu_read_lock();
1030 	do {
1031 		/*
1032 		 * Page cache insertions can happen without an
1033 		 * actual mm context, e.g. during disk probing
1034 		 * on boot, loopback IO, acct() writes etc.
1035 		 */
1036 		if (unlikely(!mm))
1037 			memcg = root_mem_cgroup;
1038 		else {
1039 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1040 			if (unlikely(!memcg))
1041 				memcg = root_mem_cgroup;
1042 		}
1043 	} while (!css_tryget(&memcg->css));
1044 	rcu_read_unlock();
1045 	return memcg;
1046 }
1047 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1048 
1049 /**
1050  * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
1051  * @page: page from which memcg should be extracted.
1052  *
1053  * Obtain a reference on page->memcg and return it if successful. Otherwise
1054  * root_mem_cgroup is returned.
1055  */
1056 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1057 {
1058 	struct mem_cgroup *memcg = page->mem_cgroup;
1059 
1060 	if (mem_cgroup_disabled())
1061 		return NULL;
1062 
1063 	rcu_read_lock();
1064 	/* Page should not get uncharged and freed memcg under us. */
1065 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
1066 		memcg = root_mem_cgroup;
1067 	rcu_read_unlock();
1068 	return memcg;
1069 }
1070 EXPORT_SYMBOL(get_mem_cgroup_from_page);
1071 
1072 /**
1073  * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
1074  */
1075 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1076 {
1077 	if (unlikely(current->active_memcg)) {
1078 		struct mem_cgroup *memcg;
1079 
1080 		rcu_read_lock();
1081 		/* current->active_memcg must hold a ref. */
1082 		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
1083 			memcg = root_mem_cgroup;
1084 		else
1085 			memcg = current->active_memcg;
1086 		rcu_read_unlock();
1087 		return memcg;
1088 	}
1089 	return get_mem_cgroup_from_mm(current->mm);
1090 }
1091 
1092 /**
1093  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1094  * @root: hierarchy root
1095  * @prev: previously returned memcg, NULL on first invocation
1096  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1097  *
1098  * Returns references to children of the hierarchy below @root, or
1099  * @root itself, or %NULL after a full round-trip.
1100  *
1101  * Caller must pass the return value in @prev on subsequent
1102  * invocations for reference counting, or use mem_cgroup_iter_break()
1103  * to cancel a hierarchy walk before the round-trip is complete.
1104  *
1105  * Reclaimers can specify a node and a priority level in @reclaim to
1106  * divide up the memcgs in the hierarchy among all concurrent
1107  * reclaimers operating on the same node and priority.
1108  */
1109 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1110 				   struct mem_cgroup *prev,
1111 				   struct mem_cgroup_reclaim_cookie *reclaim)
1112 {
1113 	struct mem_cgroup_reclaim_iter *iter;
1114 	struct cgroup_subsys_state *css = NULL;
1115 	struct mem_cgroup *memcg = NULL;
1116 	struct mem_cgroup *pos = NULL;
1117 
1118 	if (mem_cgroup_disabled())
1119 		return NULL;
1120 
1121 	if (!root)
1122 		root = root_mem_cgroup;
1123 
1124 	if (prev && !reclaim)
1125 		pos = prev;
1126 
1127 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1128 		if (prev)
1129 			goto out;
1130 		return root;
1131 	}
1132 
1133 	rcu_read_lock();
1134 
1135 	if (reclaim) {
1136 		struct mem_cgroup_per_node *mz;
1137 
1138 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1139 		iter = &mz->iter;
1140 
1141 		if (prev && reclaim->generation != iter->generation)
1142 			goto out_unlock;
1143 
1144 		while (1) {
1145 			pos = READ_ONCE(iter->position);
1146 			if (!pos || css_tryget(&pos->css))
1147 				break;
1148 			/*
1149 			 * css reference reached zero, so iter->position will
1150 			 * be cleared by ->css_released. However, we should not
1151 			 * rely on this happening soon, because ->css_released
1152 			 * is called from a work queue, and by busy-waiting we
1153 			 * might block it. So we clear iter->position right
1154 			 * away.
1155 			 */
1156 			(void)cmpxchg(&iter->position, pos, NULL);
1157 		}
1158 	}
1159 
1160 	if (pos)
1161 		css = &pos->css;
1162 
1163 	for (;;) {
1164 		css = css_next_descendant_pre(css, &root->css);
1165 		if (!css) {
1166 			/*
1167 			 * Reclaimers share the hierarchy walk, and a
1168 			 * new one might jump in right at the end of
1169 			 * the hierarchy - make sure they see at least
1170 			 * one group and restart from the beginning.
1171 			 */
1172 			if (!prev)
1173 				continue;
1174 			break;
1175 		}
1176 
1177 		/*
1178 		 * Verify the css and acquire a reference.  The root
1179 		 * is provided by the caller, so we know it's alive
1180 		 * and kicking, and don't take an extra reference.
1181 		 */
1182 		memcg = mem_cgroup_from_css(css);
1183 
1184 		if (css == &root->css)
1185 			break;
1186 
1187 		if (css_tryget(css))
1188 			break;
1189 
1190 		memcg = NULL;
1191 	}
1192 
1193 	if (reclaim) {
1194 		/*
1195 		 * The position could have already been updated by a competing
1196 		 * thread, so check that the value hasn't changed since we read
1197 		 * it to avoid reclaiming from the same cgroup twice.
1198 		 */
1199 		(void)cmpxchg(&iter->position, pos, memcg);
1200 
1201 		if (pos)
1202 			css_put(&pos->css);
1203 
1204 		if (!memcg)
1205 			iter->generation++;
1206 		else if (!prev)
1207 			reclaim->generation = iter->generation;
1208 	}
1209 
1210 out_unlock:
1211 	rcu_read_unlock();
1212 out:
1213 	if (prev && prev != root)
1214 		css_put(&prev->css);
1215 
1216 	return memcg;
1217 }
1218 
1219 /**
1220  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1221  * @root: hierarchy root
1222  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1223  */
1224 void mem_cgroup_iter_break(struct mem_cgroup *root,
1225 			   struct mem_cgroup *prev)
1226 {
1227 	if (!root)
1228 		root = root_mem_cgroup;
1229 	if (prev && prev != root)
1230 		css_put(&prev->css);
1231 }
1232 
1233 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1234 					struct mem_cgroup *dead_memcg)
1235 {
1236 	struct mem_cgroup_reclaim_iter *iter;
1237 	struct mem_cgroup_per_node *mz;
1238 	int nid;
1239 
1240 	for_each_node(nid) {
1241 		mz = mem_cgroup_nodeinfo(from, nid);
1242 		iter = &mz->iter;
1243 		cmpxchg(&iter->position, dead_memcg, NULL);
1244 	}
1245 }
1246 
1247 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1248 {
1249 	struct mem_cgroup *memcg = dead_memcg;
1250 	struct mem_cgroup *last;
1251 
1252 	do {
1253 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1254 		last = memcg;
1255 	} while ((memcg = parent_mem_cgroup(memcg)));
1256 
1257 	/*
1258 	 * When cgroup1 non-hierarchy mode is used,
1259 	 * parent_mem_cgroup() does not walk all the way up to the
1260 	 * cgroup root (root_mem_cgroup). So we have to handle
1261 	 * dead_memcg from cgroup root separately.
1262 	 */
1263 	if (last != root_mem_cgroup)
1264 		__invalidate_reclaim_iterators(root_mem_cgroup,
1265 						dead_memcg);
1266 }
1267 
1268 /**
1269  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1270  * @memcg: hierarchy root
1271  * @fn: function to call for each task
1272  * @arg: argument passed to @fn
1273  *
1274  * This function iterates over tasks attached to @memcg or to any of its
1275  * descendants and calls @fn for each task. If @fn returns a non-zero
1276  * value, the function breaks the iteration loop and returns the value.
1277  * Otherwise, it will iterate over all tasks and return 0.
1278  *
1279  * This function must not be called for the root memory cgroup.
1280  */
1281 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1282 			  int (*fn)(struct task_struct *, void *), void *arg)
1283 {
1284 	struct mem_cgroup *iter;
1285 	int ret = 0;
1286 
1287 	BUG_ON(memcg == root_mem_cgroup);
1288 
1289 	for_each_mem_cgroup_tree(iter, memcg) {
1290 		struct css_task_iter it;
1291 		struct task_struct *task;
1292 
1293 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1294 		while (!ret && (task = css_task_iter_next(&it)))
1295 			ret = fn(task, arg);
1296 		css_task_iter_end(&it);
1297 		if (ret) {
1298 			mem_cgroup_iter_break(memcg, iter);
1299 			break;
1300 		}
1301 	}
1302 	return ret;
1303 }
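
/*
 * Usage sketch (count_task() is a hypothetical callback):
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;	// 0 keeps the iteration going
 *	}
 *
 *	unsigned int nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */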
1304 
1305 /**
1306  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1307  * @page: the page
1308  * @pgdat: pgdat of the page
1309  *
1310  * This function relies on page->mem_cgroup being stable - see the
1311  * access rules in commit_charge().
1312  */
1313 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1314 {
1315 	struct mem_cgroup_per_node *mz;
1316 	struct mem_cgroup *memcg;
1317 	struct lruvec *lruvec;
1318 
1319 	if (mem_cgroup_disabled()) {
1320 		lruvec = &pgdat->__lruvec;
1321 		goto out;
1322 	}
1323 
1324 	memcg = page->mem_cgroup;
1325 	/*
1326 	 * Swapcache readahead pages are added to the LRU - and
1327 	 * possibly migrated - before they are charged.
1328 	 */
1329 	if (!memcg)
1330 		memcg = root_mem_cgroup;
1331 
1332 	mz = mem_cgroup_page_nodeinfo(memcg, page);
1333 	lruvec = &mz->lruvec;
1334 out:
1335 	/*
1336 	 * Since a node can be onlined after the mem_cgroup was created,
1337 	 * we have to be prepared to initialize lruvec->pgdat here;
1338 	 * and if offlined then reonlined, we need to reinitialize it.
1339 	 */
1340 	if (unlikely(lruvec->pgdat != pgdat))
1341 		lruvec->pgdat = pgdat;
1342 	return lruvec;
1343 }
1344 
1345 /**
1346  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1347  * @lruvec: mem_cgroup per zone lru vector
1348  * @lru: index of lru list the page is sitting on
1349  * @zid: zone id of the accounted pages
1350  * @nr_pages: positive when adding or negative when removing
1351  *
1352  * This function must be called under lru_lock, just before a page is added
1353  * to or just after a page is removed from an lru list (that ordering being
1354  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1355  */
1356 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1357 				int zid, int nr_pages)
1358 {
1359 	struct mem_cgroup_per_node *mz;
1360 	unsigned long *lru_size;
1361 	long size;
1362 
1363 	if (mem_cgroup_disabled())
1364 		return;
1365 
1366 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1367 	lru_size = &mz->lru_zone_size[zid][lru];
1368 
1369 	if (nr_pages < 0)
1370 		*lru_size += nr_pages;
1371 
1372 	size = *lru_size;
1373 	if (WARN_ONCE(size < 0,
1374 		"%s(%p, %d, %d): lru_size %ld\n",
1375 		__func__, lruvec, lru, nr_pages, size)) {
1376 		VM_BUG_ON(1);
1377 		*lru_size = 0;
1378 	}
1379 
1380 	if (nr_pages > 0)
1381 		*lru_size += nr_pages;
1382 }
1383 
1384 /**
1385  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1386  * @memcg: the memory cgroup
1387  *
1388  * Returns the maximum amount of memory @memcg can be charged with, in
1389  * pages.
1390  */
1391 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1392 {
1393 	unsigned long margin = 0;
1394 	unsigned long count;
1395 	unsigned long limit;
1396 
1397 	count = page_counter_read(&memcg->memory);
1398 	limit = READ_ONCE(memcg->memory.max);
1399 	if (count < limit)
1400 		margin = limit - count;
1401 
1402 	if (do_memsw_account()) {
1403 		count = page_counter_read(&memcg->memsw);
1404 		limit = READ_ONCE(memcg->memsw.max);
1405 		if (count < limit)
1406 			margin = min(margin, limit - count);
1407 		else
1408 			margin = 0;
1409 	}
1410 
1411 	return margin;
1412 }
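
/*
 * Worked example: with a memory usage of 100 pages against a limit of
 * 150, the margin is 50 pages.  If memsw accounting is also active,
 * with a usage of 140 pages against a limit of 160, the result is
 * min(50, 20) == 20 pages.
 */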
1413 
1414 /*
1415  * A routine for checking whether "memcg" is under move_account() or not.
1416  *
1417  * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
1418  * moving cgroups. This is for waiting at high memory pressure
1419  * caused by "move".
1420  */
1421 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1422 {
1423 	struct mem_cgroup *from;
1424 	struct mem_cgroup *to;
1425 	bool ret = false;
1426 	/*
1427 	 * Unlike task_move routines, we access mc.to, mc.from not under
1428 	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1429 	 */
1430 	spin_lock(&mc.lock);
1431 	from = mc.from;
1432 	to = mc.to;
1433 	if (!from)
1434 		goto unlock;
1435 
1436 	ret = mem_cgroup_is_descendant(from, memcg) ||
1437 		mem_cgroup_is_descendant(to, memcg);
1438 unlock:
1439 	spin_unlock(&mc.lock);
1440 	return ret;
1441 }
1442 
1443 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1444 {
1445 	if (mc.moving_task && current != mc.moving_task) {
1446 		if (mem_cgroup_under_move(memcg)) {
1447 			DEFINE_WAIT(wait);
1448 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1449 			/* moving charge context might have finished. */
1450 			if (mc.moving_task)
1451 				schedule();
1452 			finish_wait(&mc.waitq, &wait);
1453 			return true;
1454 		}
1455 	}
1456 	return false;
1457 }
1458 
1459 static char *memory_stat_format(struct mem_cgroup *memcg)
1460 {
1461 	struct seq_buf s;
1462 	int i;
1463 
1464 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1465 	if (!s.buffer)
1466 		return NULL;
1467 
1468 	/*
1469 	 * Provide statistics on the state of the memory subsystem as
1470 	 * well as cumulative event counters that show past behavior.
1471 	 *
1472 	 * This list is ordered following a combination of these gradients:
1473 	 * 1) generic big picture -> specifics and details
1474 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1475 	 *
1476 	 * Current memory state:
1477 	 */
1478 
1479 	seq_buf_printf(&s, "anon %llu\n",
1480 		       (u64)memcg_page_state(memcg, NR_ANON_MAPPED) *
1481 		       PAGE_SIZE);
1482 	seq_buf_printf(&s, "file %llu\n",
1483 		       (u64)memcg_page_state(memcg, NR_FILE_PAGES) *
1484 		       PAGE_SIZE);
1485 	seq_buf_printf(&s, "kernel_stack %llu\n",
1486 		       (u64)memcg_page_state(memcg, NR_KERNEL_STACK_KB) *
1487 		       1024);
1488 	seq_buf_printf(&s, "slab %llu\n",
1489 		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
1490 			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)));
1491 	seq_buf_printf(&s, "sock %llu\n",
1492 		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
1493 		       PAGE_SIZE);
1494 
1495 	seq_buf_printf(&s, "shmem %llu\n",
1496 		       (u64)memcg_page_state(memcg, NR_SHMEM) *
1497 		       PAGE_SIZE);
1498 	seq_buf_printf(&s, "file_mapped %llu\n",
1499 		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
1500 		       PAGE_SIZE);
1501 	seq_buf_printf(&s, "file_dirty %llu\n",
1502 		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
1503 		       PAGE_SIZE);
1504 	seq_buf_printf(&s, "file_writeback %llu\n",
1505 		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
1506 		       PAGE_SIZE);
1507 
1508 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1509 	seq_buf_printf(&s, "anon_thp %llu\n",
1510 		       (u64)memcg_page_state(memcg, NR_ANON_THPS) *
1511 		       HPAGE_PMD_SIZE);
1512 #endif
1513 
1514 	for (i = 0; i < NR_LRU_LISTS; i++)
1515 		seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
1516 			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
1517 			       PAGE_SIZE);
1518 
1519 	seq_buf_printf(&s, "slab_reclaimable %llu\n",
1520 		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B));
1521 	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
1522 		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B));
1523 
1524 	/* Accumulated memory events */
1525 
1526 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1527 		       memcg_events(memcg, PGFAULT));
1528 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1529 		       memcg_events(memcg, PGMAJFAULT));
1530 
1531 	seq_buf_printf(&s, "workingset_refault %lu\n",
1532 		       memcg_page_state(memcg, WORKINGSET_REFAULT));
1533 	seq_buf_printf(&s, "workingset_activate %lu\n",
1534 		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
1535 	seq_buf_printf(&s, "workingset_restore %lu\n",
1536 		       memcg_page_state(memcg, WORKINGSET_RESTORE));
1537 	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
1538 		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
1539 
1540 	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1541 		       memcg_events(memcg, PGREFILL));
1542 	seq_buf_printf(&s, "pgscan %lu\n",
1543 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1544 		       memcg_events(memcg, PGSCAN_DIRECT));
1545 	seq_buf_printf(&s, "pgsteal %lu\n",
1546 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1547 		       memcg_events(memcg, PGSTEAL_DIRECT));
1548 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1549 		       memcg_events(memcg, PGACTIVATE));
1550 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1551 		       memcg_events(memcg, PGDEACTIVATE));
1552 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1553 		       memcg_events(memcg, PGLAZYFREE));
1554 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1555 		       memcg_events(memcg, PGLAZYFREED));
1556 
1557 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1558 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1559 		       memcg_events(memcg, THP_FAULT_ALLOC));
1560 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1561 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1562 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1563 
1564 	/* The above should easily fit into one page */
1565 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1566 
1567 	return s.buffer;
1568 }
1569 
1570 #define K(x) ((x) << (PAGE_SHIFT-10))
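/* e.g. with 4KiB pages (PAGE_SHIFT == 12), K(x) == x * 4, so K(256) == 1024 kB */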
1571 /**
1572  * mem_cgroup_print_oom_context: Print OOM information relevant to
1573  * memory controller.
1574  * @memcg: The memory cgroup that went over limit
1575  * @p: Task that is going to be killed
1576  *
1577  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1578  * enabled
1579  */
1580 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1581 {
1582 	rcu_read_lock();
1583 
1584 	if (memcg) {
1585 		pr_cont(",oom_memcg=");
1586 		pr_cont_cgroup_path(memcg->css.cgroup);
1587 	} else
1588 		pr_cont(",global_oom");
1589 	if (p) {
1590 		pr_cont(",task_memcg=");
1591 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1592 	}
1593 	rcu_read_unlock();
1594 }
1595 
1596 /**
1597  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1598  * memory controller.
1599  * @memcg: The memory cgroup that went over limit
1600  */
1601 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1602 {
1603 	char *buf;
1604 
1605 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1606 		K((u64)page_counter_read(&memcg->memory)),
1607 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1608 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1609 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1610 			K((u64)page_counter_read(&memcg->swap)),
1611 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1612 	else {
1613 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1614 			K((u64)page_counter_read(&memcg->memsw)),
1615 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1616 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1617 			K((u64)page_counter_read(&memcg->kmem)),
1618 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1619 	}
1620 
1621 	pr_info("Memory cgroup stats for ");
1622 	pr_cont_cgroup_path(memcg->css.cgroup);
1623 	pr_cont(":");
1624 	buf = memory_stat_format(memcg);
1625 	if (!buf)
1626 		return;
1627 	pr_info("%s", buf);
1628 	kfree(buf);
1629 }
1630 
1631 /*
1632  * Return the memory (and swap, if configured) limit for a memcg.
1633  */
1634 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1635 {
1636 	unsigned long max;
1637 
1638 	max = READ_ONCE(memcg->memory.max);
1639 	if (mem_cgroup_swappiness(memcg)) {
1640 		unsigned long memsw_max;
1641 		unsigned long swap_max;
1642 
1643 		memsw_max = memcg->memsw.max;
1644 		swap_max = READ_ONCE(memcg->swap.max);
1645 		swap_max = min(swap_max, (unsigned long)total_swap_pages);
1646 		max = min(max + swap_max, memsw_max);
1647 	}
1648 	return max;
1649 }
1650 
1651 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1652 {
1653 	return page_counter_read(&memcg->memory);
1654 }
1655 
1656 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1657 				     int order)
1658 {
1659 	struct oom_control oc = {
1660 		.zonelist = NULL,
1661 		.nodemask = NULL,
1662 		.memcg = memcg,
1663 		.gfp_mask = gfp_mask,
1664 		.order = order,
1665 	};
1666 	bool ret = true;
1667 
1668 	if (mutex_lock_killable(&oom_lock))
1669 		return true;
1670 
1671 	if (mem_cgroup_margin(memcg) >= (1 << order))
1672 		goto unlock;
1673 
1674 	/*
1675 	 * A few threads which were not waiting at mutex_lock_killable() can
1676 	 * fail to bail out. Therefore, check again after holding oom_lock.
1677 	 */
1678 	ret = should_force_charge() || out_of_memory(&oc);
1679 
1680 unlock:
1681 	mutex_unlock(&oom_lock);
1682 	return ret;
1683 }
1684 
1685 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1686 				   pg_data_t *pgdat,
1687 				   gfp_t gfp_mask,
1688 				   unsigned long *total_scanned)
1689 {
1690 	struct mem_cgroup *victim = NULL;
1691 	int total = 0;
1692 	int loop = 0;
1693 	unsigned long excess;
1694 	unsigned long nr_scanned;
1695 	struct mem_cgroup_reclaim_cookie reclaim = {
1696 		.pgdat = pgdat,
1697 	};
1698 
1699 	excess = soft_limit_excess(root_memcg);
1700 
1701 	while (1) {
1702 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1703 		if (!victim) {
1704 			loop++;
1705 			if (loop >= 2) {
1706 				/*
1707 				 * If we have not been able to reclaim
1708 				 * anything, it might be because there are
1709 				 * no reclaimable pages under this hierarchy
1710 				 */
1711 				if (!total)
1712 					break;
1713 				/*
1714 				 * We want to do more targeted reclaim.
1715 				 * excess >> 2 is not too excessive, so as not to
1716 				 * reclaim too much, nor too small, so that we don't
1717 				 * keep coming back to reclaim from this cgroup
1718 				 */
1719 				if (total >= (excess >> 2) ||
1720 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1721 					break;
1722 			}
1723 			continue;
1724 		}
1725 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1726 					pgdat, &nr_scanned);
1727 		*total_scanned += nr_scanned;
1728 		if (!soft_limit_excess(root_memcg))
1729 			break;
1730 	}
1731 	mem_cgroup_iter_break(root_memcg, victim);
1732 	return total;
1733 }
1734 
1735 #ifdef CONFIG_LOCKDEP
1736 static struct lockdep_map memcg_oom_lock_dep_map = {
1737 	.name = "memcg_oom_lock",
1738 };
1739 #endif
1740 
1741 static DEFINE_SPINLOCK(memcg_oom_lock);
1742 
1743 /*
1744  * Check whether the OOM killer is already running under our hierarchy.
1745  * If someone is running it, return false.
1746  */
1747 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1748 {
1749 	struct mem_cgroup *iter, *failed = NULL;
1750 
1751 	spin_lock(&memcg_oom_lock);
1752 
1753 	for_each_mem_cgroup_tree(iter, memcg) {
1754 		if (iter->oom_lock) {
1755 			/*
1756 			 * this subtree of our hierarchy is already locked
1757 			 * so we cannot acquire the lock.
1758 			 */
1759 			failed = iter;
1760 			mem_cgroup_iter_break(memcg, iter);
1761 			break;
1762 		} else
1763 			iter->oom_lock = true;
1764 	}
1765 
1766 	if (failed) {
1767 		/*
1768 		 * OK, we failed to lock the whole subtree so we have
1769 		 * to clean up what we already set up, up to the failing subtree
1770 		 */
1771 		for_each_mem_cgroup_tree(iter, memcg) {
1772 			if (iter == failed) {
1773 				mem_cgroup_iter_break(memcg, iter);
1774 				break;
1775 			}
1776 			iter->oom_lock = false;
1777 		}
1778 	} else
1779 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1780 
1781 	spin_unlock(&memcg_oom_lock);
1782 
1783 	return !failed;
1784 }
1785 
1786 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1787 {
1788 	struct mem_cgroup *iter;
1789 
1790 	spin_lock(&memcg_oom_lock);
1791 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1792 	for_each_mem_cgroup_tree(iter, memcg)
1793 		iter->oom_lock = false;
1794 	spin_unlock(&memcg_oom_lock);
1795 }
1796 
1797 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1798 {
1799 	struct mem_cgroup *iter;
1800 
1801 	spin_lock(&memcg_oom_lock);
1802 	for_each_mem_cgroup_tree(iter, memcg)
1803 		iter->under_oom++;
1804 	spin_unlock(&memcg_oom_lock);
1805 }
1806 
1807 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1808 {
1809 	struct mem_cgroup *iter;
1810 
1811 	/*
1812 	 * When a new child is created while the hierarchy is under oom,
1813 	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1814 	 */
1815 	spin_lock(&memcg_oom_lock);
1816 	for_each_mem_cgroup_tree(iter, memcg)
1817 		if (iter->under_oom > 0)
1818 			iter->under_oom--;
1819 	spin_unlock(&memcg_oom_lock);
1820 }
1821 
1822 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1823 
1824 struct oom_wait_info {
1825 	struct mem_cgroup *memcg;
1826 	wait_queue_entry_t	wait;
1827 };
1828 
1829 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1830 	unsigned mode, int sync, void *arg)
1831 {
1832 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1833 	struct mem_cgroup *oom_wait_memcg;
1834 	struct oom_wait_info *oom_wait_info;
1835 
1836 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1837 	oom_wait_memcg = oom_wait_info->memcg;
1838 
1839 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1840 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1841 		return 0;
1842 	return autoremove_wake_function(wait, mode, sync, arg);
1843 }
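
/*
 * Example (illustrative): a waiter sleeping on memcg A is woken when the
 * OOM situation ends in A itself, in an ancestor of A (whose OOM spans
 * A), or in a descendant of A (whose uncharges may create the margin A
 * is waiting for); wakeups from unrelated siblings are filtered out by
 * the two descendant checks above.
 */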
1844 
1845 static void memcg_oom_recover(struct mem_cgroup *memcg)
1846 {
1847 	/*
1848 	 * For the following lockless ->under_oom test, the only required
1849 	 * guarantee is that it must see the state asserted by an OOM when
1850 	 * this function is called as a result of userland actions
1851 	 * triggered by the notification of the OOM.  This is trivially
1852 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1853 	 * triggering notification.
1854 	 */
1855 	if (memcg && memcg->under_oom)
1856 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1857 }
1858 
1859 enum oom_status {
1860 	OOM_SUCCESS,
1861 	OOM_FAILED,
1862 	OOM_ASYNC,
1863 	OOM_SKIPPED
1864 };
1865 
1866 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1867 {
1868 	enum oom_status ret;
1869 	bool locked;
1870 
1871 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1872 		return OOM_SKIPPED;
1873 
1874 	memcg_memory_event(memcg, MEMCG_OOM);
1875 
1876 	/*
1877 	 * We are in the middle of the charge context here, so we
1878 	 * don't want to block when potentially sitting on a callstack
1879 	 * that holds all kinds of filesystem and mm locks.
1880 	 *
1881 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1882 	 * handling until the charge can succeed; remember the context and put
1883 	 * the task to sleep at the end of the page fault when all locks are
1884 	 * released.
1885 	 *
1886 	 * On the other hand, in-kernel OOM killer allows for an async victim
1887 	 * memory reclaim (oom_reaper) and that means that we are not solely
1888 	 * relying on the oom victim to make forward progress and we can
1889 	 * invoke the oom killer here.
1890 	 *
1891 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1892 	 * victim and then we have to bail out from the charge path.
1893 	 */
1894 	if (memcg->oom_kill_disable) {
1895 		if (!current->in_user_fault)
1896 			return OOM_SKIPPED;
1897 		css_get(&memcg->css);
1898 		current->memcg_in_oom = memcg;
1899 		current->memcg_oom_gfp_mask = mask;
1900 		current->memcg_oom_order = order;
1901 
1902 		return OOM_ASYNC;
1903 	}
1904 
1905 	mem_cgroup_mark_under_oom(memcg);
1906 
1907 	locked = mem_cgroup_oom_trylock(memcg);
1908 
1909 	if (locked)
1910 		mem_cgroup_oom_notify(memcg);
1911 
1912 	mem_cgroup_unmark_under_oom(memcg);
1913 	if (mem_cgroup_out_of_memory(memcg, mask, order))
1914 		ret = OOM_SUCCESS;
1915 	else
1916 		ret = OOM_FAILED;
1917 
1918 	if (locked)
1919 		mem_cgroup_oom_unlock(memcg);
1920 
1921 	return ret;
1922 }
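
/*
 * Example flow (illustrative): with the kernel OOM killer disabled via
 * cgroup1's memory.oom_control, a failing charge in a user fault returns
 * OOM_ASYNC from here; the fault path later calls
 * mem_cgroup_oom_synchronize(true), which parks the task on
 * memcg_oom_waitq until userspace resolves the situation.
 */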
1923 
1924 /**
1925  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1926  * @handle: actually kill/wait or just clean up the OOM state
1927  *
1928  * This has to be called at the end of a page fault if the memcg OOM
1929  * handler was enabled.
1930  *
1931  * Memcg supports userspace OOM handling where failed allocations must
1932  * sleep on a waitqueue until the userspace task resolves the
1933  * situation.  Sleeping directly in the charge context with all kinds
1934  * of locks held is not a good idea, instead we remember an OOM state
1935  * in the task and mem_cgroup_oom_synchronize() has to be called at
1936  * the end of the page fault to complete the OOM handling.
1937  *
1938  * Returns %true if an ongoing memcg OOM situation was detected and
1939  * completed, %false otherwise.
1940  */
1941 bool mem_cgroup_oom_synchronize(bool handle)
1942 {
1943 	struct mem_cgroup *memcg = current->memcg_in_oom;
1944 	struct oom_wait_info owait;
1945 	bool locked;
1946 
1947 	/* OOM is global, do not handle */
1948 	if (!memcg)
1949 		return false;
1950 
1951 	if (!handle)
1952 		goto cleanup;
1953 
1954 	owait.memcg = memcg;
1955 	owait.wait.flags = 0;
1956 	owait.wait.func = memcg_oom_wake_function;
1957 	owait.wait.private = current;
1958 	INIT_LIST_HEAD(&owait.wait.entry);
1959 
1960 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1961 	mem_cgroup_mark_under_oom(memcg);
1962 
1963 	locked = mem_cgroup_oom_trylock(memcg);
1964 
1965 	if (locked)
1966 		mem_cgroup_oom_notify(memcg);
1967 
1968 	if (locked && !memcg->oom_kill_disable) {
1969 		mem_cgroup_unmark_under_oom(memcg);
1970 		finish_wait(&memcg_oom_waitq, &owait.wait);
1971 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1972 					 current->memcg_oom_order);
1973 	} else {
1974 		schedule();
1975 		mem_cgroup_unmark_under_oom(memcg);
1976 		finish_wait(&memcg_oom_waitq, &owait.wait);
1977 	}
1978 
1979 	if (locked) {
1980 		mem_cgroup_oom_unlock(memcg);
1981 		/*
1982 		 * There is no guarantee that an OOM-lock contender
1983 		 * sees the wakeups triggered by the OOM kill
1984 		 * uncharges.  Wake any sleepers explicitly.
1985 		 */
1986 		memcg_oom_recover(memcg);
1987 	}
1988 cleanup:
1989 	current->memcg_in_oom = NULL;
1990 	css_put(&memcg->css);
1991 	return true;
1992 }
1993 
1994 /**
1995  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1996  * @victim: task to be killed by the OOM killer
1997  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1998  *
1999  * Returns a pointer to a memory cgroup, which has to be cleaned up
2000  * by killing all OOM-killable tasks belonging to it.
2001  *
2002  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2003  */
2004 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2005 					    struct mem_cgroup *oom_domain)
2006 {
2007 	struct mem_cgroup *oom_group = NULL;
2008 	struct mem_cgroup *memcg;
2009 
2010 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2011 		return NULL;
2012 
2013 	if (!oom_domain)
2014 		oom_domain = root_mem_cgroup;
2015 
2016 	rcu_read_lock();
2017 
2018 	memcg = mem_cgroup_from_task(victim);
2019 	if (memcg == root_mem_cgroup)
2020 		goto out;
2021 
2022 	/*
2023 	 * If the victim task has been asynchronously moved to a different
2024 	 * memory cgroup, we might end up killing tasks outside oom_domain.
2025 	 * In this case it's better to ignore memory.group.oom.
2026 	 */
2027 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2028 		goto out;
2029 
2030 	/*
2031 	 * Traverse the memory cgroup hierarchy from the victim task's
2032 	 * cgroup up to the OOMing cgroup (or root) to find the
2033 	 * highest-level memory cgroup with oom.group set.
2034 	 */
2035 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2036 		if (memcg->oom_group)
2037 			oom_group = memcg;
2038 
2039 		if (memcg == oom_domain)
2040 			break;
2041 	}
2042 
2043 	if (oom_group)
2044 		css_get(&oom_group->css);
2045 out:
2046 	rcu_read_unlock();
2047 
2048 	return oom_group;
2049 }
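
/*
 * Example (illustrative): for a hierarchy root -> A -> B -> C with
 * oom_domain == A and memory.oom.group set on both B and C, the walk
 * from the victim's cgroup C towards A records C, then overwrites it
 * with B, and stops at A; B is returned as the highest-level cgroup
 * with oom.group set within the OOMing domain.
 */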
2050 
2051 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2052 {
2053 	pr_info("Tasks in ");
2054 	pr_cont_cgroup_path(memcg->css.cgroup);
2055 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2056 }
2057 
2058 /**
2059  * lock_page_memcg - lock a page->mem_cgroup binding
2060  * @page: the page
2061  *
2062  * This function protects unlocked LRU pages from being moved to
2063  * another cgroup.
2064  *
2065  * It ensures the lifetime of the returned memcg. The caller is responsible
2066  * for the lifetime of the page; __unlock_page_memcg() is available
2067  * when @page might get freed inside the locked section.
2068  */
2069 struct mem_cgroup *lock_page_memcg(struct page *page)
2070 {
2071 	struct page *head = compound_head(page); /* rmap on tail pages */
2072 	struct mem_cgroup *memcg;
2073 	unsigned long flags;
2074 
2075 	/*
2076 	 * The RCU lock is held throughout the transaction.  The fast
2077 	 * path can get away without acquiring the memcg->move_lock
2078 	 * because page moving starts with an RCU grace period.
2079 	 *
2080 	 * The RCU lock also protects the memcg from being freed when
2081 	 * the page state that is going to change is the only thing
2082 	 * preventing the page itself from being freed. E.g. writeback
2083 	 * doesn't hold a page reference and relies on PG_writeback to
2084 	 * keep off truncation, migration and so forth.
2085 	 */
2086 	rcu_read_lock();
2087 
2088 	if (mem_cgroup_disabled())
2089 		return NULL;
2090 again:
2091 	memcg = head->mem_cgroup;
2092 	if (unlikely(!memcg))
2093 		return NULL;
2094 
2095 	if (atomic_read(&memcg->moving_account) <= 0)
2096 		return memcg;
2097 
2098 	spin_lock_irqsave(&memcg->move_lock, flags);
2099 	if (memcg != head->mem_cgroup) {
2100 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2101 		goto again;
2102 	}
2103 
2104 	/*
2105 	 * When charge migration first begins, we can have locked and
2106 	 * unlocked page stat updates happening concurrently.  Track
2107 	 * the task that holds the lock for unlock_page_memcg().
2108 	 */
2109 	memcg->move_lock_task = current;
2110 	memcg->move_lock_flags = flags;
2111 
2112 	return memcg;
2113 }
2114 EXPORT_SYMBOL(lock_page_memcg);
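
/*
 * Typical usage (an illustrative sketch, not quoted from any caller):
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state and/or memcg statistics ...
 *	unlock_page_memcg(page);
 *
 * When the page may get freed inside the locked section, pair the lock
 * with __unlock_page_memcg(memcg) instead, since page->mem_cgroup can
 * no longer be dereferenced at unlock time.
 */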
2115 
2116 /**
2117  * __unlock_page_memcg - unlock and unpin a memcg
2118  * @memcg: the memcg
2119  *
2120  * Unlock and unpin a memcg returned by lock_page_memcg().
2121  */
2122 void __unlock_page_memcg(struct mem_cgroup *memcg)
2123 {
2124 	if (memcg && memcg->move_lock_task == current) {
2125 		unsigned long flags = memcg->move_lock_flags;
2126 
2127 		memcg->move_lock_task = NULL;
2128 		memcg->move_lock_flags = 0;
2129 
2130 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2131 	}
2132 
2133 	rcu_read_unlock();
2134 }
2135 
2136 /**
2137  * unlock_page_memcg - unlock a page->mem_cgroup binding
2138  * @page: the page
2139  */
2140 void unlock_page_memcg(struct page *page)
2141 {
2142 	struct page *head = compound_head(page);
2143 
2144 	__unlock_page_memcg(head->mem_cgroup);
2145 }
2146 EXPORT_SYMBOL(unlock_page_memcg);
2147 
2148 struct memcg_stock_pcp {
2149 	struct mem_cgroup *cached; /* never the root cgroup */
2150 	unsigned int nr_pages;
2151 
2152 #ifdef CONFIG_MEMCG_KMEM
2153 	struct obj_cgroup *cached_objcg;
2154 	unsigned int nr_bytes;
2155 #endif
2156 
2157 	struct work_struct work;
2158 	unsigned long flags;
2159 #define FLUSHING_CACHED_CHARGE	0
2160 };
2161 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2162 static DEFINE_MUTEX(percpu_charge_mutex);
2163 
2164 #ifdef CONFIG_MEMCG_KMEM
2165 static void drain_obj_stock(struct memcg_stock_pcp *stock);
2166 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2167 				     struct mem_cgroup *root_memcg);
2168 
2169 #else
2170 static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2171 {
2172 }
2173 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2174 				     struct mem_cgroup *root_memcg)
2175 {
2176 	return false;
2177 }
2178 #endif
2179 
2180 /**
2181  * consume_stock: Try to consume stocked charge on this cpu.
2182  * @memcg: memcg to consume from.
2183  * @nr_pages: how many pages to charge.
2184  *
2185  * The charges will only happen if @memcg matches the current cpu's memcg
2186  * stock, and at least @nr_pages are available in that stock.  A failure
2187  * to service an allocation will make try_charge() refill the stock.
2188  *
2189  * Returns %true if successful, %false otherwise.
2190  */
2191 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2192 {
2193 	struct memcg_stock_pcp *stock;
2194 	unsigned long flags;
2195 	bool ret = false;
2196 
2197 	if (nr_pages > MEMCG_CHARGE_BATCH)
2198 		return ret;
2199 
2200 	local_irq_save(flags);
2201 
2202 	stock = this_cpu_ptr(&memcg_stock);
2203 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2204 		stock->nr_pages -= nr_pages;
2205 		ret = true;
2206 	}
2207 
2208 	local_irq_restore(flags);
2209 
2210 	return ret;
2211 }
2212 
2213 /*
2214  * Return stocks cached in percpu to the counters and reset cached information.
2215  */
2216 static void drain_stock(struct memcg_stock_pcp *stock)
2217 {
2218 	struct mem_cgroup *old = stock->cached;
2219 
2220 	if (!old)
2221 		return;
2222 
2223 	if (stock->nr_pages) {
2224 		page_counter_uncharge(&old->memory, stock->nr_pages);
2225 		if (do_memsw_account())
2226 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2227 		stock->nr_pages = 0;
2228 	}
2229 
2230 	css_put(&old->css);
2231 	stock->cached = NULL;
2232 }
2233 
2234 static void drain_local_stock(struct work_struct *dummy)
2235 {
2236 	struct memcg_stock_pcp *stock;
2237 	unsigned long flags;
2238 
2239 	/*
2240 	 * The only protection from cpu hotplug vs. drain_stock races is
2241 	 * that we always operate on the local CPU stock here with IRQs disabled.
2242 	 */
2243 	local_irq_save(flags);
2244 
2245 	stock = this_cpu_ptr(&memcg_stock);
2246 	drain_obj_stock(stock);
2247 	drain_stock(stock);
2248 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2249 
2250 	local_irq_restore(flags);
2251 }
2252 
2253 /*
2254  * Cache charges (nr_pages) in the local per-cpu area,
2255  * to be consumed later by consume_stock().
2256  */
2257 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2258 {
2259 	struct memcg_stock_pcp *stock;
2260 	unsigned long flags;
2261 
2262 	local_irq_save(flags);
2263 
2264 	stock = this_cpu_ptr(&memcg_stock);
2265 	if (stock->cached != memcg) { /* reset if necessary */
2266 		drain_stock(stock);
2267 		css_get(&memcg->css);
2268 		stock->cached = memcg;
2269 	}
2270 	stock->nr_pages += nr_pages;
2271 
2272 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2273 		drain_stock(stock);
2274 
2275 	local_irq_restore(flags);
2276 }
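
/*
 * Example (illustrative): a single-page charge that misses the stock
 * makes try_charge() charge a full MEMCG_CHARGE_BATCH (32 pages) to the
 * page counters and refill the stock with the 31 surplus pages; the
 * next 31 single-page charges on this CPU are then served by
 * consume_stock() without touching the atomic page counters.
 */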
2277 
2278 /*
2279  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2280  * of the hierarchy under it.
2281  */
2282 static void drain_all_stock(struct mem_cgroup *root_memcg)
2283 {
2284 	int cpu, curcpu;
2285 
2286 	/* If someone's already draining, avoid adding more workers. */
2287 	if (!mutex_trylock(&percpu_charge_mutex))
2288 		return;
2289 	/*
2290 	 * Notify other cpus that a system-wide "drain" is running.
2291 	 * We do not care about races with cpu hotplug because cpu down
2292 	 * as well as workers from this path always operate on the local
2293 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2294 	 */
2295 	curcpu = get_cpu();
2296 	for_each_online_cpu(cpu) {
2297 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2298 		struct mem_cgroup *memcg;
2299 		bool flush = false;
2300 
2301 		rcu_read_lock();
2302 		memcg = stock->cached;
2303 		if (memcg && stock->nr_pages &&
2304 		    mem_cgroup_is_descendant(memcg, root_memcg))
2305 			flush = true;
2306 		if (obj_stock_flush_required(stock, root_memcg))
2307 			flush = true;
2308 		rcu_read_unlock();
2309 
2310 		if (flush &&
2311 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2312 			if (cpu == curcpu)
2313 				drain_local_stock(&stock->work);
2314 			else
2315 				schedule_work_on(cpu, &stock->work);
2316 		}
2317 	}
2318 	put_cpu();
2319 	mutex_unlock(&percpu_charge_mutex);
2320 }
2321 
2322 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2323 {
2324 	struct memcg_stock_pcp *stock;
2325 	struct mem_cgroup *memcg, *mi;
2326 
2327 	stock = &per_cpu(memcg_stock, cpu);
2328 	drain_stock(stock);
2329 
2330 	for_each_mem_cgroup(memcg) {
2331 		int i;
2332 
2333 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2334 			int nid;
2335 			long x;
2336 
2337 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2338 			if (x)
2339 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2340 					atomic_long_add(x, &mi->vmstats[i]);
2341 
2342 			if (i >= NR_VM_NODE_STAT_ITEMS)
2343 				continue;
2344 
2345 			for_each_node(nid) {
2346 				struct mem_cgroup_per_node *pn;
2347 
2348 				pn = mem_cgroup_nodeinfo(memcg, nid);
2349 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2350 				if (x)
2351 					do {
2352 						atomic_long_add(x, &pn->lruvec_stat[i]);
2353 					} while ((pn = parent_nodeinfo(pn, nid)));
2354 			}
2355 		}
2356 
2357 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2358 			long x;
2359 
2360 			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2361 			if (x)
2362 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2363 					atomic_long_add(x, &mi->vmevents[i]);
2364 		}
2365 	}
2366 
2367 	return 0;
2368 }
2369 
2370 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2371 				  unsigned int nr_pages,
2372 				  gfp_t gfp_mask)
2373 {
2374 	unsigned long nr_reclaimed = 0;
2375 
2376 	do {
2377 		unsigned long pflags;
2378 
2379 		if (page_counter_read(&memcg->memory) <=
2380 		    READ_ONCE(memcg->memory.high))
2381 			continue;
2382 
2383 		memcg_memory_event(memcg, MEMCG_HIGH);
2384 
2385 		psi_memstall_enter(&pflags);
2386 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2387 							     gfp_mask, true);
2388 		psi_memstall_leave(&pflags);
2389 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2390 		 !mem_cgroup_is_root(memcg));
2391 
2392 	return nr_reclaimed;
2393 }
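
/*
 * Example (illustrative): if both a leaf cgroup and its parent are
 * above their memory.high, the loop above reclaims nr_pages from the
 * leaf and then again from the parent, walking up to (but excluding)
 * the root; ancestors at or below their high are skipped via the
 * continue statement.
 */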
2394 
2395 static void high_work_func(struct work_struct *work)
2396 {
2397 	struct mem_cgroup *memcg;
2398 
2399 	memcg = container_of(work, struct mem_cgroup, high_work);
2400 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2401 }
2402 
2403 /*
2404  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2405  * enough to cause a significant slowdown in most cases, while still
2406  * allowing diagnostics and tracing to proceed without becoming stuck.
2407  */
2408 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2409 
2410 /*
2411  * When calculating the delay, we use these on either side of the exponentiation to
2412  * maintain precision and scale to a reasonable number of jiffies (see the table
2413  * below).
2414  *
2415  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2416  *   overage ratio to a delay.
2417  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2418  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2419  *   to produce a reasonable delay curve.
2420  *
2421  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2422  * reasonable delay curve compared to precision-adjusted overage, not
2423  * penalising heavily at first, but still making sure that growth beyond the
2424  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2425  * example, with a high of 100 megabytes:
2426  *
2427  *  +-------+------------------------+
2428  *  | usage | time to allocate in ms |
2429  *  +-------+------------------------+
2430  *  | 100M  |                      0 |
2431  *  | 101M  |                      6 |
2432  *  | 102M  |                     25 |
2433  *  | 103M  |                     57 |
2434  *  | 104M  |                    102 |
2435  *  | 105M  |                    159 |
2436  *  | 106M  |                    230 |
2437  *  | 107M  |                    313 |
2438  *  | 108M  |                    409 |
2439  *  | 109M  |                    518 |
2440  *  | 110M  |                    639 |
2441  *  | 111M  |                    774 |
2442  *  | 112M  |                    921 |
2443  *  | 113M  |                   1081 |
2444  *  | 114M  |                   1254 |
2445  *  | 115M  |                   1439 |
2446  *  | 116M  |                   1638 |
2447  *  | 117M  |                   1849 |
2448  *  | 118M  |                   2000 |
2449  *  | 119M  |                   2000 |
2450  *  | 120M  |                   2000 |
2451  *  +-------+------------------------+
2452  */
2453 #define MEMCG_DELAY_PRECISION_SHIFT 20
2454 #define MEMCG_DELAY_SCALING_SHIFT 14
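
/*
 * Worked example (illustrative; assumes HZ == 1000 and 4K pages): with
 * memory.high == 100M (25600 pages) and usage == 101M, the overage is
 * 256 pages, so calculate_overage() below yields
 * (256 << 20) / 25600 = 10485. For a nominal MEMCG_CHARGE_BATCH-sized
 * charge, calculate_high_delay() then computes
 * 10485^2 * 1000 >> 20 >> 14 ~= 6 jiffies, matching the 6ms row of the
 * table above; at 102M the same arithmetic gives ~25ms.
 */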
2455 
2456 static u64 calculate_overage(unsigned long usage, unsigned long high)
2457 {
2458 	u64 overage;
2459 
2460 	if (usage <= high)
2461 		return 0;
2462 
2463 	/*
2464 	 * Prevent division by 0 in overage calculation by acting as if
2465 	 * it was a threshold of 1 page
2466 	 */
2467 	high = max(high, 1UL);
2468 
2469 	overage = usage - high;
2470 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2471 	return div64_u64(overage, high);
2472 }
2473 
2474 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2475 {
2476 	u64 overage, max_overage = 0;
2477 
2478 	do {
2479 		overage = calculate_overage(page_counter_read(&memcg->memory),
2480 					    READ_ONCE(memcg->memory.high));
2481 		max_overage = max(overage, max_overage);
2482 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2483 		 !mem_cgroup_is_root(memcg));
2484 
2485 	return max_overage;
2486 }
2487 
2488 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2489 {
2490 	u64 overage, max_overage = 0;
2491 
2492 	do {
2493 		overage = calculate_overage(page_counter_read(&memcg->swap),
2494 					    READ_ONCE(memcg->swap.high));
2495 		if (overage)
2496 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2497 		max_overage = max(overage, max_overage);
2498 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2499 		 !mem_cgroup_is_root(memcg));
2500 
2501 	return max_overage;
2502 }
2503 
2504 /*
2505  * Get the number of jiffies that we should penalise a mischievous cgroup which
2506  * is exceeding its memory.high by checking both it and its ancestors.
2507  */
2508 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2509 					  unsigned int nr_pages,
2510 					  u64 max_overage)
2511 {
2512 	unsigned long penalty_jiffies;
2513 
2514 	if (!max_overage)
2515 		return 0;
2516 
2517 	/*
2518 	 * We use overage compared to memory.high to calculate the number of
2519 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2520 	 * fairly lenient on small overages, and increasingly harsh when the
2521 	 * memcg in question makes it clear that it has no intention of stopping
2522 	 * its crazy behaviour, so we exponentially increase the delay based on
2523 	 * overage amount.
2524 	 */
2525 	penalty_jiffies = max_overage * max_overage * HZ;
2526 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2527 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2528 
2529 	/*
2530 	 * Factor in the task's own contribution to the overage, such that four
2531 	 * N-sized allocations are throttled approximately the same as one
2532 	 * 4N-sized allocation.
2533 	 *
2534 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2535 	 * larger the current charge patch is than that.
2536 	 * larger the current charge batch is than that.
2537 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2538 }
2539 
2540 /*
2541  * Scheduled by try_charge() to be executed from the userland return path
2542  * and reclaims memory over the high limit.
2543  */
2544 void mem_cgroup_handle_over_high(void)
2545 {
2546 	unsigned long penalty_jiffies;
2547 	unsigned long pflags;
2548 	unsigned long nr_reclaimed;
2549 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2550 	int nr_retries = MAX_RECLAIM_RETRIES;
2551 	struct mem_cgroup *memcg;
2552 	bool in_retry = false;
2553 
2554 	if (likely(!nr_pages))
2555 		return;
2556 
2557 	memcg = get_mem_cgroup_from_mm(current->mm);
2558 	current->memcg_nr_pages_over_high = 0;
2559 
2560 retry_reclaim:
2561 	/*
2562 	 * The allocating task should reclaim at least the batch size, but for
2563 	 * subsequent retries we only want to do what's necessary to prevent oom
2564 	 * or breaching resource isolation.
2565 	 *
2566 	 * This is distinct from memory.max or page allocator behaviour because
2567 	 * memory.high is currently batched, whereas memory.max and the page
2568 	 * allocator run every time an allocation is made.
2569 	 */
2570 	nr_reclaimed = reclaim_high(memcg,
2571 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2572 				    GFP_KERNEL);
2573 
2574 	/*
2575 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2576 	 * allocators proactively to slow down excessive growth.
2577 	 */
2578 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2579 					       mem_find_max_overage(memcg));
2580 
2581 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2582 						swap_find_max_overage(memcg));
2583 
2584 	/*
2585 	 * Clamp the max delay per usermode return so as to still keep the
2586 	 * application moving forwards and also permit diagnostics, albeit
2587 	 * extremely slowly.
2588 	 */
2589 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2590 
2591 	/*
2592 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2593 	 * that it's not even worth doing, in an attempt to be nice to those who
2594 	 * go only a small amount over their memory.high value and maybe haven't
2595 	 * been aggressively reclaimed enough yet.
2596 	 */
2597 	if (penalty_jiffies <= HZ / 100)
2598 		goto out;
2599 
2600 	/*
2601 	 * If reclaim is making forward progress but we're still over
2602 	 * memory.high, we want to encourage that rather than doing allocator
2603 	 * throttling.
2604 	 */
2605 	if (nr_reclaimed || nr_retries--) {
2606 		in_retry = true;
2607 		goto retry_reclaim;
2608 	}
2609 
2610 	/*
2611 	 * If we exit early, we're guaranteed to die (since
2612 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2613 	 * need to account for any ill-begotten jiffies to pay them off later.
2614 	 */
2615 	psi_memstall_enter(&pflags);
2616 	schedule_timeout_killable(penalty_jiffies);
2617 	psi_memstall_leave(&pflags);
2618 
2619 out:
2620 	css_put(&memcg->css);
2621 }
2622 
2623 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2624 		      unsigned int nr_pages)
2625 {
2626 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2627 	int nr_retries = MAX_RECLAIM_RETRIES;
2628 	struct mem_cgroup *mem_over_limit;
2629 	struct page_counter *counter;
2630 	enum oom_status oom_status;
2631 	unsigned long nr_reclaimed;
2632 	bool may_swap = true;
2633 	bool drained = false;
2634 	unsigned long pflags;
2635 
2636 	if (mem_cgroup_is_root(memcg))
2637 		return 0;
2638 retry:
2639 	if (consume_stock(memcg, nr_pages))
2640 		return 0;
2641 
2642 	if (!do_memsw_account() ||
2643 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2644 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2645 			goto done_restock;
2646 		if (do_memsw_account())
2647 			page_counter_uncharge(&memcg->memsw, batch);
2648 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2649 	} else {
2650 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2651 		may_swap = false;
2652 	}
2653 
2654 	if (batch > nr_pages) {
2655 		batch = nr_pages;
2656 		goto retry;
2657 	}
2658 
2659 	/*
2660 	 * Memcg doesn't have a dedicated reserve for atomic
2661 	 * allocations. But like the global atomic pool, we need to
2662 	 * put the burden of reclaim on regular allocation requests
2663 	 * and let these go through as privileged allocations.
2664 	 */
2665 	if (gfp_mask & __GFP_ATOMIC)
2666 		goto force;
2667 
2668 	/*
2669 	 * Unlike in global OOM situations, memcg is not in a physical
2670 	 * memory shortage.  Allow dying and OOM-killed tasks to
2671 	 * bypass the last charges so that they can exit quickly and
2672 	 * free their memory.
2673 	 */
2674 	if (unlikely(should_force_charge()))
2675 		goto force;
2676 
2677 	/*
2678 	 * Prevent unbounded recursion when reclaim operations need to
2679 	 * allocate memory. This might exceed the limits temporarily,
2680 	 * but we prefer facilitating memory reclaim and getting back
2681 	 * under the limit over triggering OOM kills in these cases.
2682 	 */
2683 	if (unlikely(current->flags & PF_MEMALLOC))
2684 		goto force;
2685 
2686 	if (unlikely(task_in_memcg_oom(current)))
2687 		goto nomem;
2688 
2689 	if (!gfpflags_allow_blocking(gfp_mask))
2690 		goto nomem;
2691 
2692 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2693 
2694 	psi_memstall_enter(&pflags);
2695 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2696 						    gfp_mask, may_swap);
2697 	psi_memstall_leave(&pflags);
2698 
2699 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2700 		goto retry;
2701 
2702 	if (!drained) {
2703 		drain_all_stock(mem_over_limit);
2704 		drained = true;
2705 		goto retry;
2706 	}
2707 
2708 	if (gfp_mask & __GFP_NORETRY)
2709 		goto nomem;
2710 	/*
2711 	 * Even though the limit is exceeded at this point, reclaim
2712 	 * may have been able to free some pages.  Retry the charge
2713 	 * before killing the task.
2714 	 *
2715 	 * Only for regular pages, though: huge pages are rather
2716 	 * unlikely to succeed so close to the limit, and we fall back
2717 	 * to regular pages anyway in case of failure.
2718 	 */
2719 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2720 		goto retry;
2721 	/*
2722 	 * At task move, charge accounts can be doubly counted. So, it's
2723 	 * better to wait until the end of task_move if something is going on.
2724 	 */
2725 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2726 		goto retry;
2727 
2728 	if (nr_retries--)
2729 		goto retry;
2730 
2731 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2732 		goto nomem;
2733 
2734 	if (gfp_mask & __GFP_NOFAIL)
2735 		goto force;
2736 
2737 	if (fatal_signal_pending(current))
2738 		goto force;
2739 
2740 	/*
2741 	 * Keep retrying as long as the memcg oom killer is able to make
2742 	 * forward progress, or bypass the charge if the oom killer
2743 	 * couldn't make any progress.
2744 	 */
2745 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2746 		       get_order(nr_pages * PAGE_SIZE));
2747 	switch (oom_status) {
2748 	case OOM_SUCCESS:
2749 		nr_retries = MAX_RECLAIM_RETRIES;
2750 		goto retry;
2751 	case OOM_FAILED:
2752 		goto force;
2753 	default:
2754 		goto nomem;
2755 	}
2756 nomem:
2757 	if (!(gfp_mask & __GFP_NOFAIL))
2758 		return -ENOMEM;
2759 force:
2760 	/*
2761 	 * The allocation either can't fail or will lead to more memory
2762 	 * being freed very soon.  Allow memory usage to go over the limit
2763 	 * temporarily by force charging it.
2764 	 */
2765 	page_counter_charge(&memcg->memory, nr_pages);
2766 	if (do_memsw_account())
2767 		page_counter_charge(&memcg->memsw, nr_pages);
2768 
2769 	return 0;
2770 
2771 done_restock:
2772 	if (batch > nr_pages)
2773 		refill_stock(memcg, batch - nr_pages);
2774 
2775 	/*
2776 	 * If the hierarchy is above the normal consumption range, schedule
2777 	 * reclaim on returning to userland.  We can perform reclaim here
2778 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2779 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2780 	 * not recorded as it most likely matches current's and won't
2781 	 * change in the meantime.  As high limit is checked again before
2782 	 * reclaim, the cost of mismatch is negligible.
2783 	 */
2784 	do {
2785 		bool mem_high, swap_high;
2786 
2787 		mem_high = page_counter_read(&memcg->memory) >
2788 			READ_ONCE(memcg->memory.high);
2789 		swap_high = page_counter_read(&memcg->swap) >
2790 			READ_ONCE(memcg->swap.high);
2791 
2792 		/* Don't bother a random interrupted task */
2793 		if (in_interrupt()) {
2794 			if (mem_high) {
2795 				schedule_work(&memcg->high_work);
2796 				break;
2797 			}
2798 			continue;
2799 		}
2800 
2801 		if (mem_high || swap_high) {
2802 			/*
2803 			 * The allocating tasks in this cgroup will need to do
2804 			 * reclaim or be throttled to prevent further growth
2805 			 * of the memory or swap footprints.
2806 			 *
2807 			 * Target some best-effort fairness between the tasks,
2808 			 * and distribute reclaim work and delay penalties
2809 			 * based on how much each task is actually allocating.
2810 			 */
2811 			current->memcg_nr_pages_over_high += batch;
2812 			set_notify_resume(current);
2813 			break;
2814 		}
2815 	} while ((memcg = parent_mem_cgroup(memcg)));
2816 
2817 	return 0;
2818 }
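
/*
 * Charge path summary (illustrative): a charge first tries the percpu
 * stock, then the page counters with a full batch; on failure it
 * retries with the exact size, runs direct reclaim, drains the percpu
 * stocks, and only then falls back to the memcg OOM killer, forcing
 * the charge only for __GFP_NOFAIL, fatal signals, and similar cases.
 */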
2819 
2820 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2821 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2822 {
2823 	if (mem_cgroup_is_root(memcg))
2824 		return;
2825 
2826 	page_counter_uncharge(&memcg->memory, nr_pages);
2827 	if (do_memsw_account())
2828 		page_counter_uncharge(&memcg->memsw, nr_pages);
2829 }
2830 #endif
2831 
2832 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2833 {
2834 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2835 	/*
2836 	 * Any of the following ensures page->mem_cgroup stability:
2837 	 *
2838 	 * - the page lock
2839 	 * - LRU isolation
2840 	 * - lock_page_memcg()
2841 	 * - exclusive reference
2842 	 */
2843 	page->mem_cgroup = memcg;
2844 }
2845 
2846 #ifdef CONFIG_MEMCG_KMEM
2847 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2848 				 gfp_t gfp)
2849 {
2850 	unsigned int objects = objs_per_slab_page(s, page);
2851 	void *vec;
2852 
2853 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2854 			   page_to_nid(page));
2855 	if (!vec)
2856 		return -ENOMEM;
2857 
2858 	if (cmpxchg(&page->obj_cgroups, NULL,
2859 		    (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
2860 		kfree(vec);
2861 	else
2862 		kmemleak_not_leak(vec);
2863 
2864 	return 0;
2865 }
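
/*
 * Note (illustrative): page->obj_cgroups shares its word with
 * page->mem_cgroup, so the vector pointer is stored with its low bit
 * set as a type tag; the cmpxchg() above also makes the loser of a
 * concurrent allocation race free its vector instead of leaking it.
 */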
2866 
2867 /*
2868  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2869  *
2870  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2871  * cgroup_mutex, etc.
2872  */
2873 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2874 {
2875 	struct page *page;
2876 
2877 	if (mem_cgroup_disabled())
2878 		return NULL;
2879 
2880 	page = virt_to_head_page(p);
2881 
2882 	/*
2883 	 * Slab objects are accounted individually, not per-page.
2884 	 * Memcg membership data for each individual object is saved in
2885 	 * the page->obj_cgroups.
2886 	 */
2887 	if (page_has_obj_cgroups(page)) {
2888 		struct obj_cgroup *objcg;
2889 		unsigned int off;
2890 
2891 		off = obj_to_index(page->slab_cache, page, p);
2892 		objcg = page_obj_cgroups(page)[off];
2893 		if (objcg)
2894 			return obj_cgroup_memcg(objcg);
2895 
2896 		return NULL;
2897 	}
2898 
2899 	/* All other pages use page->mem_cgroup */
2900 	return page->mem_cgroup;
2901 }
2902 
2903 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2904 {
2905 	struct obj_cgroup *objcg = NULL;
2906 	struct mem_cgroup *memcg;
2907 
2908 	if (unlikely(!current->mm && !current->active_memcg))
2909 		return NULL;
2910 
2911 	rcu_read_lock();
2912 	if (unlikely(current->active_memcg))
2913 		memcg = rcu_dereference(current->active_memcg);
2914 	else
2915 		memcg = mem_cgroup_from_task(current);
2916 
2917 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2918 		objcg = rcu_dereference(memcg->objcg);
2919 		if (objcg && obj_cgroup_tryget(objcg))
2920 			break;
2921 	}
2922 	rcu_read_unlock();
2923 
2924 	return objcg;
2925 }
2926 
2927 static int memcg_alloc_cache_id(void)
2928 {
2929 	int id, size;
2930 	int err;
2931 
2932 	id = ida_simple_get(&memcg_cache_ida,
2933 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2934 	if (id < 0)
2935 		return id;
2936 
2937 	if (id < memcg_nr_cache_ids)
2938 		return id;
2939 
2940 	/*
2941 	 * There's no space for the new id in memcg_caches arrays,
2942 	 * so we have to grow them.
2943 	 */
2944 	down_write(&memcg_cache_ids_sem);
2945 
2946 	size = 2 * (id + 1);
2947 	if (size < MEMCG_CACHES_MIN_SIZE)
2948 		size = MEMCG_CACHES_MIN_SIZE;
2949 	else if (size > MEMCG_CACHES_MAX_SIZE)
2950 		size = MEMCG_CACHES_MAX_SIZE;
2951 
2952 	err = memcg_update_all_list_lrus(size);
2953 	if (!err)
2954 		memcg_nr_cache_ids = size;
2955 
2956 	up_write(&memcg_cache_ids_sem);
2957 
2958 	if (err) {
2959 		ida_simple_remove(&memcg_cache_ida, id);
2960 		return err;
2961 	}
2962 	return id;
2963 }
2964 
2965 static void memcg_free_cache_id(int id)
2966 {
2967 	ida_simple_remove(&memcg_cache_ida, id);
2968 }
2969 
2970 /**
2971  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
2972  * @memcg: memory cgroup to charge
2973  * @gfp: reclaim mode
2974  * @nr_pages: number of pages to charge
2975  *
2976  * Returns 0 on success, an error code on failure.
2977  */
2978 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
2979 			unsigned int nr_pages)
2980 {
2981 	struct page_counter *counter;
2982 	int ret;
2983 
2984 	ret = try_charge(memcg, gfp, nr_pages);
2985 	if (ret)
2986 		return ret;
2987 
2988 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2989 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2990 
2991 		/*
2992 		 * Enforce __GFP_NOFAIL allocation because callers are not
2993 		 * prepared to see failures and likely do not have any failure
2994 		 * handling code.
2995 		 */
2996 		if (gfp & __GFP_NOFAIL) {
2997 			page_counter_charge(&memcg->kmem, nr_pages);
2998 			return 0;
2999 		}
3000 		cancel_charge(memcg, nr_pages);
3001 		return -ENOMEM;
3002 	}
3003 	return 0;
3004 }
3005 
3006 /**
3007  * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
3008  * @memcg: memcg to uncharge
3009  * @nr_pages: number of pages to uncharge
3010  */
3011 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3012 {
3013 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3014 		page_counter_uncharge(&memcg->kmem, nr_pages);
3015 
3016 	page_counter_uncharge(&memcg->memory, nr_pages);
3017 	if (do_memsw_account())
3018 		page_counter_uncharge(&memcg->memsw, nr_pages);
3019 }
3020 
3021 /**
3022  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3023  * @page: page to charge
3024  * @gfp: reclaim mode
3025  * @order: allocation order
3026  *
3027  * Returns 0 on success, an error code on failure.
3028  */
3029 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3030 {
3031 	struct mem_cgroup *memcg;
3032 	int ret = 0;
3033 
3034 	if (memcg_kmem_bypass())
3035 		return 0;
3036 
3037 	memcg = get_mem_cgroup_from_current();
3038 	if (!mem_cgroup_is_root(memcg)) {
3039 		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3040 		if (!ret) {
3041 			page->mem_cgroup = memcg;
3042 			__SetPageKmemcg(page);
3043 			return 0;
3044 		}
3045 	}
3046 	css_put(&memcg->css);
3047 	return ret;
3048 }
3049 
3050 /**
3051  * __memcg_kmem_uncharge_page: uncharge a kmem page
3052  * @page: page to uncharge
3053  * @order: allocation order
3054  */
3055 void __memcg_kmem_uncharge_page(struct page *page, int order)
3056 {
3057 	struct mem_cgroup *memcg = page->mem_cgroup;
3058 	unsigned int nr_pages = 1 << order;
3059 
3060 	if (!memcg)
3061 		return;
3062 
3063 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3064 	__memcg_kmem_uncharge(memcg, nr_pages);
3065 	page->mem_cgroup = NULL;
3066 	css_put(&memcg->css);
3067 
3068 	/* slab pages do not have PageKmemcg flag set */
3069 	if (PageKmemcg(page))
3070 		__ClearPageKmemcg(page);
3071 }
3072 
3073 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3074 {
3075 	struct memcg_stock_pcp *stock;
3076 	unsigned long flags;
3077 	bool ret = false;
3078 
3079 	local_irq_save(flags);
3080 
3081 	stock = this_cpu_ptr(&memcg_stock);
3082 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3083 		stock->nr_bytes -= nr_bytes;
3084 		ret = true;
3085 	}
3086 
3087 	local_irq_restore(flags);
3088 
3089 	return ret;
3090 }
3091 
3092 static void drain_obj_stock(struct memcg_stock_pcp *stock)
3093 {
3094 	struct obj_cgroup *old = stock->cached_objcg;
3095 
3096 	if (!old)
3097 		return;
3098 
3099 	if (stock->nr_bytes) {
3100 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3101 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3102 
3103 		if (nr_pages) {
3104 			rcu_read_lock();
3105 			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
3106 			rcu_read_unlock();
3107 		}
3108 
3109 		/*
3110 		 * The leftover is flushed to the centralized per-memcg value.
3111 		 * On the next attempt to refill obj stock it will be moved
3112 		 * to a per-cpu stock (probably on another CPU), see
3113 		 * refill_obj_stock().
3114 		 *
3115 		 * How often it's flushed is a trade-off between the memory
3116 		 * limit enforcement accuracy and potential CPU contention,
3117 		 * so it might be changed in the future.
3118 		 */
3119 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3120 		stock->nr_bytes = 0;
3121 	}
3122 
3123 	obj_cgroup_put(old);
3124 	stock->cached_objcg = NULL;
3125 }
3126 
3127 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3128 				     struct mem_cgroup *root_memcg)
3129 {
3130 	struct mem_cgroup *memcg;
3131 
3132 	if (stock->cached_objcg) {
3133 		memcg = obj_cgroup_memcg(stock->cached_objcg);
3134 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3135 			return true;
3136 	}
3137 
3138 	return false;
3139 }
3140 
3141 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3142 {
3143 	struct memcg_stock_pcp *stock;
3144 	unsigned long flags;
3145 
3146 	local_irq_save(flags);
3147 
3148 	stock = this_cpu_ptr(&memcg_stock);
3149 	if (stock->cached_objcg != objcg) { /* reset if necessary */
3150 		drain_obj_stock(stock);
3151 		obj_cgroup_get(objcg);
3152 		stock->cached_objcg = objcg;
3153 		stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3154 	}
3155 	stock->nr_bytes += nr_bytes;
3156 
3157 	if (stock->nr_bytes > PAGE_SIZE)
3158 		drain_obj_stock(stock);
3159 
3160 	local_irq_restore(flags);
3161 }
3162 
3163 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3164 {
3165 	struct mem_cgroup *memcg;
3166 	unsigned int nr_pages, nr_bytes;
3167 	int ret;
3168 
3169 	if (consume_obj_stock(objcg, size))
3170 		return 0;
3171 
3172 	/*
3173 	 * In theory, objcg->nr_charged_bytes can have enough
3174 	 * pre-charged bytes to satisfy the allocation. However,
3175 	 * flushing objcg->nr_charged_bytes requires two atomic
3176 	 * operations, and objcg->nr_charged_bytes can't be big,
3177 	 * so it's better to ignore it and try to grab some new pages.
3178 	 * objcg->nr_charged_bytes will be flushed in
3179 	 * refill_obj_stock(), called from this function or
3180 	 * independently later.
3181 	 */
3182 	rcu_read_lock();
3183 	memcg = obj_cgroup_memcg(objcg);
3184 	css_get(&memcg->css);
3185 	rcu_read_unlock();
3186 
3187 	nr_pages = size >> PAGE_SHIFT;
3188 	nr_bytes = size & (PAGE_SIZE - 1);
3189 
3190 	if (nr_bytes)
3191 		nr_pages += 1;
3192 
3193 	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
3194 	if (!ret && nr_bytes)
3195 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
3196 
3197 	css_put(&memcg->css);
3198 	return ret;
3199 }
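
/*
 * Worked example (illustrative, 4K pages): a 3000-byte charge that
 * misses the percpu object stock rounds up to one full page via
 * __memcg_kmem_charge(), then hands the unused remainder,
 * PAGE_SIZE - 3000 = 1096 bytes, back to the stock through
 * refill_obj_stock(), where later sub-page charges on this CPU can
 * consume it.
 */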
3200 
3201 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3202 {
3203 	refill_obj_stock(objcg, size);
3204 }
3205 
3206 #endif /* CONFIG_MEMCG_KMEM */
3207 
3208 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3209 
3210 /*
3211  * Because tail pages are not marked as "used", set them up here. We're under
3212  * pgdat->lru_lock, and migration entries are set up in all page mappings.
3213  */
3214 void mem_cgroup_split_huge_fixup(struct page *head)
3215 {
3216 	struct mem_cgroup *memcg = head->mem_cgroup;
3217 	int i;
3218 
3219 	if (mem_cgroup_disabled())
3220 		return;
3221 
3222 	for (i = 1; i < HPAGE_PMD_NR; i++) {
3223 		css_get(&memcg->css);
3224 		head[i].mem_cgroup = memcg;
3225 	}
3226 }
3227 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3228 
3229 #ifdef CONFIG_MEMCG_SWAP
3230 /**
3231  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3232  * @entry: swap entry to be moved
3233  * @from:  mem_cgroup which the entry is moved from
3234  * @to:  mem_cgroup which the entry is moved to
3235  *
3236  * It succeeds only when the swap_cgroup's record for this entry is the same
3237  * as the mem_cgroup's id of @from.
3238  *
3239  * Returns 0 on success, -EINVAL on failure.
3240  *
3241  * The caller must have charged to @to, IOW, called page_counter_charge() about
3242  * The caller must have charged to @to, IOW, called page_counter_charge() on
3243  */
3244 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3245 				struct mem_cgroup *from, struct mem_cgroup *to)
3246 {
3247 	unsigned short old_id, new_id;
3248 
3249 	old_id = mem_cgroup_id(from);
3250 	new_id = mem_cgroup_id(to);
3251 
3252 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3253 		mod_memcg_state(from, MEMCG_SWAP, -1);
3254 		mod_memcg_state(to, MEMCG_SWAP, 1);
3255 		return 0;
3256 	}
3257 	return -EINVAL;
3258 }
3259 #else
3260 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3261 				struct mem_cgroup *from, struct mem_cgroup *to)
3262 {
3263 	return -EINVAL;
3264 }
3265 #endif
3266 
3267 static DEFINE_MUTEX(memcg_max_mutex);
3268 
3269 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3270 				 unsigned long max, bool memsw)
3271 {
3272 	bool enlarge = false;
3273 	bool drained = false;
3274 	int ret;
3275 	bool limits_invariant;
3276 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3277 
3278 	do {
3279 		if (signal_pending(current)) {
3280 			ret = -EINTR;
3281 			break;
3282 		}
3283 
3284 		mutex_lock(&memcg_max_mutex);
3285 		/*
3286 		 * Make sure that the new limit (memsw or memory limit) doesn't
3287 		 * break our basic invariant rule memory.max <= memsw.max.
3288 		 */
3289 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3290 					   max <= memcg->memsw.max;
3291 		if (!limits_invariant) {
3292 			mutex_unlock(&memcg_max_mutex);
3293 			ret = -EINVAL;
3294 			break;
3295 		}
3296 		if (max > counter->max)
3297 			enlarge = true;
3298 		ret = page_counter_set_max(counter, max);
3299 		mutex_unlock(&memcg_max_mutex);
3300 
3301 		if (!ret)
3302 			break;
3303 
3304 		if (!drained) {
3305 			drain_all_stock(memcg);
3306 			drained = true;
3307 			continue;
3308 		}
3309 
3310 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3311 					GFP_KERNEL, !memsw)) {
3312 			ret = -EBUSY;
3313 			break;
3314 		}
3315 	} while (true);
3316 
3317 	if (!ret && enlarge)
3318 		memcg_oom_recover(memcg);
3319 
3320 	return ret;
3321 }
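
/*
 * Example (illustrative): shrinking memory.max below current usage
 * makes page_counter_set_max() fail, so the loop above first drains
 * the percpu stocks and then reclaims page by page until the counter
 * fits under the new max; if reclaim stops making progress, the write
 * fails with -EBUSY instead of looping forever.
 */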
3322 
3323 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3324 					    gfp_t gfp_mask,
3325 					    unsigned long *total_scanned)
3326 {
3327 	unsigned long nr_reclaimed = 0;
3328 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3329 	unsigned long reclaimed;
3330 	int loop = 0;
3331 	struct mem_cgroup_tree_per_node *mctz;
3332 	unsigned long excess;
3333 	unsigned long nr_scanned;
3334 
3335 	if (order > 0)
3336 		return 0;
3337 
3338 	mctz = soft_limit_tree_node(pgdat->node_id);
3339 
3340 	/*
3341 	 * Do not even bother to check the largest node if the root
3342 	 * is empty. Do it lockless to prevent lock bouncing. Races
3343 	 * are acceptable as soft limit is best effort anyway.
3344 	 */
3345 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3346 		return 0;
3347 
3348 	/*
3349 	 * This loop can run for a while, especially if mem_cgroups continuously
3350 	 * keep exceeding their soft limit and putting the system under
3351 	 * pressure.
3352 	 */
3353 	do {
3354 		if (next_mz)
3355 			mz = next_mz;
3356 		else
3357 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3358 		if (!mz)
3359 			break;
3360 
3361 		nr_scanned = 0;
3362 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3363 						    gfp_mask, &nr_scanned);
3364 		nr_reclaimed += reclaimed;
3365 		*total_scanned += nr_scanned;
3366 		spin_lock_irq(&mctz->lock);
3367 		__mem_cgroup_remove_exceeded(mz, mctz);
3368 
3369 		/*
3370 		 * If we failed to reclaim anything from this memory cgroup,
3371 		 * it is time to move on to the next cgroup.
3372 		 */
3373 		next_mz = NULL;
3374 		if (!reclaimed)
3375 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3376 
3377 		excess = soft_limit_excess(mz->memcg);
3378 		/*
3379 		 * One school of thought says that we should not add
3380 		 * back the node to the tree if reclaim returns 0.
3381 		 * But our reclaim could return 0, simply because due
3382 		 * to priority we are exposing a smaller subset of
3383 		 * memory to reclaim from. Consider this as a longer
3384 		 * term TODO.
3385 		 */
3386 		/* If excess == 0, no tree ops */
3387 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3388 		spin_unlock_irq(&mctz->lock);
3389 		css_put(&mz->memcg->css);
3390 		loop++;
3391 		/*
3392 		 * Could not reclaim anything and there are no more
3393 		 * mem cgroups to try or we seem to be looping without
3394 		 * reclaiming anything.
3395 		 */
3396 		if (!nr_reclaimed &&
3397 			(next_mz == NULL ||
3398 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3399 			break;
3400 	} while (!nr_reclaimed);
3401 	if (next_mz)
3402 		css_put(&next_mz->memcg->css);
3403 	return nr_reclaimed;
3404 }
3405 
3406 /*
3407  * Test whether @memcg has children, dead or alive.  Note that this
3408  * function doesn't care whether @memcg has use_hierarchy enabled and
3409  * returns %true if there are child csses according to the cgroup
3410  * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3411  */
3412 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3413 {
3414 	bool ret;
3415 
3416 	rcu_read_lock();
3417 	ret = css_next_child(NULL, &memcg->css);
3418 	rcu_read_unlock();
3419 	return ret;
3420 }
3421 
3422 /*
3423  * Reclaims as many pages from the given memcg as possible.
3424  *
3425  * Caller is responsible for holding css reference for memcg.
3426  */
3427 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3428 {
3429 	int nr_retries = MAX_RECLAIM_RETRIES;
3430 
3431 	/* we call try-to-free pages to make this cgroup empty */
3432 	lru_add_drain_all();
3433 
3434 	drain_all_stock(memcg);
3435 
3436 	/* try to free all pages in this cgroup */
3437 	while (nr_retries && page_counter_read(&memcg->memory)) {
3438 		int progress;
3439 
3440 		if (signal_pending(current))
3441 			return -EINTR;
3442 
3443 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3444 							GFP_KERNEL, true);
3445 		if (!progress) {
3446 			nr_retries--;
3447 			/* maybe some writeback is necessary */
3448 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3449 		}
3450 
3451 	}
3452 
3453 	return 0;
3454 }
3455 
3456 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3457 					    char *buf, size_t nbytes,
3458 					    loff_t off)
3459 {
3460 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3461 
3462 	if (mem_cgroup_is_root(memcg))
3463 		return -EINVAL;
3464 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3465 }
3466 
3467 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3468 				     struct cftype *cft)
3469 {
3470 	return mem_cgroup_from_css(css)->use_hierarchy;
3471 }
3472 
3473 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3474 				      struct cftype *cft, u64 val)
3475 {
3476 	int retval = 0;
3477 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3478 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3479 
3480 	if (memcg->use_hierarchy == val)
3481 		return 0;
3482 
3483 	/*
3484 	 * If parent's use_hierarchy is set, we can't make any modifications
3485 	 * in the child subtrees. If it is unset, then the change can
3486 	 * occur, provided the current cgroup has no children.
3487 	 *
3488 	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
3489 	 * set if there are no children.
3490 	 */
3491 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3492 				(val == 1 || val == 0)) {
3493 		if (!memcg_has_children(memcg))
3494 			memcg->use_hierarchy = val;
3495 		else
3496 			retval = -EBUSY;
3497 	} else
3498 		retval = -EINVAL;
3499 
3500 	return retval;
3501 }
3502 
3503 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3504 {
3505 	unsigned long val;
3506 
3507 	if (mem_cgroup_is_root(memcg)) {
3508 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3509 			memcg_page_state(memcg, NR_ANON_MAPPED);
3510 		if (swap)
3511 			val += memcg_page_state(memcg, MEMCG_SWAP);
3512 	} else {
3513 		if (!swap)
3514 			val = page_counter_read(&memcg->memory);
3515 		else
3516 			val = page_counter_read(&memcg->memsw);
3517 	}
3518 	return val;
3519 }
3520 
3521 enum {
3522 	RES_USAGE,
3523 	RES_LIMIT,
3524 	RES_MAX_USAGE,
3525 	RES_FAILCNT,
3526 	RES_SOFT_LIMIT,
3527 };
3528 
3529 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3530 			       struct cftype *cft)
3531 {
3532 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3533 	struct page_counter *counter;
3534 
3535 	switch (MEMFILE_TYPE(cft->private)) {
3536 	case _MEM:
3537 		counter = &memcg->memory;
3538 		break;
3539 	case _MEMSWAP:
3540 		counter = &memcg->memsw;
3541 		break;
3542 	case _KMEM:
3543 		counter = &memcg->kmem;
3544 		break;
3545 	case _TCP:
3546 		counter = &memcg->tcpmem;
3547 		break;
3548 	default:
3549 		BUG();
3550 	}
3551 
3552 	switch (MEMFILE_ATTR(cft->private)) {
3553 	case RES_USAGE:
3554 		if (counter == &memcg->memory)
3555 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3556 		if (counter == &memcg->memsw)
3557 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3558 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3559 	case RES_LIMIT:
3560 		return (u64)counter->max * PAGE_SIZE;
3561 	case RES_MAX_USAGE:
3562 		return (u64)counter->watermark * PAGE_SIZE;
3563 	case RES_FAILCNT:
3564 		return counter->failcnt;
3565 	case RES_SOFT_LIMIT:
3566 		return (u64)memcg->soft_limit * PAGE_SIZE;
3567 	default:
3568 		BUG();
3569 	}
3570 }
3571 
3572 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3573 {
3574 	unsigned long stat[MEMCG_NR_STAT] = {0};
3575 	struct mem_cgroup *mi;
3576 	int node, cpu, i;
3577 
3578 	for_each_online_cpu(cpu)
3579 		for (i = 0; i < MEMCG_NR_STAT; i++)
3580 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3581 
3582 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3583 		for (i = 0; i < MEMCG_NR_STAT; i++)
3584 			atomic_long_add(stat[i], &mi->vmstats[i]);
3585 
3586 	for_each_node(node) {
3587 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3588 		struct mem_cgroup_per_node *pi;
3589 
3590 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3591 			stat[i] = 0;
3592 
3593 		for_each_online_cpu(cpu)
3594 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3595 				stat[i] += per_cpu(
3596 					pn->lruvec_stat_cpu->count[i], cpu);
3597 
3598 		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3599 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3600 				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3601 	}
3602 }
3603 
3604 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3605 {
3606 	unsigned long events[NR_VM_EVENT_ITEMS];
3607 	struct mem_cgroup *mi;
3608 	int cpu, i;
3609 
3610 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3611 		events[i] = 0;
3612 
3613 	for_each_online_cpu(cpu)
3614 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3615 			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3616 					     cpu);
3617 
3618 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3619 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3620 			atomic_long_add(events[i], &mi->vmevents[i]);
3621 }
3622 
3623 #ifdef CONFIG_MEMCG_KMEM
3624 static int memcg_online_kmem(struct mem_cgroup *memcg)
3625 {
3626 	struct obj_cgroup *objcg;
3627 	int memcg_id;
3628 
3629 	if (cgroup_memory_nokmem)
3630 		return 0;
3631 
3632 	BUG_ON(memcg->kmemcg_id >= 0);
3633 	BUG_ON(memcg->kmem_state);
3634 
3635 	memcg_id = memcg_alloc_cache_id();
3636 	if (memcg_id < 0)
3637 		return memcg_id;
3638 
3639 	objcg = obj_cgroup_alloc();
3640 	if (!objcg) {
3641 		memcg_free_cache_id(memcg_id);
3642 		return -ENOMEM;
3643 	}
3644 	objcg->memcg = memcg;
3645 	rcu_assign_pointer(memcg->objcg, objcg);
3646 
3647 	static_branch_enable(&memcg_kmem_enabled_key);
3648 
3649 	/*
3650 	 * A memory cgroup is considered kmem-online as soon as it gets
3651 	 * kmemcg_id. Setting the id after enabling static branching will
3652 	 * guarantee no one starts accounting before all call sites are
3653 	 * patched.
3654 	 */
3655 	memcg->kmemcg_id = memcg_id;
3656 	memcg->kmem_state = KMEM_ONLINE;
3657 
3658 	return 0;
3659 }
3660 
3661 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3662 {
3663 	struct cgroup_subsys_state *css;
3664 	struct mem_cgroup *parent, *child;
3665 	int kmemcg_id;
3666 
3667 	if (memcg->kmem_state != KMEM_ONLINE)
3668 		return;
3669 
3670 	memcg->kmem_state = KMEM_ALLOCATED;
3671 
3672 	parent = parent_mem_cgroup(memcg);
3673 	if (!parent)
3674 		parent = root_mem_cgroup;
3675 
3676 	memcg_reparent_objcgs(memcg, parent);
3677 
3678 	kmemcg_id = memcg->kmemcg_id;
3679 	BUG_ON(kmemcg_id < 0);
3680 
3681 	/*
3682 	 * Change kmemcg_id of this cgroup and all its descendants to the
3683 	 * parent's id, and then move all entries from this cgroup's list_lrus
3684 	 * to ones of the parent. After we have finished, all list_lrus
3685 	 * corresponding to this cgroup are guaranteed to remain empty. The
3686 	 * ordering is imposed by list_lru_node->lock taken by
3687 	 * memcg_drain_all_list_lrus().
3688 	 */
3689 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3690 	css_for_each_descendant_pre(css, &memcg->css) {
3691 		child = mem_cgroup_from_css(css);
3692 		BUG_ON(child->kmemcg_id != kmemcg_id);
3693 		child->kmemcg_id = parent->kmemcg_id;
3694 		if (!memcg->use_hierarchy)
3695 			break;
3696 	}
3697 	rcu_read_unlock();
3698 
3699 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3700 
3701 	memcg_free_cache_id(kmemcg_id);
3702 }
3703 
3704 static void memcg_free_kmem(struct mem_cgroup *memcg)
3705 {
3706 	/* css_alloc() failed, offlining didn't happen */
3707 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3708 		memcg_offline_kmem(memcg);
3709 }
3710 #else
3711 static int memcg_online_kmem(struct mem_cgroup *memcg)
3712 {
3713 	return 0;
3714 }
3715 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3716 {
3717 }
3718 static void memcg_free_kmem(struct mem_cgroup *memcg)
3719 {
3720 }
3721 #endif /* CONFIG_MEMCG_KMEM */
3722 
3723 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3724 				 unsigned long max)
3725 {
3726 	int ret;
3727 
3728 	mutex_lock(&memcg_max_mutex);
3729 	ret = page_counter_set_max(&memcg->kmem, max);
3730 	mutex_unlock(&memcg_max_mutex);
3731 	return ret;
3732 }
3733 
3734 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3735 {
3736 	int ret;
3737 
3738 	mutex_lock(&memcg_max_mutex);
3739 
3740 	ret = page_counter_set_max(&memcg->tcpmem, max);
3741 	if (ret)
3742 		goto out;
3743 
3744 	if (!memcg->tcpmem_active) {
3745 		/*
3746 		 * The active flag needs to be written after the static_key
3747 		 * update. This is what guarantees that the socket activation
3748 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3749 		 * for details, and note that we don't mark any socket as
3750 		 * belonging to this memcg until that flag is up.
3751 		 *
3752 		 * We need to do this, because static_keys will span multiple
3753 		 * sites, but we can't control their order. If we mark a socket
3754 		 * as accounted, but the accounting functions are not patched in
3755 		 * yet, we'll lose accounting.
3756 		 *
3757 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3758 		 * because when this value changes, the code to process it is not
3759 		 * patched in yet.
3760 		 */
3761 		static_branch_inc(&memcg_sockets_enabled_key);
3762 		memcg->tcpmem_active = true;
3763 	}
3764 out:
3765 	mutex_unlock(&memcg_max_mutex);
3766 	return ret;
3767 }
3768 
3769 /*
3770  * This handler serves writes to the RES_LIMIT and RES_SOFT_LIMIT
3771  * control files.
3772  */
3773 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3774 				char *buf, size_t nbytes, loff_t off)
3775 {
3776 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3777 	unsigned long nr_pages;
3778 	int ret;
3779 
3780 	buf = strstrip(buf);
3781 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3782 	if (ret)
3783 		return ret;
3784 
3785 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3786 	case RES_LIMIT:
3787 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3788 			ret = -EINVAL;
3789 			break;
3790 		}
3791 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3792 		case _MEM:
3793 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3794 			break;
3795 		case _MEMSWAP:
3796 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3797 			break;
3798 		case _KMEM:
3799 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3800 				     "Please report your usecase to linux-mm@kvack.org if you "
3801 				     "depend on this functionality.\n");
3802 			ret = memcg_update_kmem_max(memcg, nr_pages);
3803 			break;
3804 		case _TCP:
3805 			ret = memcg_update_tcp_max(memcg, nr_pages);
3806 			break;
3807 		}
3808 		break;
3809 	case RES_SOFT_LIMIT:
3810 		memcg->soft_limit = nr_pages;
3811 		ret = 0;
3812 		break;
3813 	}
3814 	return ret ?: nbytes;
3815 }
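
/*
 * Example (hypothetical userspace sketch, not part of this file): the
 * handler above backs the cgroup v1 limit files, so setting a 512M hard
 * limit boils down to writing that string, which page_counter_memparse()
 * accepts with K/M/G suffixes.  The cgroup path is illustrative only.
 *
 *	int fd = open("/sys/fs/cgroup/memory/app/memory.limit_in_bytes",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "512M", 4);
 *		close(fd);
 *	}
 */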
3816 
3817 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3818 				size_t nbytes, loff_t off)
3819 {
3820 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3821 	struct page_counter *counter;
3822 
3823 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3824 	case _MEM:
3825 		counter = &memcg->memory;
3826 		break;
3827 	case _MEMSWAP:
3828 		counter = &memcg->memsw;
3829 		break;
3830 	case _KMEM:
3831 		counter = &memcg->kmem;
3832 		break;
3833 	case _TCP:
3834 		counter = &memcg->tcpmem;
3835 		break;
3836 	default:
3837 		BUG();
3838 	}
3839 
3840 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3841 	case RES_MAX_USAGE:
3842 		page_counter_reset_watermark(counter);
3843 		break;
3844 	case RES_FAILCNT:
3845 		counter->failcnt = 0;
3846 		break;
3847 	default:
3848 		BUG();
3849 	}
3850 
3851 	return nbytes;
3852 }
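
/*
 * Note on the .private encoding unpacked above: MEMFILE_PRIVATE(),
 * defined earlier in this file, packs the resource type into the high
 * 16 bits and the attribute into the low 16 bits, so for example:
 *
 *	MEMFILE_PRIVATE(_TCP, RES_FAILCNT) == (_TCP << 16) | RES_FAILCNT
 *
 * which MEMFILE_TYPE() and MEMFILE_ATTR() recover in the switches above.
 */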
3853 
3854 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3855 					struct cftype *cft)
3856 {
3857 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3858 }
3859 
3860 #ifdef CONFIG_MMU
3861 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3862 					struct cftype *cft, u64 val)
3863 {
3864 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3865 
3866 	if (val & ~MOVE_MASK)
3867 		return -EINVAL;
3868 
3869 	/*
3870 	 * No kind of locking is needed in here, because ->can_attach() will
3871 	 * check this value once at the beginning of the process, and then carry
3872 	 * on with stale data. This means that changes to this value will only
3873 	 * affect task migrations starting after the change.
3874 	 */
3875 	memcg->move_charge_at_immigrate = val;
3876 	return 0;
3877 }
3878 #else
3879 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3880 					struct cftype *cft, u64 val)
3881 {
3882 	return -ENOSYS;
3883 }
3884 #endif
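
/*
 * Illustrative usage of the knob above (cgroup v1): MOVE_ANON is bit 0
 * and MOVE_FILE is bit 1 of MOVE_MASK as defined earlier in this file,
 * so a hypothetical userspace snippet enabling charge moving for both
 * page types would be:
 *
 *	int fd = open(".../memory.move_charge_at_immigrate", O_WRONLY);
 *	write(fd, "3", 1);
 *	close(fd);
 */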
3885 
3886 #ifdef CONFIG_NUMA
3887 
3888 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3889 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3890 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3891 
3892 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3893 				int nid, unsigned int lru_mask, bool tree)
3894 {
3895 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3896 	unsigned long nr = 0;
3897 	enum lru_list lru;
3898 
3899 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3900 
3901 	for_each_lru(lru) {
3902 		if (!(BIT(lru) & lru_mask))
3903 			continue;
3904 		if (tree)
3905 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3906 		else
3907 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3908 	}
3909 	return nr;
3910 }
3911 
3912 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3913 					     unsigned int lru_mask,
3914 					     bool tree)
3915 {
3916 	unsigned long nr = 0;
3917 	enum lru_list lru;
3918 
3919 	for_each_lru(lru) {
3920 		if (!(BIT(lru) & lru_mask))
3921 			continue;
3922 		if (tree)
3923 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3924 		else
3925 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3926 	}
3927 	return nr;
3928 }
3929 
3930 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3931 {
3932 	struct numa_stat {
3933 		const char *name;
3934 		unsigned int lru_mask;
3935 	};
3936 
3937 	static const struct numa_stat stats[] = {
3938 		{ "total", LRU_ALL },
3939 		{ "file", LRU_ALL_FILE },
3940 		{ "anon", LRU_ALL_ANON },
3941 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3942 	};
3943 	const struct numa_stat *stat;
3944 	int nid;
3945 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3946 
3947 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3948 		seq_printf(m, "%s=%lu", stat->name,
3949 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3950 						   false));
3951 		for_each_node_state(nid, N_MEMORY)
3952 			seq_printf(m, " N%d=%lu", nid,
3953 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3954 							stat->lru_mask, false));
3955 		seq_putc(m, '\n');
3956 	}
3957 
3958 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3959 
3960 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
3961 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3962 						   true));
3963 		for_each_node_state(nid, N_MEMORY)
3964 			seq_printf(m, " N%d=%lu", nid,
3965 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3966 							stat->lru_mask, true));
3967 		seq_putc(m, '\n');
3968 	}
3969 
3970 	return 0;
3971 }
3972 #endif /* CONFIG_NUMA */
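
/*
 * Sample memory.numa_stat output produced by the two loops above on a
 * hypothetical two-node machine (all values invented for illustration):
 *
 *	total=2048 N0=1024 N1=1024
 *	file=1536 N0=768 N1=768
 *	anon=512 N0=256 N1=256
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=4096 N0=2048 N1=2048
 *	hierarchical_file=3072 N0=1536 N1=1536
 *	hierarchical_anon=1024 N0=512 N1=512
 *	hierarchical_unevictable=0 N0=0 N1=0
 */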
3973 
3974 static const unsigned int memcg1_stats[] = {
3975 	NR_FILE_PAGES,
3976 	NR_ANON_MAPPED,
3977 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3978 	NR_ANON_THPS,
3979 #endif
3980 	NR_SHMEM,
3981 	NR_FILE_MAPPED,
3982 	NR_FILE_DIRTY,
3983 	NR_WRITEBACK,
3984 	MEMCG_SWAP,
3985 };
3986 
3987 static const char *const memcg1_stat_names[] = {
3988 	"cache",
3989 	"rss",
3990 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3991 	"rss_huge",
3992 #endif
3993 	"shmem",
3994 	"mapped_file",
3995 	"dirty",
3996 	"writeback",
3997 	"swap",
3998 };
3999 
4000 /* Universal VM events cgroup1 shows, original sort order */
4001 static const unsigned int memcg1_events[] = {
4002 	PGPGIN,
4003 	PGPGOUT,
4004 	PGFAULT,
4005 	PGMAJFAULT,
4006 };
4007 
4008 static int memcg_stat_show(struct seq_file *m, void *v)
4009 {
4010 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4011 	unsigned long memory, memsw;
4012 	struct mem_cgroup *mi;
4013 	unsigned int i;
4014 
4015 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4016 
4017 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4018 		unsigned long nr;
4019 
4020 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4021 			continue;
4022 		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4023 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4024 		if (memcg1_stats[i] == NR_ANON_THPS)
4025 			nr *= HPAGE_PMD_NR;
4026 #endif
4027 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4028 	}
4029 
4030 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4031 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4032 			   memcg_events_local(memcg, memcg1_events[i]));
4033 
4034 	for (i = 0; i < NR_LRU_LISTS; i++)
4035 		seq_printf(m, "%s %lu\n", lru_list_name(i),
4036 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4037 			   PAGE_SIZE);
4038 
4039 	/* Hierarchical information */
4040 	memory = memsw = PAGE_COUNTER_MAX;
4041 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4042 		memory = min(memory, READ_ONCE(mi->memory.max));
4043 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4044 	}
4045 	seq_printf(m, "hierarchical_memory_limit %llu\n",
4046 		   (u64)memory * PAGE_SIZE);
4047 	if (do_memsw_account())
4048 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4049 			   (u64)memsw * PAGE_SIZE);
4050 
4051 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4052 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4053 			continue;
4054 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4055 			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
4056 			   PAGE_SIZE);
4057 	}
4058 
4059 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4060 		seq_printf(m, "total_%s %llu\n",
4061 			   vm_event_name(memcg1_events[i]),
4062 			   (u64)memcg_events(memcg, memcg1_events[i]));
4063 
4064 	for (i = 0; i < NR_LRU_LISTS; i++)
4065 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4066 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4067 			   PAGE_SIZE);
4068 
4069 #ifdef CONFIG_DEBUG_VM
4070 	{
4071 		pg_data_t *pgdat;
4072 		struct mem_cgroup_per_node *mz;
4073 		unsigned long anon_cost = 0;
4074 		unsigned long file_cost = 0;
4075 
4076 		for_each_online_pgdat(pgdat) {
4077 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
4078 
4079 			anon_cost += mz->lruvec.anon_cost;
4080 			file_cost += mz->lruvec.file_cost;
4081 		}
4082 		seq_printf(m, "anon_cost %lu\n", anon_cost);
4083 		seq_printf(m, "file_cost %lu\n", file_cost);
4084 	}
4085 #endif
4086 
4087 	return 0;
4088 }
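
/*
 * Abridged memory.stat output as emitted by the function above (values
 * invented; "total_" lines report hierarchical counts):
 *
 *	cache 1048576
 *	rss 409600
 *	mapped_file 135168
 *	pgpgin 2341
 *	pgfault 1882
 *	inactive_anon 0
 *	active_file 786432
 *	hierarchical_memory_limit 536870912
 *	total_cache 2097152
 *	total_rss 819200
 */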
4089 
4090 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4091 				      struct cftype *cft)
4092 {
4093 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4094 
4095 	return mem_cgroup_swappiness(memcg);
4096 }
4097 
4098 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4099 				       struct cftype *cft, u64 val)
4100 {
4101 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4102 
4103 	if (val > 100)
4104 		return -EINVAL;
4105 
4106 	if (css->parent)
4107 		memcg->swappiness = val;
4108 	else
4109 		vm_swappiness = val;
4110 
4111 	return 0;
4112 }
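
/*
 * Note the split behavior above: a hypothetical write of "60" to the
 * root cgroup's memory.swappiness updates the global vm_swappiness,
 * while the same write to any child (css->parent != NULL) only updates
 * that memcg's private setting; values above 100 are rejected.
 */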
4113 
4114 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4115 {
4116 	struct mem_cgroup_threshold_ary *t;
4117 	unsigned long usage;
4118 	int i;
4119 
4120 	rcu_read_lock();
4121 	if (!swap)
4122 		t = rcu_dereference(memcg->thresholds.primary);
4123 	else
4124 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4125 
4126 	if (!t)
4127 		goto unlock;
4128 
4129 	usage = mem_cgroup_usage(memcg, swap);
4130 
4131 	/*
4132 	 * current_threshold points to the threshold just below or equal to
4133 	 * usage. If that no longer holds, a threshold was crossed after the
4134 	 * last call of __mem_cgroup_threshold().
4135 	 */
4136 	i = t->current_threshold;
4137 
4138 	/*
4139 	 * Iterate backward over the array of thresholds starting from
4140 	 * current_threshold and check if a threshold is crossed.
4141 	 * If none of the thresholds below usage is crossed, we read
4142 	 * only one element of the array here.
4143 	 */
4144 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4145 		eventfd_signal(t->entries[i].eventfd, 1);
4146 
4147 	/* i = current_threshold + 1 */
4148 	i++;
4149 
4150 	/*
4151 	 * Iterate forward over the array of thresholds starting from
4152 	 * current_threshold+1 and check if a threshold is crossed.
4153 	 * If none of the thresholds above usage is crossed, we read
4154 	 * only one element of the array here.
4155 	 */
4156 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4157 		eventfd_signal(t->entries[i].eventfd, 1);
4158 
4159 	/* Update current_threshold */
4160 	t->current_threshold = i - 1;
4161 unlock:
4162 	rcu_read_unlock();
4163 }
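
/*
 * Worked example of the two walks above (numbers invented): with a
 * sorted array {4M, 8M, 16M} and current_threshold == 1 (8M), a usage
 * drop to 6M makes the backward loop signal the 8M eventfd and leaves
 * current_threshold at 0; a later rise to 20M makes the forward loop
 * signal the 8M and 16M eventfds and sets current_threshold to 2.
 */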
4164 
4165 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4166 {
4167 	while (memcg) {
4168 		__mem_cgroup_threshold(memcg, false);
4169 		if (do_memsw_account())
4170 			__mem_cgroup_threshold(memcg, true);
4171 
4172 		memcg = parent_mem_cgroup(memcg);
4173 	}
4174 }
4175 
4176 static int compare_thresholds(const void *a, const void *b)
4177 {
4178 	const struct mem_cgroup_threshold *_a = a;
4179 	const struct mem_cgroup_threshold *_b = b;
4180 
4181 	if (_a->threshold > _b->threshold)
4182 		return 1;
4183 
4184 	if (_a->threshold < _b->threshold)
4185 		return -1;
4186 
4187 	return 0;
4188 }
4189 
4190 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4191 {
4192 	struct mem_cgroup_eventfd_list *ev;
4193 
4194 	spin_lock(&memcg_oom_lock);
4195 
4196 	list_for_each_entry(ev, &memcg->oom_notify, list)
4197 		eventfd_signal(ev->eventfd, 1);
4198 
4199 	spin_unlock(&memcg_oom_lock);
4200 	return 0;
4201 }
4202 
4203 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4204 {
4205 	struct mem_cgroup *iter;
4206 
4207 	for_each_mem_cgroup_tree(iter, memcg)
4208 		mem_cgroup_oom_notify_cb(iter);
4209 }
4210 
4211 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4212 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4213 {
4214 	struct mem_cgroup_thresholds *thresholds;
4215 	struct mem_cgroup_threshold_ary *new;
4216 	unsigned long threshold;
4217 	unsigned long usage;
4218 	int i, size, ret;
4219 
4220 	ret = page_counter_memparse(args, "-1", &threshold);
4221 	if (ret)
4222 		return ret;
4223 
4224 	mutex_lock(&memcg->thresholds_lock);
4225 
4226 	if (type == _MEM) {
4227 		thresholds = &memcg->thresholds;
4228 		usage = mem_cgroup_usage(memcg, false);
4229 	} else if (type == _MEMSWAP) {
4230 		thresholds = &memcg->memsw_thresholds;
4231 		usage = mem_cgroup_usage(memcg, true);
4232 	} else
4233 		BUG();
4234 
4235 	/* Check if a threshold crossed before adding a new one */
4236 	if (thresholds->primary)
4237 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4238 
4239 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4240 
4241 	/* Allocate memory for new array of thresholds */
4242 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4243 	if (!new) {
4244 		ret = -ENOMEM;
4245 		goto unlock;
4246 	}
4247 	new->size = size;
4248 
4249 	/* Copy thresholds (if any) to new array */
4250 	if (thresholds->primary) {
4251 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4252 				sizeof(struct mem_cgroup_threshold));
4253 	}
4254 
4255 	/* Add new threshold */
4256 	new->entries[size - 1].eventfd = eventfd;
4257 	new->entries[size - 1].threshold = threshold;
4258 
4259 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4260 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4261 			compare_thresholds, NULL);
4262 
4263 	/* Find current threshold */
4264 	new->current_threshold = -1;
4265 	for (i = 0; i < size; i++) {
4266 		if (new->entries[i].threshold <= usage) {
4267 			/*
4268 			 * new->current_threshold will not be used until
4269 			 * rcu_assign_pointer(), so it's safe to increment
4270 			 * it here.
4271 			 */
4272 			++new->current_threshold;
4273 		} else
4274 			break;
4275 	}
4276 
4277 	/* Free old spare buffer and save old primary buffer as spare */
4278 	kfree(thresholds->spare);
4279 	thresholds->spare = thresholds->primary;
4280 
4281 	rcu_assign_pointer(thresholds->primary, new);
4282 
4283 	/* Make sure nobody still uses the old thresholds array */
4284 	synchronize_rcu();
4285 
4286 unlock:
4287 	mutex_unlock(&memcg->thresholds_lock);
4288 
4289 	return ret;
4290 }
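
/*
 * Hypothetical userspace sketch of the registration this function backs
 * (cgroup v1 "cgroup.event_control" protocol; paths and the 8M value
 * are illustrative):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open(".../memory.usage_in_bytes", O_RDONLY);
 *	int ecfd = open(".../cgroup.event_control", O_WRONLY);
 *	char line[64];
 *	uint64_t ticks;
 *
 *	snprintf(line, sizeof(line), "%d %d 8M", efd, cfd);
 *	write(ecfd, line, strlen(line));
 *	read(efd, &ticks, sizeof(ticks));
 *
 * The read() blocks until the 8M threshold is crossed in either
 * direction.
 */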
4291 
4292 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4293 	struct eventfd_ctx *eventfd, const char *args)
4294 {
4295 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4296 }
4297 
4298 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4299 	struct eventfd_ctx *eventfd, const char *args)
4300 {
4301 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4302 }
4303 
4304 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4305 	struct eventfd_ctx *eventfd, enum res_type type)
4306 {
4307 	struct mem_cgroup_thresholds *thresholds;
4308 	struct mem_cgroup_threshold_ary *new;
4309 	unsigned long usage;
4310 	int i, j, size, entries;
4311 
4312 	mutex_lock(&memcg->thresholds_lock);
4313 
4314 	if (type == _MEM) {
4315 		thresholds = &memcg->thresholds;
4316 		usage = mem_cgroup_usage(memcg, false);
4317 	} else if (type == _MEMSWAP) {
4318 		thresholds = &memcg->memsw_thresholds;
4319 		usage = mem_cgroup_usage(memcg, true);
4320 	} else
4321 		BUG();
4322 
4323 	if (!thresholds->primary)
4324 		goto unlock;
4325 
4326 	/* Check if a threshold crossed before removing */
4327 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4328 
4329 	/* Calculate the new number of thresholds */
4330 	size = entries = 0;
4331 	for (i = 0; i < thresholds->primary->size; i++) {
4332 		if (thresholds->primary->entries[i].eventfd != eventfd)
4333 			size++;
4334 		else
4335 			entries++;
4336 	}
4337 
4338 	new = thresholds->spare;
4339 
4340 	/* If no items related to eventfd have been cleared, nothing to do */
4341 	if (!entries)
4342 		goto unlock;
4343 
4344 	/* Set thresholds array to NULL if we don't have thresholds */
4345 	if (!size) {
4346 		kfree(new);
4347 		new = NULL;
4348 		goto swap_buffers;
4349 	}
4350 
4351 	new->size = size;
4352 
4353 	/* Copy thresholds and find current threshold */
4354 	new->current_threshold = -1;
4355 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4356 		if (thresholds->primary->entries[i].eventfd == eventfd)
4357 			continue;
4358 
4359 		new->entries[j] = thresholds->primary->entries[i];
4360 		if (new->entries[j].threshold <= usage) {
4361 			/*
4362 			 * new->current_threshold will not be used
4363 			 * until rcu_assign_pointer(), so it's safe to increment
4364 			 * it here.
4365 			 */
4366 			++new->current_threshold;
4367 		}
4368 		j++;
4369 	}
4370 
4371 swap_buffers:
4372 	/* Swap primary and spare array */
4373 	thresholds->spare = thresholds->primary;
4374 
4375 	rcu_assign_pointer(thresholds->primary, new);
4376 
4377 	/* Make sure nobody still uses the old thresholds array */
4378 	synchronize_rcu();
4379 
4380 	/* If all events are unregistered, free the spare array */
4381 	if (!new) {
4382 		kfree(thresholds->spare);
4383 		thresholds->spare = NULL;
4384 	}
4385 unlock:
4386 	mutex_unlock(&memcg->thresholds_lock);
4387 }
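
/*
 * Design note on the primary/spare pair manipulated above: the two
 * arrays form a double buffer.  Readers walk ->primary under RCU, so
 * both register and unregister build the replacement array off to the
 * side, publish it with rcu_assign_pointer() and wait in
 * synchronize_rcu(); only then is the retired array safe to reuse as
 * the next spare or to free.
 */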
4388 
4389 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4390 	struct eventfd_ctx *eventfd)
4391 {
4392 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4393 }
4394 
4395 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4396 	struct eventfd_ctx *eventfd)
4397 {
4398 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4399 }
4400 
4401 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4402 	struct eventfd_ctx *eventfd, const char *args)
4403 {
4404 	struct mem_cgroup_eventfd_list *event;
4405 
4406 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4407 	if (!event)
4408 		return -ENOMEM;
4409 
4410 	spin_lock(&memcg_oom_lock);
4411 
4412 	event->eventfd = eventfd;
4413 	list_add(&event->list, &memcg->oom_notify);
4414 
4415 	/* already under OOM? */
4416 	if (memcg->under_oom)
4417 		eventfd_signal(eventfd, 1);
4418 	spin_unlock(&memcg_oom_lock);
4419 
4420 	return 0;
4421 }
4422 
4423 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4424 	struct eventfd_ctx *eventfd)
4425 {
4426 	struct mem_cgroup_eventfd_list *ev, *tmp;
4427 
4428 	spin_lock(&memcg_oom_lock);
4429 
4430 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4431 		if (ev->eventfd == eventfd) {
4432 			list_del(&ev->list);
4433 			kfree(ev);
4434 		}
4435 	}
4436 
4437 	spin_unlock(&memcg_oom_lock);
4438 }
4439 
4440 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4441 {
4442 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4443 
4444 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4445 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4446 	seq_printf(sf, "oom_kill %lu\n",
4447 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4448 	return 0;
4449 }
4450 
4451 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4452 	struct cftype *cft, u64 val)
4453 {
4454 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4455 
4456 	/* cannot be set on the root cgroup; only 0 and 1 are allowed */
4457 	if (!css->parent || !((val == 0) || (val == 1)))
4458 		return -EINVAL;
4459 
4460 	memcg->oom_kill_disable = val;
4461 	if (!val)
4462 		memcg_oom_recover(memcg);
4463 
4464 	return 0;
4465 }
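
/*
 * Illustrative memory.oom_control round trip (values invented): after a
 * hypothetical write of "1", which sets oom_kill_disable via the
 * handler above, reading the file back would show:
 *
 *	oom_kill_disable 1
 *	under_oom 0
 *	oom_kill 0
 */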
4466 
4467 #ifdef CONFIG_CGROUP_WRITEBACK
4468 
4469 #include <trace/events/writeback.h>
4470 
4471 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4472 {
4473 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4474 }
4475 
4476 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4477 {
4478 	wb_domain_exit(&memcg->cgwb_domain);
4479 }
4480 
4481 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4482 {
4483 	wb_domain_size_changed(&memcg->cgwb_domain);
4484 }
4485 
4486 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4487 {
4488 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4489 
4490 	if (!memcg->css.parent)
4491 		return NULL;
4492 
4493 	return &memcg->cgwb_domain;
4494 }
4495 
4496 /*
4497  * idx can be of type enum memcg_stat_item or node_stat_item.
4498  * Keep in sync with memcg_exact_page().
4499  */
4500 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4501 {
4502 	long x = atomic_long_read(&memcg->vmstats[idx]);
4503 	int cpu;
4504 
4505 	for_each_online_cpu(cpu)
4506 		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4507 	if (x < 0)
4508 		x = 0;
4509 	return x;
4510 }
4511 
4512 /**
4513  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4514  * @wb: bdi_writeback in question
4515  * @pfilepages: out parameter for number of file pages
4516  * @pheadroom: out parameter for number of allocatable pages according to memcg
4517  * @pdirty: out parameter for number of dirty pages
4518  * @pwriteback: out parameter for number of pages under writeback
4519  *
4520  * Determine the numbers of file, headroom, dirty, and writeback pages in
4521  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4522  * is a bit more involved.
4523  *
4524  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4525  * headroom is calculated as the lowest headroom of itself and the
4526  * ancestors.  Note that this doesn't consider the actual amount of
4527  * available memory in the system.  The caller should further cap
4528  * *@pheadroom accordingly.
4529  */
4530 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4531 			 unsigned long *pheadroom, unsigned long *pdirty,
4532 			 unsigned long *pwriteback)
4533 {
4534 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4535 	struct mem_cgroup *parent;
4536 
4537 	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4538 
4539 	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4540 	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4541 			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4542 	*pheadroom = PAGE_COUNTER_MAX;
4543 
4544 	while ((parent = parent_mem_cgroup(memcg))) {
4545 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4546 					    READ_ONCE(memcg->memory.high));
4547 		unsigned long used = page_counter_read(&memcg->memory);
4548 
4549 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4550 		memcg = parent;
4551 	}
4552 }
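
/*
 * Worked example for the walk above (numbers invented): a wb memcg with
 * max = 1G, high = max and 768M used, under a non-root parent with
 * max = 2G and 1.5G used, reports min(1G - 768M, 2G - 1.5G) = 256M of
 * headroom before the caller caps it against actually available memory.
 */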
4553 
4554 /*
4555  * Foreign dirty flushing
4556  *
4557  * There's an inherent mismatch between memcg and writeback.  The former
4558  * tracks ownership per-page while the latter per-inode.  This was a
4559  * deliberate design decision because honoring per-page ownership in the
4560  * writeback path is complicated, may lead to higher CPU and IO overheads
4561  * and was deemed unnecessary given that write-sharing an inode across
4562  * different cgroups isn't a common use-case.
4563  *
4564  * Combined with inode majority-writer ownership switching, this works well
4565  * enough in most cases but there are some pathological cases.  For
4566  * example, let's say there are two cgroups A and B which keep writing to
4567  * different but confined parts of the same inode.  B owns the inode and
4568  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4569  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4570  * triggering background writeback.  A will be slowed down without a way to
4571  * make writeback of the dirty pages happen.
4572  *
4573  * Conditions like the above can lead to a cgroup getting repeatedly and
4574  * severely throttled after making some progress after each
4575  * dirty_expire_interval while the underlying IO device is almost
4576  * completely idle.
4577  *
4578  * Solving this problem completely requires matching the ownership tracking
4579  * granularities between memcg and writeback in either direction.  However,
4580  * the more egregious behaviors can be avoided by simply remembering the
4581  * most recent foreign dirtying events and initiating remote flushes on
4582  * them when local writeback isn't enough to keep the memory clean enough.
4583  *
4584  * The following two functions implement such a mechanism.  When a foreign
4585  * page - a page whose memcg and writeback ownerships don't match - is
4586  * dirtied, mem_cgroup_track_foreign_dirty() records the inode's owning
4587  * bdi_writeback in the page's owning memcg.  When balance_dirty_pages()
4588  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4589  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4590  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4591  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4592  * limited to MEMCG_CGWB_FRN_CNT.
4593  *
4594  * The mechanism only remembers IDs and doesn't hold any object references.
4595  * As being wrong occasionally doesn't matter, updates and accesses to the
4596  * records are lockless and racy.
4597  */
4598 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4599 					     struct bdi_writeback *wb)
4600 {
4601 	struct mem_cgroup *memcg = page->mem_cgroup;
4602 	struct memcg_cgwb_frn *frn;
4603 	u64 now = get_jiffies_64();
4604 	u64 oldest_at = now;
4605 	int oldest = -1;
4606 	int i;
4607 
4608 	trace_track_foreign_dirty(page, wb);
4609 
4610 	/*
4611 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4612 	 * using it.  If not, replace the oldest one which isn't being
4613 	 * written out.
4614 	 */
4615 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4616 		frn = &memcg->cgwb_frn[i];
4617 		if (frn->bdi_id == wb->bdi->id &&
4618 		    frn->memcg_id == wb->memcg_css->id)
4619 			break;
4620 		if (time_before64(frn->at, oldest_at) &&
4621 		    atomic_read(&frn->done.cnt) == 1) {
4622 			oldest = i;
4623 			oldest_at = frn->at;
4624 		}
4625 	}
4626 
4627 	if (i < MEMCG_CGWB_FRN_CNT) {
4628 		/*
4629 		 * Re-using an existing one.  Update timestamp lazily to
4630 		 * avoid making the cacheline hot.  We want them to be
4631 		 * reasonably up-to-date and significantly shorter than
4632 		 * dirty_expire_interval as that's what expires the record.
4633 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4634 		 */
4635 		unsigned long update_intv =
4636 			min_t(unsigned long, HZ,
4637 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4638 
4639 		if (time_before64(frn->at, now - update_intv))
4640 			frn->at = now;
4641 	} else if (oldest >= 0) {
4642 		/* replace the oldest free one */
4643 		frn = &memcg->cgwb_frn[oldest];
4644 		frn->bdi_id = wb->bdi->id;
4645 		frn->memcg_id = wb->memcg_css->id;
4646 		frn->at = now;
4647 	}
4648 }
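
/*
 * Worked example of the lazy-update interval above: with the default
 * dirty_expire_interval of 3000 centiseconds (30s),
 * msecs_to_jiffies(30000) / 8 is ~3.75s, so update_intv clamps to the
 * HZ (1s) bound and a reused slot's timestamp is refreshed at most
 * about once per second.
 */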
4649 
4650 /* issue foreign writeback flushes for recorded foreign dirtying events */
4651 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4652 {
4653 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4654 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4655 	u64 now = jiffies_64;
4656 	int i;
4657 
4658 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4659 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4660 
4661 		/*
4662 		 * If the record is older than dirty_expire_interval,
4663 		 * writeback on it has already started.  No need to kick it
4664 		 * off again.  Also, don't start a new one if there's
4665 		 * already one in flight.
4666 		 */
4667 		if (time_after64(frn->at, now - intv) &&
4668 		    atomic_read(&frn->done.cnt) == 1) {
4669 			frn->at = 0;
4670 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4671 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4672 					       WB_REASON_FOREIGN_FLUSH,
4673 					       &frn->done);
4674 		}
4675 	}
4676 }
4677 
4678 #else	/* CONFIG_CGROUP_WRITEBACK */
4679 
4680 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4681 {
4682 	return 0;
4683 }
4684 
4685 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4686 {
4687 }
4688 
4689 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4690 {
4691 }
4692 
4693 #endif	/* CONFIG_CGROUP_WRITEBACK */
4694 
4695 /*
4696  * DO NOT USE IN NEW FILES.
4697  *
4698  * "cgroup.event_control" implementation.
4699  *
4700  * This is way over-engineered.  It tries to support fully configurable
4701  * events for each user.  Such a level of flexibility is completely
4702  * unnecessary, especially in light of the planned unified hierarchy.
4703  *
4704  * Please deprecate this and replace with something simpler if at all
4705  * possible.
4706  */
4707 
4708 /*
4709  * Unregister event and free resources.
4710  *
4711  * Gets called from workqueue.
4712  */
4713 static void memcg_event_remove(struct work_struct *work)
4714 {
4715 	struct mem_cgroup_event *event =
4716 		container_of(work, struct mem_cgroup_event, remove);
4717 	struct mem_cgroup *memcg = event->memcg;
4718 
4719 	remove_wait_queue(event->wqh, &event->wait);
4720 
4721 	event->unregister_event(memcg, event->eventfd);
4722 
4723 	/* Notify userspace the event is going away. */
4724 	eventfd_signal(event->eventfd, 1);
4725 
4726 	eventfd_ctx_put(event->eventfd);
4727 	kfree(event);
4728 	css_put(&memcg->css);
4729 }
4730 
4731 /*
4732  * Gets called on EPOLLHUP on eventfd when user closes it.
4733  *
4734  * Called with wqh->lock held and interrupts disabled.
4735  */
4736 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4737 			    int sync, void *key)
4738 {
4739 	struct mem_cgroup_event *event =
4740 		container_of(wait, struct mem_cgroup_event, wait);
4741 	struct mem_cgroup *memcg = event->memcg;
4742 	__poll_t flags = key_to_poll(key);
4743 
4744 	if (flags & EPOLLHUP) {
4745 		/*
4746 		 * If the event has been detached at cgroup removal, we
4747 		 * can simply return knowing the other side will clean up
4748 		 * for us.
4749 		 *
4750 		 * We can't race against event freeing since the other
4751 		 * side will require wqh->lock via remove_wait_queue(),
4752 		 * which we hold.
4753 		 */
4754 		spin_lock(&memcg->event_list_lock);
4755 		if (!list_empty(&event->list)) {
4756 			list_del_init(&event->list);
4757 			/*
4758 			 * We are in atomic context, but cgroup_event_remove()
4759 			 * may sleep, so we have to call it in workqueue.
4760 			 * may sleep, so we have to call it from a workqueue.
4761 			schedule_work(&event->remove);
4762 		}
4763 		spin_unlock(&memcg->event_list_lock);
4764 	}
4765 
4766 	return 0;
4767 }
4768 
4769 static void memcg_event_ptable_queue_proc(struct file *file,
4770 		wait_queue_head_t *wqh, poll_table *pt)
4771 {
4772 	struct mem_cgroup_event *event =
4773 		container_of(pt, struct mem_cgroup_event, pt);
4774 
4775 	event->wqh = wqh;
4776 	add_wait_queue(wqh, &event->wait);
4777 }
4778 
4779 /*
4780  * DO NOT USE IN NEW FILES.
4781  *
4782  * Parse input and register new cgroup event handler.
4783  *
4784  * Input must be in format '<event_fd> <control_fd> <args>'.
4785  * Interpretation of args is defined by control file implementation.
4786  */
4787 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4788 					 char *buf, size_t nbytes, loff_t off)
4789 {
4790 	struct cgroup_subsys_state *css = of_css(of);
4791 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4792 	struct mem_cgroup_event *event;
4793 	struct cgroup_subsys_state *cfile_css;
4794 	unsigned int efd, cfd;
4795 	struct fd efile;
4796 	struct fd cfile;
4797 	const char *name;
4798 	char *endp;
4799 	int ret;
4800 
4801 	buf = strstrip(buf);
4802 
4803 	efd = simple_strtoul(buf, &endp, 10);
4804 	if (*endp != ' ')
4805 		return -EINVAL;
4806 	buf = endp + 1;
4807 
4808 	cfd = simple_strtoul(buf, &endp, 10);
4809 	if ((*endp != ' ') && (*endp != '\0'))
4810 		return -EINVAL;
4811 	buf = endp + 1;
4812 
4813 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4814 	if (!event)
4815 		return -ENOMEM;
4816 
4817 	event->memcg = memcg;
4818 	INIT_LIST_HEAD(&event->list);
4819 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4820 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4821 	INIT_WORK(&event->remove, memcg_event_remove);
4822 
4823 	efile = fdget(efd);
4824 	if (!efile.file) {
4825 		ret = -EBADF;
4826 		goto out_kfree;
4827 	}
4828 
4829 	event->eventfd = eventfd_ctx_fileget(efile.file);
4830 	if (IS_ERR(event->eventfd)) {
4831 		ret = PTR_ERR(event->eventfd);
4832 		goto out_put_efile;
4833 	}
4834 
4835 	cfile = fdget(cfd);
4836 	if (!cfile.file) {
4837 		ret = -EBADF;
4838 		goto out_put_eventfd;
4839 	}
4840 
4841 	/* the process needs read permission on the control file */
4842 	/* AV: shouldn't we check that it's been opened for read instead? */
4843 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4844 	if (ret < 0)
4845 		goto out_put_cfile;
4846 
4847 	/*
4848 	 * Determine the event callbacks and set them in @event.  This used
4849 	 * to be done via struct cftype but cgroup core no longer knows
4850 	 * about these events.  The following is crude but the whole thing
4851 	 * is for compatibility anyway.
4852 	 *
4853 	 * DO NOT ADD NEW FILES.
4854 	 */
4855 	name = cfile.file->f_path.dentry->d_name.name;
4856 
4857 	if (!strcmp(name, "memory.usage_in_bytes")) {
4858 		event->register_event = mem_cgroup_usage_register_event;
4859 		event->unregister_event = mem_cgroup_usage_unregister_event;
4860 	} else if (!strcmp(name, "memory.oom_control")) {
4861 		event->register_event = mem_cgroup_oom_register_event;
4862 		event->unregister_event = mem_cgroup_oom_unregister_event;
4863 	} else if (!strcmp(name, "memory.pressure_level")) {
4864 		event->register_event = vmpressure_register_event;
4865 		event->unregister_event = vmpressure_unregister_event;
4866 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4867 		event->register_event = memsw_cgroup_usage_register_event;
4868 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4869 	} else {
4870 		ret = -EINVAL;
4871 		goto out_put_cfile;
4872 	}
4873 
4874 	/*
4875 	 * Verify that @cfile belongs to @css.  Also, remaining events are
4876 	 * automatically removed on cgroup destruction but the removal is
4877 	 * asynchronous, so take an extra ref on @css.
4878 	 */
4879 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4880 					       &memory_cgrp_subsys);
4881 	ret = -EINVAL;
4882 	if (IS_ERR(cfile_css))
4883 		goto out_put_cfile;
4884 	if (cfile_css != css) {
4885 		css_put(cfile_css);
4886 		goto out_put_cfile;
4887 	}
4888 
4889 	ret = event->register_event(memcg, event->eventfd, buf);
4890 	if (ret)
4891 		goto out_put_css;
4892 
4893 	vfs_poll(efile.file, &event->pt);
4894 
4895 	spin_lock(&memcg->event_list_lock);
4896 	list_add(&event->list, &memcg->event_list);
4897 	spin_unlock(&memcg->event_list_lock);
4898 
4899 	fdput(cfile);
4900 	fdput(efile);
4901 
4902 	return nbytes;
4903 
4904 out_put_css:
4905 	css_put(css);
4906 out_put_cfile:
4907 	fdput(cfile);
4908 out_put_eventfd:
4909 	eventfd_ctx_put(event->eventfd);
4910 out_put_efile:
4911 	fdput(efile);
4912 out_kfree:
4913 	kfree(event);
4914 
4915 	return ret;
4916 }
4917 
4918 static struct cftype mem_cgroup_legacy_files[] = {
4919 	{
4920 		.name = "usage_in_bytes",
4921 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4922 		.read_u64 = mem_cgroup_read_u64,
4923 	},
4924 	{
4925 		.name = "max_usage_in_bytes",
4926 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4927 		.write = mem_cgroup_reset,
4928 		.read_u64 = mem_cgroup_read_u64,
4929 	},
4930 	{
4931 		.name = "limit_in_bytes",
4932 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4933 		.write = mem_cgroup_write,
4934 		.read_u64 = mem_cgroup_read_u64,
4935 	},
4936 	{
4937 		.name = "soft_limit_in_bytes",
4938 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4939 		.write = mem_cgroup_write,
4940 		.read_u64 = mem_cgroup_read_u64,
4941 	},
4942 	{
4943 		.name = "failcnt",
4944 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4945 		.write = mem_cgroup_reset,
4946 		.read_u64 = mem_cgroup_read_u64,
4947 	},
4948 	{
4949 		.name = "stat",
4950 		.seq_show = memcg_stat_show,
4951 	},
4952 	{
4953 		.name = "force_empty",
4954 		.write = mem_cgroup_force_empty_write,
4955 	},
4956 	{
4957 		.name = "use_hierarchy",
4958 		.write_u64 = mem_cgroup_hierarchy_write,
4959 		.read_u64 = mem_cgroup_hierarchy_read,
4960 	},
4961 	{
4962 		.name = "cgroup.event_control",		/* XXX: for compat */
4963 		.write = memcg_write_event_control,
4964 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4965 	},
4966 	{
4967 		.name = "swappiness",
4968 		.read_u64 = mem_cgroup_swappiness_read,
4969 		.write_u64 = mem_cgroup_swappiness_write,
4970 	},
4971 	{
4972 		.name = "move_charge_at_immigrate",
4973 		.read_u64 = mem_cgroup_move_charge_read,
4974 		.write_u64 = mem_cgroup_move_charge_write,
4975 	},
4976 	{
4977 		.name = "oom_control",
4978 		.seq_show = mem_cgroup_oom_control_read,
4979 		.write_u64 = mem_cgroup_oom_control_write,
4980 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4981 	},
4982 	{
4983 		.name = "pressure_level",
4984 	},
4985 #ifdef CONFIG_NUMA
4986 	{
4987 		.name = "numa_stat",
4988 		.seq_show = memcg_numa_stat_show,
4989 	},
4990 #endif
4991 	{
4992 		.name = "kmem.limit_in_bytes",
4993 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4994 		.write = mem_cgroup_write,
4995 		.read_u64 = mem_cgroup_read_u64,
4996 	},
4997 	{
4998 		.name = "kmem.usage_in_bytes",
4999 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5000 		.read_u64 = mem_cgroup_read_u64,
5001 	},
5002 	{
5003 		.name = "kmem.failcnt",
5004 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5005 		.write = mem_cgroup_reset,
5006 		.read_u64 = mem_cgroup_read_u64,
5007 	},
5008 	{
5009 		.name = "kmem.max_usage_in_bytes",
5010 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5011 		.write = mem_cgroup_reset,
5012 		.read_u64 = mem_cgroup_read_u64,
5013 	},
5014 #if defined(CONFIG_MEMCG_KMEM) && \
5015 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5016 	{
5017 		.name = "kmem.slabinfo",
5018 		.seq_show = memcg_slab_show,
5019 	},
5020 #endif
5021 	{
5022 		.name = "kmem.tcp.limit_in_bytes",
5023 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5024 		.write = mem_cgroup_write,
5025 		.read_u64 = mem_cgroup_read_u64,
5026 	},
5027 	{
5028 		.name = "kmem.tcp.usage_in_bytes",
5029 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5030 		.read_u64 = mem_cgroup_read_u64,
5031 	},
5032 	{
5033 		.name = "kmem.tcp.failcnt",
5034 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5035 		.write = mem_cgroup_reset,
5036 		.read_u64 = mem_cgroup_read_u64,
5037 	},
5038 	{
5039 		.name = "kmem.tcp.max_usage_in_bytes",
5040 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5041 		.write = mem_cgroup_reset,
5042 		.read_u64 = mem_cgroup_read_u64,
5043 	},
5044 	{ },	/* terminate */
5045 };
5046 
5047 /*
5048  * Private memory cgroup IDR
5049  *
5050  * Swap-out records and page cache shadow entries need to store memcg
5051  * references in constrained space, so we maintain an ID space that is
5052  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5053  * memory-controlled cgroups to 64k.
5054  *
5055  * However, there usually are many references to the offline CSS after
5056  * the cgroup has been destroyed, such as page cache or reclaimable
5057  * slab objects, that don't need to hang on to the ID. We want to keep
5058  * those dead CSSes from occupying IDs, or we might quickly exhaust the
5059  * relatively small ID space and prevent the creation of new cgroups
5060  * even when there are much fewer than 64k cgroups - possibly none.
5061  *
5062  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5063  * be freed and recycled when it's no longer needed, which is usually
5064  * when the CSS is offlined.
5065  *
5066  * The only exception to that are records of swapped out tmpfs/shmem
5067  * pages that need to be attributed to live ancestors on swapin. But
5068  * those references are manageable from userspace.
5069  */
5070 
5071 static DEFINE_IDR(mem_cgroup_idr);
5072 
5073 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5074 {
5075 	if (memcg->id.id > 0) {
5076 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5077 		memcg->id.id = 0;
5078 	}
5079 }
5080 
5081 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5082 						  unsigned int n)
5083 {
5084 	refcount_add(n, &memcg->id.ref);
5085 }
5086 
5087 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5088 {
5089 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5090 		mem_cgroup_id_remove(memcg);
5091 
5092 		/* Memcg ID pins CSS */
5093 		css_put(&memcg->css);
5094 	}
5095 }
5096 
5097 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5098 {
5099 	mem_cgroup_id_put_many(memcg, 1);
5100 }
5101 
5102 /**
5103  * mem_cgroup_from_id - look up a memcg from a memcg id
5104  * @id: the memcg id to look up
5105  *
5106  * Caller must hold rcu_read_lock().
5107  */
5108 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5109 {
5110 	WARN_ON_ONCE(!rcu_read_lock_held());
5111 	return idr_find(&mem_cgroup_idr, id);
5112 }
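
/*
 * A typical lookup sketch (assumed caller pattern, not taken verbatim
 * from this file): because the ID can be recycled once the css goes
 * offline, callers that need to hold on to the result combine the
 * lookup with a tryget under the same RCU read side:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */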
5113 
5114 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5115 {
5116 	struct mem_cgroup_per_node *pn;
5117 	int tmp = node;
5118 	/*
5119 	 * This routine is called against possible nodes.
5120 	 * But it's a BUG to call kmalloc() against an offline node.
5121 	 *
5122 	 * TODO: this routine can waste a lot of memory for nodes which will
5123 	 *       never be onlined. It's better to use a memory hotplug callback
5124 	 *       function.
5125 	 */
5126 	if (!node_state(node, N_NORMAL_MEMORY))
5127 		tmp = -1;
5128 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5129 	if (!pn)
5130 		return 1;
5131 
5132 	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
5133 	if (!pn->lruvec_stat_local) {
5134 		kfree(pn);
5135 		return 1;
5136 	}
5137 
5138 	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
5139 	if (!pn->lruvec_stat_cpu) {
5140 		free_percpu(pn->lruvec_stat_local);
5141 		kfree(pn);
5142 		return 1;
5143 	}
5144 
5145 	lruvec_init(&pn->lruvec);
5146 	pn->usage_in_excess = 0;
5147 	pn->on_tree = false;
5148 	pn->memcg = memcg;
5149 
5150 	memcg->nodeinfo[node] = pn;
5151 	return 0;
5152 }
5153 
5154 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5155 {
5156 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5157 
5158 	if (!pn)
5159 		return;
5160 
5161 	free_percpu(pn->lruvec_stat_cpu);
5162 	free_percpu(pn->lruvec_stat_local);
5163 	kfree(pn);
5164 }
5165 
5166 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5167 {
5168 	int node;
5169 
5170 	for_each_node(node)
5171 		free_mem_cgroup_per_node_info(memcg, node);
5172 	free_percpu(memcg->vmstats_percpu);
5173 	free_percpu(memcg->vmstats_local);
5174 	kfree(memcg);
5175 }
5176 
5177 static void mem_cgroup_free(struct mem_cgroup *memcg)
5178 {
5179 	memcg_wb_domain_exit(memcg);
5180 	/*
5181 	 * Flush percpu vmstats and vmevents to guarantee correct values at
5182 	 * the parent's and all ancestor levels.
5183 	 */
5184 	memcg_flush_percpu_vmstats(memcg);
5185 	memcg_flush_percpu_vmevents(memcg);
5186 	__mem_cgroup_free(memcg);
5187 }
5188 
5189 static struct mem_cgroup *mem_cgroup_alloc(void)
5190 {
5191 	struct mem_cgroup *memcg;
5192 	unsigned int size;
5193 	int node;
5194 	int __maybe_unused i;
5195 	long error = -ENOMEM;
5196 
5197 	size = sizeof(struct mem_cgroup);
5198 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5199 
5200 	memcg = kzalloc(size, GFP_KERNEL);
5201 	if (!memcg)
5202 		return ERR_PTR(error);
5203 
5204 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5205 				 1, MEM_CGROUP_ID_MAX,
5206 				 GFP_KERNEL);
5207 	if (memcg->id.id < 0) {
5208 		error = memcg->id.id;
5209 		goto fail;
5210 	}
5211 
5212 	memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
5213 	if (!memcg->vmstats_local)
5214 		goto fail;
5215 
5216 	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
5217 	if (!memcg->vmstats_percpu)
5218 		goto fail;
5219 
5220 	for_each_node(node)
5221 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5222 			goto fail;
5223 
5224 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5225 		goto fail;
5226 
5227 	INIT_WORK(&memcg->high_work, high_work_func);
5228 	INIT_LIST_HEAD(&memcg->oom_notify);
5229 	mutex_init(&memcg->thresholds_lock);
5230 	spin_lock_init(&memcg->move_lock);
5231 	vmpressure_init(&memcg->vmpressure);
5232 	INIT_LIST_HEAD(&memcg->event_list);
5233 	spin_lock_init(&memcg->event_list_lock);
5234 	memcg->socket_pressure = jiffies;
5235 #ifdef CONFIG_MEMCG_KMEM
5236 	memcg->kmemcg_id = -1;
5237 	INIT_LIST_HEAD(&memcg->objcg_list);
5238 #endif
5239 #ifdef CONFIG_CGROUP_WRITEBACK
5240 	INIT_LIST_HEAD(&memcg->cgwb_list);
5241 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5242 		memcg->cgwb_frn[i].done =
5243 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5244 #endif
5245 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5246 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5247 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5248 	memcg->deferred_split_queue.split_queue_len = 0;
5249 #endif
5250 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5251 	return memcg;
5252 fail:
5253 	mem_cgroup_id_remove(memcg);
5254 	__mem_cgroup_free(memcg);
5255 	return ERR_PTR(error);
5256 }
5257 
5258 static struct cgroup_subsys_state * __ref
5259 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5260 {
5261 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5262 	struct mem_cgroup *memcg;
5263 	long error = -ENOMEM;
5264 
5265 	memcg = mem_cgroup_alloc();
5266 	if (IS_ERR(memcg))
5267 		return ERR_CAST(memcg);
5268 
5269 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5270 	memcg->soft_limit = PAGE_COUNTER_MAX;
5271 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5272 	if (parent) {
5273 		memcg->swappiness = mem_cgroup_swappiness(parent);
5274 		memcg->oom_kill_disable = parent->oom_kill_disable;
5275 	}
5276 	if (parent && parent->use_hierarchy) {
5277 		memcg->use_hierarchy = true;
5278 		page_counter_init(&memcg->memory, &parent->memory);
5279 		page_counter_init(&memcg->swap, &parent->swap);
5280 		page_counter_init(&memcg->memsw, &parent->memsw);
5281 		page_counter_init(&memcg->kmem, &parent->kmem);
5282 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5283 	} else {
5284 		page_counter_init(&memcg->memory, NULL);
5285 		page_counter_init(&memcg->swap, NULL);
5286 		page_counter_init(&memcg->memsw, NULL);
5287 		page_counter_init(&memcg->kmem, NULL);
5288 		page_counter_init(&memcg->tcpmem, NULL);
5289 		/*
5290 		 * A deeper hierarchy with use_hierarchy == false doesn't make
5291 		 * much sense, so let the cgroup subsystem know about this
5292 		 * unfortunate state in our controller.
5293 		 */
5294 		if (parent != root_mem_cgroup)
5295 			memory_cgrp_subsys.broken_hierarchy = true;
5296 	}
5297 
5298 	/* The following stuff does not apply to the root */
5299 	if (!parent) {
5300 		root_mem_cgroup = memcg;
5301 		return &memcg->css;
5302 	}
5303 
5304 	error = memcg_online_kmem(memcg);
5305 	if (error)
5306 		goto fail;
5307 
5308 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5309 		static_branch_inc(&memcg_sockets_enabled_key);
5310 
5311 	return &memcg->css;
5312 fail:
5313 	mem_cgroup_id_remove(memcg);
5314 	mem_cgroup_free(memcg);
5315 	return ERR_PTR(error);
5316 }
5317 
5318 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5319 {
5320 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5321 
5322 	/*
5323 	 * A memcg must be visible for memcg_expand_shrinker_maps()
5324 	 * by the time the maps are allocated. So, we allocate maps
5325 	 * here, when for_each_mem_cgroup() can't skip it.
5326 	 */
5327 	if (memcg_alloc_shrinker_maps(memcg)) {
5328 		mem_cgroup_id_remove(memcg);
5329 		return -ENOMEM;
5330 	}
5331 
5332 	/* Online state pins memcg ID, memcg ID pins CSS */
5333 	refcount_set(&memcg->id.ref, 1);
5334 	css_get(css);
5335 	return 0;
5336 }
5337 
5338 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5339 {
5340 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5341 	struct mem_cgroup_event *event, *tmp;
5342 
5343 	/*
5344 	 * Unregister events and notify userspace.
5345 	 * Notify userspace about cgroup removal only after rmdir of the
5346 	 * cgroup directory, to avoid a race between userspace and kernelspace.
5347 	 */
5348 	spin_lock(&memcg->event_list_lock);
5349 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5350 		list_del_init(&event->list);
5351 		schedule_work(&event->remove);
5352 	}
5353 	spin_unlock(&memcg->event_list_lock);
5354 
5355 	page_counter_set_min(&memcg->memory, 0);
5356 	page_counter_set_low(&memcg->memory, 0);
5357 
5358 	memcg_offline_kmem(memcg);
5359 	wb_memcg_offline(memcg);
5360 
5361 	drain_all_stock(memcg);
5362 
5363 	mem_cgroup_id_put(memcg);
5364 }
5365 
5366 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5367 {
5368 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5369 
5370 	invalidate_reclaim_iterators(memcg);
5371 }
5372 
5373 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5374 {
5375 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5376 	int __maybe_unused i;
5377 
5378 #ifdef CONFIG_CGROUP_WRITEBACK
5379 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5380 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5381 #endif
5382 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5383 		static_branch_dec(&memcg_sockets_enabled_key);
5384 
5385 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5386 		static_branch_dec(&memcg_sockets_enabled_key);
5387 
5388 	vmpressure_cleanup(&memcg->vmpressure);
5389 	cancel_work_sync(&memcg->high_work);
5390 	mem_cgroup_remove_from_trees(memcg);
5391 	memcg_free_shrinker_maps(memcg);
5392 	memcg_free_kmem(memcg);
5393 	mem_cgroup_free(memcg);
5394 }
5395 
5396 /**
5397  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5398  * @css: the target css
5399  *
5400  * Reset the states of the mem_cgroup associated with @css.  This is
5401  * invoked when the userland requests disabling on the default hierarchy
5402  * but the memcg is pinned through dependency.  The memcg should stop
5403  * applying policies and should revert to the vanilla state as it may be
5404  * made visible again.
5405  *
5406  * The current implementation only resets the essential configurations.
5407  * This needs to be expanded to cover all the visible parts.
5408  */
5409 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5410 {
5411 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5412 
5413 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5414 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5415 	page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
5416 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5417 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5418 	page_counter_set_min(&memcg->memory, 0);
5419 	page_counter_set_low(&memcg->memory, 0);
5420 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5421 	memcg->soft_limit = PAGE_COUNTER_MAX;
5422 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5423 	memcg_wb_domain_size_changed(memcg);
5424 }
5425 
5426 #ifdef CONFIG_MMU
5427 /* Handlers for move charge at task migration. */
5428 static int mem_cgroup_do_precharge(unsigned long count)
5429 {
5430 	int ret;
5431 
5432 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5433 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5434 	if (!ret) {
5435 		mc.precharge += count;
5436 		return ret;
5437 	}
5438 
5439 	/* Try charges one by one with reclaim, but do not retry */
5440 	while (count--) {
5441 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5442 		if (ret)
5443 			return ret;
5444 		mc.precharge++;
5445 		cond_resched();
5446 	}
5447 	return 0;
5448 }
5449 
5450 union mc_target {
5451 	struct page	*page;
5452 	swp_entry_t	ent;
5453 };
5454 
5455 enum mc_target_type {
5456 	MC_TARGET_NONE = 0,
5457 	MC_TARGET_PAGE,
5458 	MC_TARGET_SWAP,
5459 	MC_TARGET_DEVICE,
5460 };
5461 
5462 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5463 						unsigned long addr, pte_t ptent)
5464 {
5465 	struct page *page = vm_normal_page(vma, addr, ptent);
5466 
5467 	if (!page || !page_mapped(page))
5468 		return NULL;
5469 	if (PageAnon(page)) {
5470 		if (!(mc.flags & MOVE_ANON))
5471 			return NULL;
5472 	} else {
5473 		if (!(mc.flags & MOVE_FILE))
5474 			return NULL;
5475 	}
5476 	if (!get_page_unless_zero(page))
5477 		return NULL;
5478 
5479 	return page;
5480 }
5481 
5482 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5483 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5484 			pte_t ptent, swp_entry_t *entry)
5485 {
5486 	struct page *page = NULL;
5487 	swp_entry_t ent = pte_to_swp_entry(ptent);
5488 
5489 	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
5490 		return NULL;
5491 
5492 	/*
5493 	 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging
5494 	 * to a device; because they are not accessible by the CPU, they are
5495 	 * stored as special swap entries in the CPU page table.
5496 	 */
5497 	if (is_device_private_entry(ent)) {
5498 		page = device_private_entry_to_page(ent);
5499 		/*
5500 		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has a
5501 		 * refcount of 1 when free (unlike a normal page).
5502 		 */
5503 		if (!page_ref_add_unless(page, 1, 1))
5504 			return NULL;
5505 		return page;
5506 	}
5507 
5508 	/*
5509 	 * Because lookup_swap_cache() updates some statistics counters,
5510 	 * we call find_get_page() on the swap address space directly.
5511 	 */
5512 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5513 	entry->val = ent.val;
5514 
5515 	return page;
5516 }
5517 #else
5518 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5519 			pte_t ptent, swp_entry_t *entry)
5520 {
5521 	return NULL;
5522 }
5523 #endif
5524 
5525 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5526 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5527 {
5528 	struct page *page = NULL;
5529 	struct address_space *mapping;
5530 	pgoff_t pgoff;
5531 
5532 	if (!vma->vm_file) /* anonymous vma */
5533 		return NULL;
5534 	if (!(mc.flags & MOVE_FILE))
5535 		return NULL;
5536 
5537 	mapping = vma->vm_file->f_mapping;
5538 	pgoff = linear_page_index(vma, addr);
5539 
5540 	/* The page is moved even if it's not RSS of this task (page-faulted). */
5541 #ifdef CONFIG_SWAP
5542 	/* shmem/tmpfs may report page out on swap: account for that too. */
5543 	if (shmem_mapping(mapping)) {
5544 		page = find_get_entry(mapping, pgoff);
5545 		if (xa_is_value(page)) {
5546 			swp_entry_t swp = radix_to_swp_entry(page);
5547 			*entry = swp;
5548 			page = find_get_page(swap_address_space(swp),
5549 					     swp_offset(swp));
5550 		}
5551 	} else
5552 		page = find_get_page(mapping, pgoff);
5553 #else
5554 	page = find_get_page(mapping, pgoff);
5555 #endif
5556 	return page;
5557 }
5558 
5559 /**
5560  * mem_cgroup_move_account - move account of the page
5561  * @page: the page
5562  * @compound: charge the page as compound or small page
5563  * @from: mem_cgroup which the page is moved from.
5564  * @to:	mem_cgroup which the page is moved to. @from != @to.
5565  *
5566  * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
5567  *
5568  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5569  * from old cgroup.
5570  */
5571 static int mem_cgroup_move_account(struct page *page,
5572 				   bool compound,
5573 				   struct mem_cgroup *from,
5574 				   struct mem_cgroup *to)
5575 {
5576 	struct lruvec *from_vec, *to_vec;
5577 	struct pglist_data *pgdat;
5578 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5579 	int ret;
5580 
5581 	VM_BUG_ON(from == to);
5582 	VM_BUG_ON_PAGE(PageLRU(page), page);
5583 	VM_BUG_ON(compound && !PageTransHuge(page));
5584 
5585 	/*
5586 	 * Prevent mem_cgroup_migrate() from looking at
5587 	 * page->mem_cgroup of its source page while we change it.
5588 	 */
5589 	ret = -EBUSY;
5590 	if (!trylock_page(page))
5591 		goto out;
5592 
5593 	ret = -EINVAL;
5594 	if (page->mem_cgroup != from)
5595 		goto out_unlock;
5596 
5597 	pgdat = page_pgdat(page);
5598 	from_vec = mem_cgroup_lruvec(from, pgdat);
5599 	to_vec = mem_cgroup_lruvec(to, pgdat);
5600 
5601 	lock_page_memcg(page);
5602 
5603 	if (PageAnon(page)) {
5604 		if (page_mapped(page)) {
5605 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5606 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5607 			if (PageTransHuge(page)) {
5608 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5609 						   -nr_pages);
5610 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5611 						   nr_pages);
5612 			}
5614 		}
5615 	} else {
5616 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5617 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5618 
5619 		if (PageSwapBacked(page)) {
5620 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5621 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5622 		}
5623 
5624 		if (page_mapped(page)) {
5625 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5626 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5627 		}
5628 
5629 		if (PageDirty(page)) {
5630 			struct address_space *mapping = page_mapping(page);
5631 
5632 			if (mapping_cap_account_dirty(mapping)) {
5633 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5634 						   -nr_pages);
5635 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5636 						   nr_pages);
5637 			}
5638 		}
5639 	}
5640 
5641 	if (PageWriteback(page)) {
5642 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5643 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5644 	}
5645 
5646 	/*
5647 	 * All state has been migrated, let's switch to the new memcg.
5648 	 *
5649 	 * It is safe to change page->mem_cgroup here because the page
5650 	 * is referenced, charged, isolated, and locked: we can't race
5651 	 * with (un)charging, migration, LRU putback, or anything else
5652 	 * that would rely on a stable page->mem_cgroup.
5653 	 *
5654 	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5655 	 * to save space. As soon as we switch page->mem_cgroup to a
5656 	 * new memcg that isn't locked, the above state can change
5657 	 * concurrently again. Make sure we're truly done with it.
5658 	 */
5659 	smp_mb();
5660 
5661 	css_get(&to->css);
5662 	css_put(&from->css);
5663 
5664 	page->mem_cgroup = to;
5665 
5666 	__unlock_page_memcg(from);
5667 
5668 	ret = 0;
5669 
5670 	local_irq_disable();
5671 	mem_cgroup_charge_statistics(to, page, nr_pages);
5672 	memcg_check_events(to, page);
5673 	mem_cgroup_charge_statistics(from, page, -nr_pages);
5674 	memcg_check_events(from, page);
5675 	local_irq_enable();
5676 out_unlock:
5677 	unlock_page(page);
5678 out:
5679 	return ret;
5680 }
5681 
5682 /**
5683  * get_mctgt_type - get target type of moving charge
5684  * @vma: the vma the pte to be checked belongs
5685  * @addr: the address corresponding to the pte to be checked
5686  * @ptent: the pte to be checked
5687  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5688  *
5689  * Returns
5690  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5691  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5692  *     move charge. If @target is not NULL, the page is stored in target->page
5693  *     with an extra refcount taken (callers should handle it).
5694  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5695  *     target for charge migration. If @target is not NULL, the entry is stored
5696  *     in target->ent.
5697  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5698  *     (so a ZONE_DEVICE page and thus not on the LRU).
5699  *     For now such a page is charged like a regular page would be, as for all
5700  *     intents and purposes it is just special memory taking the place of a
5701  *     regular page.
5702  *
5703  *     See Documentation/vm/hmm.rst and include/linux/hmm.h
5704  *
5705  * Called with pte lock held.
5706  */
5708 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5709 		unsigned long addr, pte_t ptent, union mc_target *target)
5710 {
5711 	struct page *page = NULL;
5712 	enum mc_target_type ret = MC_TARGET_NONE;
5713 	swp_entry_t ent = { .val = 0 };
5714 
5715 	if (pte_present(ptent))
5716 		page = mc_handle_present_pte(vma, addr, ptent);
5717 	else if (is_swap_pte(ptent))
5718 		page = mc_handle_swap_pte(vma, ptent, &ent);
5719 	else if (pte_none(ptent))
5720 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5721 
5722 	if (!page && !ent.val)
5723 		return ret;
5724 	if (page) {
5725 		/*
5726 		 * Only do a loose check, without serialization.
5727 		 * mem_cgroup_move_account() checks the page is valid or
5728 		 * not under LRU exclusion.
5729 		 */
5730 		if (page->mem_cgroup == mc.from) {
5731 			ret = MC_TARGET_PAGE;
5732 			if (is_device_private_page(page))
5733 				ret = MC_TARGET_DEVICE;
5734 			if (target)
5735 				target->page = page;
5736 		}
5737 		if (!ret || !target)
5738 			put_page(page);
5739 	}
5740 	/*
5741 	 * There is a swap entry and the page doesn't exist or isn't charged.
5742 	 * But we cannot move a tail page of a THP.
5743 	 */
5744 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5745 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5746 		ret = MC_TARGET_SWAP;
5747 		if (target)
5748 			target->ent = ent;
5749 	}
5750 	return ret;
5751 }
5752 
5753 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5754 /*
5755  * We don't consider PMD mapped swapping or file mapped pages because THP does
5756  * not support them for now.
5757  * Caller should make sure that pmd_trans_huge(pmd) is true.
5758  */
5759 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5760 		unsigned long addr, pmd_t pmd, union mc_target *target)
5761 {
5762 	struct page *page = NULL;
5763 	enum mc_target_type ret = MC_TARGET_NONE;
5764 
5765 	if (unlikely(is_swap_pmd(pmd))) {
5766 		VM_BUG_ON(thp_migration_supported() &&
5767 				  !is_pmd_migration_entry(pmd));
5768 		return ret;
5769 	}
5770 	page = pmd_page(pmd);
5771 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5772 	if (!(mc.flags & MOVE_ANON))
5773 		return ret;
5774 	if (page->mem_cgroup == mc.from) {
5775 		ret = MC_TARGET_PAGE;
5776 		if (target) {
5777 			get_page(page);
5778 			target->page = page;
5779 		}
5780 	}
5781 	return ret;
5782 }
5783 #else
5784 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5785 		unsigned long addr, pmd_t pmd, union mc_target *target)
5786 {
5787 	return MC_TARGET_NONE;
5788 }
5789 #endif
5790 
5791 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5792 					unsigned long addr, unsigned long end,
5793 					struct mm_walk *walk)
5794 {
5795 	struct vm_area_struct *vma = walk->vma;
5796 	pte_t *pte;
5797 	spinlock_t *ptl;
5798 
5799 	ptl = pmd_trans_huge_lock(pmd, vma);
5800 	if (ptl) {
5801 		/*
5802 		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
5803 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
5804 		 * but this might change.
5805 		 */
5806 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5807 			mc.precharge += HPAGE_PMD_NR;
5808 		spin_unlock(ptl);
5809 		return 0;
5810 	}
5811 
5812 	if (pmd_trans_unstable(pmd))
5813 		return 0;
5814 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5815 	for (; addr != end; pte++, addr += PAGE_SIZE)
5816 		if (get_mctgt_type(vma, addr, *pte, NULL))
5817 			mc.precharge++;	/* increment precharge temporarily */
5818 	pte_unmap_unlock(pte - 1, ptl);
5819 	cond_resched();
5820 
5821 	return 0;
5822 }
5823 
5824 static const struct mm_walk_ops precharge_walk_ops = {
5825 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5826 };
5827 
5828 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5829 {
5830 	unsigned long precharge;
5831 
5832 	mmap_read_lock(mm);
5833 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5834 	mmap_read_unlock(mm);
5835 
5836 	precharge = mc.precharge;
5837 	mc.precharge = 0;
5838 
5839 	return precharge;
5840 }
5841 
5842 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5843 {
5844 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5845 
5846 	VM_BUG_ON(mc.moving_task);
5847 	mc.moving_task = current;
5848 	return mem_cgroup_do_precharge(precharge);
5849 }
5850 
5851 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5852 static void __mem_cgroup_clear_mc(void)
5853 {
5854 	struct mem_cgroup *from = mc.from;
5855 	struct mem_cgroup *to = mc.to;
5856 
5857 	/* we must uncharge all the leftover precharges from mc.to */
5858 	if (mc.precharge) {
5859 		cancel_charge(mc.to, mc.precharge);
5860 		mc.precharge = 0;
5861 	}
5862 	/*
5863 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5864 	 * we must uncharge here.
5865 	 */
5866 	if (mc.moved_charge) {
5867 		cancel_charge(mc.from, mc.moved_charge);
5868 		mc.moved_charge = 0;
5869 	}
5870 	/* we must fixup refcnts and charges */
5871 	if (mc.moved_swap) {
5872 		/* uncharge swap account from the old cgroup */
5873 		if (!mem_cgroup_is_root(mc.from))
5874 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5875 
5876 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5877 
5878 		/*
5879 		 * we charged both to->memory and to->memsw, so we
5880 		 * should uncharge to->memory.
5881 		 */
5882 		if (!mem_cgroup_is_root(mc.to))
5883 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5884 
5885 		mc.moved_swap = 0;
5886 	}
5887 	memcg_oom_recover(from);
5888 	memcg_oom_recover(to);
5889 	wake_up_all(&mc.waitq);
5890 }
5891 
5892 static void mem_cgroup_clear_mc(void)
5893 {
5894 	struct mm_struct *mm = mc.mm;
5895 
5896 	/*
5897 	 * we must clear moving_task before waking up waiters at the end of
5898 	 * task migration.
5899 	 */
5900 	mc.moving_task = NULL;
5901 	__mem_cgroup_clear_mc();
5902 	spin_lock(&mc.lock);
5903 	mc.from = NULL;
5904 	mc.to = NULL;
5905 	mc.mm = NULL;
5906 	spin_unlock(&mc.lock);
5907 
5908 	mmput(mm);
5909 }
5910 
5911 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5912 {
5913 	struct cgroup_subsys_state *css;
5914 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5915 	struct mem_cgroup *from;
5916 	struct task_struct *leader, *p;
5917 	struct mm_struct *mm;
5918 	unsigned long move_flags;
5919 	int ret = 0;
5920 
5921 	/* charge immigration isn't supported on the default hierarchy */
5922 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5923 		return 0;
5924 
5925 	/*
5926 	 * Multi-process migrations only happen on the default hierarchy
5927 	 * where charge immigration is not used.  Perform charge
5928 	 * immigration if @tset contains a leader and whine if there are
5929 	 * multiple.
5930 	 */
5931 	p = NULL;
5932 	cgroup_taskset_for_each_leader(leader, css, tset) {
5933 		WARN_ON_ONCE(p);
5934 		p = leader;
5935 		memcg = mem_cgroup_from_css(css);
5936 	}
5937 	if (!p)
5938 		return 0;
5939 
5940 	/*
5941 	 * We are now committed to this value, whatever it is. Changes in this
5942 	 * tunable will only affect upcoming migrations, not the current one.
5943 	 * So we need to save it, and keep it going.
5944 	 */
5945 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5946 	if (!move_flags)
5947 		return 0;
5948 
5949 	from = mem_cgroup_from_task(p);
5950 
5951 	VM_BUG_ON(from == memcg);
5952 
5953 	mm = get_task_mm(p);
5954 	if (!mm)
5955 		return 0;
5956 	/* We move charges only when we move the owner of the mm */
5957 	if (mm->owner == p) {
5958 		VM_BUG_ON(mc.from);
5959 		VM_BUG_ON(mc.to);
5960 		VM_BUG_ON(mc.precharge);
5961 		VM_BUG_ON(mc.moved_charge);
5962 		VM_BUG_ON(mc.moved_swap);
5963 
5964 		spin_lock(&mc.lock);
5965 		mc.mm = mm;
5966 		mc.from = from;
5967 		mc.to = memcg;
5968 		mc.flags = move_flags;
5969 		spin_unlock(&mc.lock);
5970 		/* We set mc.moving_task later */
5971 
5972 		ret = mem_cgroup_precharge_mc(mm);
5973 		if (ret)
5974 			mem_cgroup_clear_mc();
5975 	} else {
5976 		mmput(mm);
5977 	}
5978 	return ret;
5979 }
5980 
5981 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5982 {
5983 	if (mc.to)
5984 		mem_cgroup_clear_mc();
5985 }
5986 
5987 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5988 				unsigned long addr, unsigned long end,
5989 				struct mm_walk *walk)
5990 {
5991 	int ret = 0;
5992 	struct vm_area_struct *vma = walk->vma;
5993 	pte_t *pte;
5994 	spinlock_t *ptl;
5995 	enum mc_target_type target_type;
5996 	union mc_target target;
5997 	struct page *page;
5998 
5999 	ptl = pmd_trans_huge_lock(pmd, vma);
6000 	if (ptl) {
6001 		if (mc.precharge < HPAGE_PMD_NR) {
6002 			spin_unlock(ptl);
6003 			return 0;
6004 		}
6005 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6006 		if (target_type == MC_TARGET_PAGE) {
6007 			page = target.page;
6008 			if (!isolate_lru_page(page)) {
6009 				if (!mem_cgroup_move_account(page, true,
6010 							     mc.from, mc.to)) {
6011 					mc.precharge -= HPAGE_PMD_NR;
6012 					mc.moved_charge += HPAGE_PMD_NR;
6013 				}
6014 				putback_lru_page(page);
6015 			}
6016 			put_page(page);
6017 		} else if (target_type == MC_TARGET_DEVICE) {
6018 			page = target.page;
6019 			if (!mem_cgroup_move_account(page, true,
6020 						     mc.from, mc.to)) {
6021 				mc.precharge -= HPAGE_PMD_NR;
6022 				mc.moved_charge += HPAGE_PMD_NR;
6023 			}
6024 			put_page(page);
6025 		}
6026 		spin_unlock(ptl);
6027 		return 0;
6028 	}
6029 
6030 	if (pmd_trans_unstable(pmd))
6031 		return 0;
6032 retry:
6033 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6034 	for (; addr != end; addr += PAGE_SIZE) {
6035 		pte_t ptent = *(pte++);
6036 		bool device = false;
6037 		swp_entry_t ent;
6038 
6039 		if (!mc.precharge)
6040 			break;
6041 
6042 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6043 		case MC_TARGET_DEVICE:
6044 			device = true;
6045 			fallthrough;
6046 		case MC_TARGET_PAGE:
6047 			page = target.page;
6048 			/*
6049 			 * We can have a part of a split pmd here. Moving it
6050 			 * could be done, but it would be too convoluted, so
6051 			 * simply ignore such a partial THP and keep it in the
6052 			 * original memcg. There should be somebody mapping the head.
6053 			 */
6054 			if (PageTransCompound(page))
6055 				goto put;
6056 			if (!device && isolate_lru_page(page))
6057 				goto put;
6058 			if (!mem_cgroup_move_account(page, false,
6059 						mc.from, mc.to)) {
6060 				mc.precharge--;
6061 				/* we uncharge from mc.from later. */
6062 				mc.moved_charge++;
6063 			}
6064 			if (!device)
6065 				putback_lru_page(page);
6066 put:			/* get_mctgt_type() gets the page */
6067 			put_page(page);
6068 			break;
6069 		case MC_TARGET_SWAP:
6070 			ent = target.ent;
6071 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6072 				mc.precharge--;
6073 				mem_cgroup_id_get_many(mc.to, 1);
6074 				/* we fixup other refcnts and charges later. */
6075 				mc.moved_swap++;
6076 			}
6077 			break;
6078 		default:
6079 			break;
6080 		}
6081 	}
6082 	pte_unmap_unlock(pte - 1, ptl);
6083 	cond_resched();
6084 
6085 	if (addr != end) {
6086 		/*
6087 		 * We have consumed all the precharges we got in can_attach().
6088 		 * We try to charge one by one, but don't do any additional
6089 		 * charges to mc.to once a charge has failed in the attach()
6090 		 * phase.
6091 		 */
6092 		ret = mem_cgroup_do_precharge(1);
6093 		if (!ret)
6094 			goto retry;
6095 	}
6096 
6097 	return ret;
6098 }
6099 
6100 static const struct mm_walk_ops charge_walk_ops = {
6101 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6102 };
6103 
6104 static void mem_cgroup_move_charge(void)
6105 {
6106 	lru_add_drain_all();
6107 	/*
6108 	 * Signal lock_page_memcg() to take the memcg's move_lock
6109 	 * while we're moving its pages to another memcg. Then wait
6110 	 * for already started RCU-only updates to finish.
6111 	 */
6112 	atomic_inc(&mc.from->moving_account);
6113 	synchronize_rcu();
6114 retry:
6115 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6116 		/*
6117 		 * Someone who is holding the mmap_lock might be waiting on
6118 		 * the waitq. So we cancel all extra charges, wake up all waiters,
6119 		 * and retry. Because we cancel precharges, we might not be able
6120 		 * to move enough charges, but moving charge is a best-effort
6121 		 * feature anyway, so it wouldn't be a big problem.
6122 		 */
6123 		__mem_cgroup_clear_mc();
6124 		cond_resched();
6125 		goto retry;
6126 	}
6127 	/*
6128 	 * When we have consumed all precharges and failed in doing
6129 	 * additional charge, the page walk just aborts.
6130 	 */
6131 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6132 			NULL);
6133 
6134 	mmap_read_unlock(mc.mm);
6135 	atomic_dec(&mc.from->moving_account);
6136 }
6137 
6138 static void mem_cgroup_move_task(void)
6139 {
6140 	if (mc.to) {
6141 		mem_cgroup_move_charge();
6142 		mem_cgroup_clear_mc();
6143 	}
6144 }
6145 #else	/* !CONFIG_MMU */
6146 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6147 {
6148 	return 0;
6149 }
6150 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6151 {
6152 }
6153 static void mem_cgroup_move_task(void)
6154 {
6155 }
6156 #endif
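
/*
 * Charge-moving lifecycle, as wired into memory_cgrp_subsys below:
 * mem_cgroup_can_attach() precharges mc.to for every movable page and
 * swap entry found in the target mm, mem_cgroup_move_task() (the
 * post_attach hook) walks the page tables and moves the charges from
 * mc.from to mc.to, and mem_cgroup_cancel_attach() backs the precharge
 * out again if the migration is aborted.
 */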
6157 
6158 /*
6159  * Cgroup retains root cgroups across [un]mount cycles making it necessary
6160  * to verify whether we're attached to the default hierarchy on each mount
6161  * attempt.
6162  */
6163 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6164 {
6165 	/*
6166 	 * use_hierarchy is forced on the default hierarchy.  cgroup core
6167 	 * guarantees that @root doesn't have any children, so turning it
6168 	 * on for the root memcg is enough.
6169 	 */
6170 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6171 		root_mem_cgroup->use_hierarchy = true;
6172 	else
6173 		root_mem_cgroup->use_hierarchy = false;
6174 }
6175 
6176 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6177 {
6178 	if (value == PAGE_COUNTER_MAX)
6179 		seq_puts(m, "max\n");
6180 	else
6181 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6182 
6183 	return 0;
6184 }
6185 
6186 static u64 memory_current_read(struct cgroup_subsys_state *css,
6187 			       struct cftype *cft)
6188 {
6189 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6190 
6191 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6192 }
6193 
6194 static int memory_min_show(struct seq_file *m, void *v)
6195 {
6196 	return seq_puts_memcg_tunable(m,
6197 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6198 }
6199 
6200 static ssize_t memory_min_write(struct kernfs_open_file *of,
6201 				char *buf, size_t nbytes, loff_t off)
6202 {
6203 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6204 	unsigned long min;
6205 	int err;
6206 
6207 	buf = strstrip(buf);
6208 	err = page_counter_memparse(buf, "max", &min);
6209 	if (err)
6210 		return err;
6211 
6212 	page_counter_set_min(&memcg->memory, min);
6213 
6214 	return nbytes;
6215 }
6216 
6217 static int memory_low_show(struct seq_file *m, void *v)
6218 {
6219 	return seq_puts_memcg_tunable(m,
6220 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6221 }
6222 
6223 static ssize_t memory_low_write(struct kernfs_open_file *of,
6224 				char *buf, size_t nbytes, loff_t off)
6225 {
6226 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6227 	unsigned long low;
6228 	int err;
6229 
6230 	buf = strstrip(buf);
6231 	err = page_counter_memparse(buf, "max", &low);
6232 	if (err)
6233 		return err;
6234 
6235 	page_counter_set_low(&memcg->memory, low);
6236 
6237 	return nbytes;
6238 }
6239 
6240 static int memory_high_show(struct seq_file *m, void *v)
6241 {
6242 	return seq_puts_memcg_tunable(m,
6243 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6244 }
6245 
6246 static ssize_t memory_high_write(struct kernfs_open_file *of,
6247 				 char *buf, size_t nbytes, loff_t off)
6248 {
6249 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6250 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6251 	bool drained = false;
6252 	unsigned long high;
6253 	int err;
6254 
6255 	buf = strstrip(buf);
6256 	err = page_counter_memparse(buf, "max", &high);
6257 	if (err)
6258 		return err;
6259 
6260 	for (;;) {
6261 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6262 		unsigned long reclaimed;
6263 
6264 		if (nr_pages <= high)
6265 			break;
6266 
6267 		if (signal_pending(current))
6268 			break;
6269 
6270 		if (!drained) {
6271 			drain_all_stock(memcg);
6272 			drained = true;
6273 			continue;
6274 		}
6275 
6276 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6277 							 GFP_KERNEL, true);
6278 
6279 		if (!reclaimed && !nr_retries--)
6280 			break;
6281 	}
6282 
6283 	page_counter_set_high(&memcg->memory, high);
6284 
6285 	memcg_wb_domain_size_changed(memcg);
6286 
6287 	return nbytes;
6288 }
6289 
6290 static int memory_max_show(struct seq_file *m, void *v)
6291 {
6292 	return seq_puts_memcg_tunable(m,
6293 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6294 }
6295 
6296 static ssize_t memory_max_write(struct kernfs_open_file *of,
6297 				char *buf, size_t nbytes, loff_t off)
6298 {
6299 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6300 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6301 	bool drained = false;
6302 	unsigned long max;
6303 	int err;
6304 
6305 	buf = strstrip(buf);
6306 	err = page_counter_memparse(buf, "max", &max);
6307 	if (err)
6308 		return err;
6309 
6310 	xchg(&memcg->memory.max, max);
6311 
6312 	for (;;) {
6313 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6314 
6315 		if (nr_pages <= max)
6316 			break;
6317 
6318 		if (signal_pending(current))
6319 			break;
6320 
6321 		if (!drained) {
6322 			drain_all_stock(memcg);
6323 			drained = true;
6324 			continue;
6325 		}
6326 
6327 		if (nr_reclaims) {
6328 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6329 							  GFP_KERNEL, true))
6330 				nr_reclaims--;
6331 			continue;
6332 		}
6333 
6334 		memcg_memory_event(memcg, MEMCG_OOM);
6335 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6336 			break;
6337 	}
6338 
6339 	memcg_wb_domain_size_changed(memcg);
6340 	return nbytes;
6341 }
6342 
6343 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6344 {
6345 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6346 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6347 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6348 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6349 	seq_printf(m, "oom_kill %lu\n",
6350 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6351 }
6352 
6353 static int memory_events_show(struct seq_file *m, void *v)
6354 {
6355 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6356 
6357 	__memory_events_show(m, memcg->memory_events);
6358 	return 0;
6359 }
6360 
6361 static int memory_events_local_show(struct seq_file *m, void *v)
6362 {
6363 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6364 
6365 	__memory_events_show(m, memcg->memory_events_local);
6366 	return 0;
6367 }
6368 
6369 static int memory_stat_show(struct seq_file *m, void *v)
6370 {
6371 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6372 	char *buf;
6373 
6374 	buf = memory_stat_format(memcg);
6375 	if (!buf)
6376 		return -ENOMEM;
6377 	seq_puts(m, buf);
6378 	kfree(buf);
6379 	return 0;
6380 }
6381 
6382 static int memory_oom_group_show(struct seq_file *m, void *v)
6383 {
6384 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6385 
6386 	seq_printf(m, "%d\n", memcg->oom_group);
6387 
6388 	return 0;
6389 }
6390 
6391 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6392 				      char *buf, size_t nbytes, loff_t off)
6393 {
6394 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6395 	int ret, oom_group;
6396 
6397 	buf = strstrip(buf);
6398 	if (!buf)
6399 		return -EINVAL;
6400 
6401 	ret = kstrtoint(buf, 0, &oom_group);
6402 	if (ret)
6403 		return ret;
6404 
6405 	if (oom_group != 0 && oom_group != 1)
6406 		return -EINVAL;
6407 
6408 	memcg->oom_group = oom_group;
6409 
6410 	return nbytes;
6411 }
6412 
6413 static struct cftype memory_files[] = {
6414 	{
6415 		.name = "current",
6416 		.flags = CFTYPE_NOT_ON_ROOT,
6417 		.read_u64 = memory_current_read,
6418 	},
6419 	{
6420 		.name = "min",
6421 		.flags = CFTYPE_NOT_ON_ROOT,
6422 		.seq_show = memory_min_show,
6423 		.write = memory_min_write,
6424 	},
6425 	{
6426 		.name = "low",
6427 		.flags = CFTYPE_NOT_ON_ROOT,
6428 		.seq_show = memory_low_show,
6429 		.write = memory_low_write,
6430 	},
6431 	{
6432 		.name = "high",
6433 		.flags = CFTYPE_NOT_ON_ROOT,
6434 		.seq_show = memory_high_show,
6435 		.write = memory_high_write,
6436 	},
6437 	{
6438 		.name = "max",
6439 		.flags = CFTYPE_NOT_ON_ROOT,
6440 		.seq_show = memory_max_show,
6441 		.write = memory_max_write,
6442 	},
6443 	{
6444 		.name = "events",
6445 		.flags = CFTYPE_NOT_ON_ROOT,
6446 		.file_offset = offsetof(struct mem_cgroup, events_file),
6447 		.seq_show = memory_events_show,
6448 	},
6449 	{
6450 		.name = "events.local",
6451 		.flags = CFTYPE_NOT_ON_ROOT,
6452 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6453 		.seq_show = memory_events_local_show,
6454 	},
6455 	{
6456 		.name = "stat",
6457 		.seq_show = memory_stat_show,
6458 	},
6459 	{
6460 		.name = "oom.group",
6461 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6462 		.seq_show = memory_oom_group_show,
6463 		.write = memory_oom_group_write,
6464 	},
6465 	{ }	/* terminate */
6466 };
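
/*
 * Usage sketch (illustrative; the cgroup2 mount point and group name
 * are assumptions, not taken from this file):
 *
 *	echo 512M > /sys/fs/cgroup/mygroup/memory.max
 *	echo max  > /sys/fs/cgroup/mygroup/memory.high
 *	echo 1    > /sys/fs/cgroup/mygroup/memory.oom.group
 *	cat /sys/fs/cgroup/mygroup/memory.events
 *
 * The min/low/high/max writes are parsed by page_counter_memparse(),
 * which accepts "max" or a byte count with an optional K/M/G suffix;
 * reads go through seq_puts_memcg_tunable() and print "max" or a byte
 * count.
 */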
6467 
6468 struct cgroup_subsys memory_cgrp_subsys = {
6469 	.css_alloc = mem_cgroup_css_alloc,
6470 	.css_online = mem_cgroup_css_online,
6471 	.css_offline = mem_cgroup_css_offline,
6472 	.css_released = mem_cgroup_css_released,
6473 	.css_free = mem_cgroup_css_free,
6474 	.css_reset = mem_cgroup_css_reset,
6475 	.can_attach = mem_cgroup_can_attach,
6476 	.cancel_attach = mem_cgroup_cancel_attach,
6477 	.post_attach = mem_cgroup_move_task,
6478 	.bind = mem_cgroup_bind,
6479 	.dfl_cftypes = memory_files,
6480 	.legacy_cftypes = mem_cgroup_legacy_files,
6481 	.early_init = 0,
6482 };
6483 
6484 /*
6485  * This function calculates an individual cgroup's effective
6486  * protection which is derived from its own memory.min/low, its
6487  * parent's and siblings' settings, as well as the actual memory
6488  * distribution in the tree.
6489  *
6490  * The following rules apply to the effective protection values:
6491  *
6492  * 1. At the first level of reclaim, effective protection is equal to
6493  *    the declared protection in memory.min and memory.low.
6494  *
6495  * 2. To enable safe delegation of the protection configuration, at
6496  *    subsequent levels the effective protection is capped to the
6497  *    parent's effective protection.
6498  *
6499  * 3. To make complex and dynamic subtrees easier to configure, the
6500  *    user is allowed to overcommit the declared protection at a given
6501  *    level. If that is the case, the parent's effective protection is
6502  *    distributed to the children in proportion to how much protection
6503  *    they have declared and how much of it they are utilizing.
6504  *
6505  *    This makes distribution proportional, but also work-conserving:
6506  *    if one cgroup claims much more protection than it uses memory,
6507  *    the unused remainder is available to its siblings.
6508  *
6509  * 4. Conversely, when the declared protection is undercommitted at a
6510  *    given level, the distribution of the larger parental protection
6511  *    budget is NOT proportional. A cgroup's protection from a sibling
6512  *    is capped to its own memory.min/low setting.
6513  *
6514  * 5. However, to allow protecting recursive subtrees from each other
6515  *    without having to declare each individual cgroup's fixed share
6516  *    of the ancestor's claim to protection, any unutilized -
6517  *    "floating" - protection from up the tree is distributed in
6518  *    proportion to each cgroup's *usage*. This makes the protection
6519  *    neutral wrt sibling cgroups and lets them compete freely over
6520  *    the shared parental protection budget, but it protects the
6521  *    subtree as a whole from neighboring subtrees.
6522  *
6523  * Note that 4. and 5. are not in conflict: 4. is about protecting
6524  * against immediate siblings whereas 5. is about protecting against
6525  * neighboring subtrees.
6526  */
6527 static unsigned long effective_protection(unsigned long usage,
6528 					  unsigned long parent_usage,
6529 					  unsigned long setting,
6530 					  unsigned long parent_effective,
6531 					  unsigned long siblings_protected)
6532 {
6533 	unsigned long protected;
6534 	unsigned long ep;
6535 
6536 	protected = min(usage, setting);
6537 	/*
6538 	 * If all cgroups at this level combined claim and use more
6539 	 * protection than what the parent affords them, distribute
6540 	 * shares in proportion to utilization.
6541 	 *
6542 	 * We are using actual utilization rather than the statically
6543 	 * claimed protection in order to be work-conserving: claimed
6544 	 * but unused protection is available to siblings that would
6545 	 * otherwise get a smaller chunk than what they claimed.
6546 	 */
6547 	if (siblings_protected > parent_effective)
6548 		return protected * parent_effective / siblings_protected;
6549 
6550 	/*
6551 	 * Ok, utilized protection of all children is within what the
6552 	 * parent affords them, so we know whatever this child claims
6553 	 * and utilizes is effectively protected.
6554 	 *
6555 	 * If there is unprotected usage beyond this value, reclaim
6556 	 * will apply pressure in proportion to that amount.
6557 	 *
6558 	 * If there is unutilized protection, the cgroup will be fully
6559 	 * shielded from reclaim, but we do return a smaller value for
6560 	 * protection than what the group could enjoy in theory. This
6561 	 * is okay. With the overcommit distribution above, effective
6562 	 * protection is always dependent on how memory is actually
6563 	 * consumed among the siblings anyway.
6564 	 */
6565 	ep = protected;
6566 
6567 	/*
6568 	 * If the children aren't claiming (all of) the protection
6569 	 * afforded to them by the parent, distribute the remainder in
6570 	 * proportion to the (unprotected) memory of each cgroup. That
6571 	 * way, cgroups that aren't explicitly prioritized wrt each
6572 	 * other compete freely over the allowance, but they are
6573 	 * collectively protected from neighboring trees.
6574 	 *
6575 	 * We're using unprotected memory for the weight so that if
6576 	 * some cgroups DO claim explicit protection, we don't protect
6577 	 * the same bytes twice.
6578 	 *
6579 	 * Check both usage and parent_usage against the respective
6580 	 * protected values. One should imply the other, but they
6581 	 * aren't read atomically - make sure the division is sane.
6582 	 */
6583 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6584 		return ep;
6585 	if (parent_effective > siblings_protected &&
6586 	    parent_usage > siblings_protected &&
6587 	    usage > protected) {
6588 		unsigned long unclaimed;
6589 
6590 		unclaimed = parent_effective - siblings_protected;
6591 		unclaimed *= usage - protected;
6592 		unclaimed /= parent_usage - siblings_protected;
6593 
6594 		ep += unclaimed;
6595 	}
6596 
6597 	return ep;
6598 }
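
/*
 * Worked example of the overcommit rule above (hypothetical numbers):
 * a parent with an effective protection of 100 pages has two children
 * that each declare memory.low = 80 and each use at least 80 pages.
 * Then siblings_protected = 160 > parent_effective = 100, and each
 * child's effective protection is scaled down to
 *
 *	80 * 100 / 160 = 50 pages.
 *
 * If one child used only 40 of its claimed 80 pages, its protected
 * value would be min(40, 80) = 40, siblings_protected would drop to
 * 120, and the fully-utilizing child would get 80 * 100 / 120 = 66
 * pages: claimed but unused protection flows to siblings that can
 * actually use it.
 */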
6599 
6600 /**
6601  * mem_cgroup_calculate_protection - calculate a memory cgroup's effective protection
6602  * @root: the top ancestor of the sub-tree being checked
6603  * @memcg: the memory cgroup to check
6604  *
6605  * WARNING: This function is not stateless! It can only be used as part
6606  *          of a top-down tree iteration, not for isolated queries.
6607  */
6608 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6609 				     struct mem_cgroup *memcg)
6610 {
6611 	unsigned long usage, parent_usage;
6612 	struct mem_cgroup *parent;
6613 
6614 	if (mem_cgroup_disabled())
6615 		return;
6616 
6617 	if (!root)
6618 		root = root_mem_cgroup;
6619 
6620 	/*
6621 	 * Effective values of the reclaim targets are ignored so they
6622 	 * can be stale. Have a look at mem_cgroup_protection for more
6623 	 * details.
6624 	 * TODO: calculation should be more robust so that we do not need
6625 	 * that special casing.
6626 	 */
6627 	if (memcg == root)
6628 		return;
6629 
6630 	usage = page_counter_read(&memcg->memory);
6631 	if (!usage)
6632 		return;
6633 
6634 	parent = parent_mem_cgroup(memcg);
6635 	/* No parent means a non-hierarchical mode on v1 memcg */
6636 	if (!parent)
6637 		return;
6638 
6639 	if (parent == root) {
6640 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6641 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6642 		return;
6643 	}
6644 
6645 	parent_usage = page_counter_read(&parent->memory);
6646 
6647 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6648 			READ_ONCE(memcg->memory.min),
6649 			READ_ONCE(parent->memory.emin),
6650 			atomic_long_read(&parent->memory.children_min_usage)));
6651 
6652 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6653 			READ_ONCE(memcg->memory.low),
6654 			READ_ONCE(parent->memory.elow),
6655 			atomic_long_read(&parent->memory.children_low_usage)));
6656 }
6657 
6658 /**
6659  * mem_cgroup_charge - charge a newly allocated page to a cgroup
6660  * @page: page to charge
6661  * @mm: mm context of the victim
6662  * @gfp_mask: reclaim mode
6663  *
6664  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6665  * pages according to @gfp_mask if necessary.
6666  *
6667  * Returns 0 on success. Otherwise, an error code is returned.
6668  */
6669 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6670 {
6671 	unsigned int nr_pages = hpage_nr_pages(page);
6672 	struct mem_cgroup *memcg = NULL;
6673 	int ret = 0;
6674 
6675 	if (mem_cgroup_disabled())
6676 		goto out;
6677 
6678 	if (PageSwapCache(page)) {
6679 		swp_entry_t ent = { .val = page_private(page), };
6680 		unsigned short id;
6681 
6682 		/*
6683 		 * Every swap fault against a single page tries to charge the
6684 		 * page, so bail out as early as possible.  shmem_unuse() encounters
6685 		 * already charged pages, too.  page->mem_cgroup is protected
6686 		 * by the page lock, which serializes swap cache removal, which
6687 		 * in turn serializes uncharging.
6688 		 */
6689 		VM_BUG_ON_PAGE(!PageLocked(page), page);
6690 		if (compound_head(page)->mem_cgroup)
6691 			goto out;
6692 
6693 		id = lookup_swap_cgroup_id(ent);
6694 		rcu_read_lock();
6695 		memcg = mem_cgroup_from_id(id);
6696 		if (memcg && !css_tryget_online(&memcg->css))
6697 			memcg = NULL;
6698 		rcu_read_unlock();
6699 	}
6700 
6701 	if (!memcg)
6702 		memcg = get_mem_cgroup_from_mm(mm);
6703 
6704 	ret = try_charge(memcg, gfp_mask, nr_pages);
6705 	if (ret)
6706 		goto out_put;
6707 
6708 	css_get(&memcg->css);
6709 	commit_charge(page, memcg);
6710 
6711 	local_irq_disable();
6712 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
6713 	memcg_check_events(memcg, page);
6714 	local_irq_enable();
6715 
6716 	if (PageSwapCache(page)) {
6717 		swp_entry_t entry = { .val = page_private(page) };
6718 		/*
6719 		 * The swap entry might not get freed for a long time,
6720 		 * let's not wait for it.  The page already received a
6721 		 * memory+swap charge, drop the swap entry duplicate.
6722 		 */
6723 		mem_cgroup_uncharge_swap(entry, nr_pages);
6724 	}
6725 
6726 out_put:
6727 	css_put(&memcg->css);
6728 out:
6729 	return ret;
6730 }
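
/*
 * Call-site sketch (simplified from a typical fault path; the exact
 * allocation call is illustrative, not taken from this file):
 *
 *	page = alloc_page(GFP_KERNEL);
 *	if (page && mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
 *		put_page(page);
 *		return VM_FAULT_OOM;
 *	}
 */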
6731 
6732 struct uncharge_gather {
6733 	struct mem_cgroup *memcg;
6734 	unsigned long nr_pages;
6735 	unsigned long pgpgout;
6736 	unsigned long nr_kmem;
6737 	struct page *dummy_page;
6738 };
6739 
6740 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6741 {
6742 	memset(ug, 0, sizeof(*ug));
6743 }
6744 
6745 static void uncharge_batch(const struct uncharge_gather *ug)
6746 {
6747 	unsigned long flags;
6748 
6749 	if (!mem_cgroup_is_root(ug->memcg)) {
6750 		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6751 		if (do_memsw_account())
6752 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6753 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6754 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6755 		memcg_oom_recover(ug->memcg);
6756 	}
6757 
6758 	local_irq_save(flags);
6759 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6760 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6761 	memcg_check_events(ug->memcg, ug->dummy_page);
6762 	local_irq_restore(flags);
6763 }
6764 
6765 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6766 {
6767 	unsigned long nr_pages;
6768 
6769 	VM_BUG_ON_PAGE(PageLRU(page), page);
6770 
6771 	if (!page->mem_cgroup)
6772 		return;
6773 
6774 	/*
6775 	 * Nobody should be changing or seriously looking at
6776 	 * page->mem_cgroup at this point; we have fully
6777 	 * exclusive access to the page.
6778 	 */
6779 
6780 	if (ug->memcg != page->mem_cgroup) {
6781 		if (ug->memcg) {
6782 			uncharge_batch(ug);
6783 			uncharge_gather_clear(ug);
6784 		}
6785 		ug->memcg = page->mem_cgroup;
6786 	}
6787 
6788 	nr_pages = compound_nr(page);
6789 	ug->nr_pages += nr_pages;
6790 
6791 	if (!PageKmemcg(page)) {
6792 		ug->pgpgout++;
6793 	} else {
6794 		ug->nr_kmem += nr_pages;
6795 		__ClearPageKmemcg(page);
6796 	}
6797 
6798 	ug->dummy_page = page;
6799 	page->mem_cgroup = NULL;
6800 	css_put(&ug->memcg->css);
6801 }
6802 
6803 static void uncharge_list(struct list_head *page_list)
6804 {
6805 	struct uncharge_gather ug;
6806 	struct list_head *next;
6807 
6808 	uncharge_gather_clear(&ug);
6809 
6810 	/*
6811 	 * Note that the list can be a single page->lru; hence the
6812 	 * do-while loop instead of a simple list_for_each_entry().
6813 	 */
6814 	next = page_list->next;
6815 	do {
6816 		struct page *page;
6817 
6818 		page = list_entry(next, struct page, lru);
6819 		next = page->lru.next;
6820 
6821 		uncharge_page(page, &ug);
6822 	} while (next != page_list);
6823 
6824 	if (ug.memcg)
6825 		uncharge_batch(&ug);
6826 }
6827 
6828 /**
6829  * mem_cgroup_uncharge - uncharge a page
6830  * @page: page to uncharge
6831  *
6832  * Uncharge a page previously charged with mem_cgroup_charge().
6833  */
6834 void mem_cgroup_uncharge(struct page *page)
6835 {
6836 	struct uncharge_gather ug;
6837 
6838 	if (mem_cgroup_disabled())
6839 		return;
6840 
6841 	/* Don't touch page->lru of any random page, pre-check: */
6842 	if (!page->mem_cgroup)
6843 		return;
6844 
6845 	uncharge_gather_clear(&ug);
6846 	uncharge_page(page, &ug);
6847 	uncharge_batch(&ug);
6848 }
6849 
6850 /**
6851  * mem_cgroup_uncharge_list - uncharge a list of pages
6852  * @page_list: list of pages to uncharge
6853  *
6854  * Uncharge a list of pages previously charged with
6855  * mem_cgroup_charge().
6856  */
6857 void mem_cgroup_uncharge_list(struct list_head *page_list)
6858 {
6859 	if (mem_cgroup_disabled())
6860 		return;
6861 
6862 	if (!list_empty(page_list))
6863 		uncharge_list(page_list);
6864 }
6865 
6866 /**
6867  * mem_cgroup_migrate - charge a page's replacement
6868  * @oldpage: currently circulating page
6869  * @newpage: replacement page
6870  *
6871  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6872  * be uncharged upon free.
6873  *
6874  * Both pages must be locked, @newpage->mapping must be set up.
6875  */
6876 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6877 {
6878 	struct mem_cgroup *memcg;
6879 	unsigned int nr_pages;
6880 	unsigned long flags;
6881 
6882 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6883 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6884 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6885 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6886 		       newpage);
6887 
6888 	if (mem_cgroup_disabled())
6889 		return;
6890 
6891 	/* Page cache replacement: new page already charged? */
6892 	if (newpage->mem_cgroup)
6893 		return;
6894 
6895 	/* Swapcache readahead pages can get replaced before being charged */
6896 	memcg = oldpage->mem_cgroup;
6897 	if (!memcg)
6898 		return;
6899 
6900 	/* Force-charge the new page. The old one will be freed soon */
6901 	nr_pages = hpage_nr_pages(newpage);
6902 
6903 	page_counter_charge(&memcg->memory, nr_pages);
6904 	if (do_memsw_account())
6905 		page_counter_charge(&memcg->memsw, nr_pages);
6906 
6907 	css_get(&memcg->css);
6908 	commit_charge(newpage, memcg);
6909 
6910 	local_irq_save(flags);
6911 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
6912 	memcg_check_events(memcg, newpage);
6913 	local_irq_restore(flags);
6914 }
6915 
6916 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6917 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6918 
6919 void mem_cgroup_sk_alloc(struct sock *sk)
6920 {
6921 	struct mem_cgroup *memcg;
6922 
6923 	if (!mem_cgroup_sockets_enabled)
6924 		return;
6925 
6926 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
6927 	if (in_interrupt())
6928 		return;
6929 
6930 	rcu_read_lock();
6931 	memcg = mem_cgroup_from_task(current);
6932 	if (memcg == root_mem_cgroup)
6933 		goto out;
6934 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6935 		goto out;
6936 	if (css_tryget(&memcg->css))
6937 		sk->sk_memcg = memcg;
6938 out:
6939 	rcu_read_unlock();
6940 }
6941 
6942 void mem_cgroup_sk_free(struct sock *sk)
6943 {
6944 	if (sk->sk_memcg)
6945 		css_put(&sk->sk_memcg->css);
6946 }
6947 
6948 /**
6949  * mem_cgroup_charge_skmem - charge socket memory
6950  * @memcg: memcg to charge
6951  * @nr_pages: number of pages to charge
6952  *
6953  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6954  * @memcg's configured limit, %false if the charge had to be forced.
6955  */
6956 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6957 {
6958 	gfp_t gfp_mask = GFP_KERNEL;
6959 
6960 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6961 		struct page_counter *fail;
6962 
6963 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6964 			memcg->tcpmem_pressure = 0;
6965 			return true;
6966 		}
6967 		page_counter_charge(&memcg->tcpmem, nr_pages);
6968 		memcg->tcpmem_pressure = 1;
6969 		return false;
6970 	}
6971 
6972 	/* Don't block in the packet receive path */
6973 	if (in_softirq())
6974 		gfp_mask = GFP_NOWAIT;
6975 
6976 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6977 
6978 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6979 		return true;
6980 
6981 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
6982 	return false;
6983 }
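
/*
 * Caller-pattern sketch (mirrors, in simplified form, how the
 * networking code consumes this API):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *		goto suppress_allocation;	// treat as socket pressure
 */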
6984 
6985 /**
6986  * mem_cgroup_uncharge_skmem - uncharge socket memory
6987  * @memcg: memcg to uncharge
6988  * @nr_pages: number of pages to uncharge
6989  */
6990 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6991 {
6992 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6993 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
6994 		return;
6995 	}
6996 
6997 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6998 
6999 	refill_stock(memcg, nr_pages);
7000 }
7001 
7002 static int __init cgroup_memory(char *s)
7003 {
7004 	char *token;
7005 
7006 	while ((token = strsep(&s, ",")) != NULL) {
7007 		if (!*token)
7008 			continue;
7009 		if (!strcmp(token, "nosocket"))
7010 			cgroup_memory_nosocket = true;
7011 		if (!strcmp(token, "nokmem"))
7012 			cgroup_memory_nokmem = true;
7013 	}
7014 	return 0;
7015 }
7016 __setup("cgroup.memory=", cgroup_memory);
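
/*
 * Example boot command line (illustrative):
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables socket memory accounting and kernel memory accounting in
 * one go; either token can also be passed on its own.
 */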
7017 
7018 /*
7019  * subsys_initcall() for memory controller.
7020  *
7021  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7022  * context because of lock dependencies (cgroup_lock -> cpu hotplug), but
7023  * basically everything that doesn't depend on a specific mem_cgroup structure
7024  * should be initialized from here.
7025  */
7026 static int __init mem_cgroup_init(void)
7027 {
7028 	int cpu, node;
7029 
7030 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7031 				  memcg_hotplug_cpu_dead);
7032 
7033 	for_each_possible_cpu(cpu)
7034 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7035 			  drain_local_stock);
7036 
7037 	for_each_node(node) {
7038 		struct mem_cgroup_tree_per_node *rtpn;
7039 
7040 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7041 				    node_online(node) ? node : NUMA_NO_NODE);
7042 
7043 		rtpn->rb_root = RB_ROOT;
7044 		rtpn->rb_rightmost = NULL;
7045 		spin_lock_init(&rtpn->lock);
7046 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7047 	}
7048 
7049 	return 0;
7050 }
7051 subsys_initcall(mem_cgroup_init);
7052 
7053 #ifdef CONFIG_MEMCG_SWAP
7054 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7055 {
7056 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7057 		/*
7058 		 * The root cgroup cannot be destroyed, so its refcount must
7059 		 * always be >= 1.
7060 		 */
7061 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7062 			VM_BUG_ON(1);
7063 			break;
7064 		}
7065 		memcg = parent_mem_cgroup(memcg);
7066 		if (!memcg)
7067 			memcg = root_mem_cgroup;
7068 	}
7069 	return memcg;
7070 }
7071 
7072 /**
7073  * mem_cgroup_swapout - transfer a memsw charge to swap
7074  * @page: page whose memsw charge to transfer
7075  * @entry: swap entry to move the charge to
7076  *
7077  * Transfer the memsw charge of @page to @entry.
7078  */
7079 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7080 {
7081 	struct mem_cgroup *memcg, *swap_memcg;
7082 	unsigned int nr_entries;
7083 	unsigned short oldid;
7084 
7085 	VM_BUG_ON_PAGE(PageLRU(page), page);
7086 	VM_BUG_ON_PAGE(page_count(page), page);
7087 
7088 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7089 		return;
7090 
7091 	memcg = page->mem_cgroup;
7092 
7093 	/* Readahead page, never charged */
7094 	if (!memcg)
7095 		return;
7096 
7097 	/*
7098 	 * In case the memcg owning these pages has been offlined and doesn't
7099 	 * have an ID allocated to it anymore, charge the closest online
7100 	 * ancestor for the swap instead and transfer the memory+swap charge.
7101 	 */
7102 	swap_memcg = mem_cgroup_id_get_online(memcg);
7103 	nr_entries = hpage_nr_pages(page);
7104 	/* Get references for the tail pages, too */
7105 	if (nr_entries > 1)
7106 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7107 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7108 				   nr_entries);
7109 	VM_BUG_ON_PAGE(oldid, page);
7110 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7111 
7112 	page->mem_cgroup = NULL;
7113 
7114 	if (!mem_cgroup_is_root(memcg))
7115 		page_counter_uncharge(&memcg->memory, nr_entries);
7116 
7117 	if (!cgroup_memory_noswap && memcg != swap_memcg) {
7118 		if (!mem_cgroup_is_root(swap_memcg))
7119 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7120 		page_counter_uncharge(&memcg->memsw, nr_entries);
7121 	}
7122 
7123 	/*
7124 	 * Interrupts should be disabled here because the caller holds the
7125 	 * i_pages lock which is taken with interrupts-off. It is
7126 	 * important here to have the interrupts disabled because it is the
7127 	 * only synchronisation we have for updating the per-CPU variables.
7128 	 */
7129 	VM_BUG_ON(!irqs_disabled());
7130 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7131 	memcg_check_events(memcg, page);
7132 
7133 	css_put(&memcg->css);
7134 }
7135 
7136 /**
7137  * mem_cgroup_try_charge_swap - try charging swap space for a page
7138  * @page: page being added to swap
7139  * @entry: swap entry to charge
7140  *
7141  * Try to charge @page's memcg for the swap space at @entry.
7142  *
7143  * Returns 0 on success, -ENOMEM on failure.
7144  */
7145 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7146 {
7147 	unsigned int nr_pages = hpage_nr_pages(page);
7148 	struct page_counter *counter;
7149 	struct mem_cgroup *memcg;
7150 	unsigned short oldid;
7151 
7152 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7153 		return 0;
7154 
7155 	memcg = page->mem_cgroup;
7156 
7157 	/* Readahead page, never charged */
7158 	if (!memcg)
7159 		return 0;
7160 
7161 	if (!entry.val) {
7162 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7163 		return 0;
7164 	}
7165 
7166 	memcg = mem_cgroup_id_get_online(memcg);
7167 
7168 	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7169 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7170 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7171 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7172 		mem_cgroup_id_put(memcg);
7173 		return -ENOMEM;
7174 	}
7175 
7176 	/* Get references for the tail pages, too */
7177 	if (nr_pages > 1)
7178 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7179 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7180 	VM_BUG_ON_PAGE(oldid, page);
7181 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7182 
7183 	return 0;
7184 }
7185 
7186 /**
7187  * mem_cgroup_uncharge_swap - uncharge swap space
7188  * @entry: swap entry to uncharge
7189  * @nr_pages: the amount of swap space to uncharge
7190  */
7191 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7192 {
7193 	struct mem_cgroup *memcg;
7194 	unsigned short id;
7195 
7196 	id = swap_cgroup_record(entry, 0, nr_pages);
7197 	rcu_read_lock();
7198 	memcg = mem_cgroup_from_id(id);
7199 	if (memcg) {
7200 		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7201 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7202 				page_counter_uncharge(&memcg->swap, nr_pages);
7203 			else
7204 				page_counter_uncharge(&memcg->memsw, nr_pages);
7205 		}
7206 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7207 		mem_cgroup_id_put_many(memcg, nr_pages);
7208 	}
7209 	rcu_read_unlock();
7210 }
7211 
7212 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7213 {
7214 	long nr_swap_pages = get_nr_swap_pages();
7215 
7216 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7217 		return nr_swap_pages;
7218 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7219 		nr_swap_pages = min_t(long, nr_swap_pages,
7220 				      READ_ONCE(memcg->swap.max) -
7221 				      page_counter_read(&memcg->swap));
7222 	return nr_swap_pages;
7223 }
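
/*
 * Worked example (hypothetical numbers): with 1000 free swap pages
 * system-wide, a memcg whose swap.max is 100 with 40 pages of swap in
 * use, and only unlimited ancestors above it, the walk above returns
 * min(1000, 100 - 40) = 60 pages of swap still available to the memcg.
 */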
7224 
7225 bool mem_cgroup_swap_full(struct page *page)
7226 {
7227 	struct mem_cgroup *memcg;
7228 
7229 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7230 
7231 	if (vm_swap_full())
7232 		return true;
7233 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7234 		return false;
7235 
7236 	memcg = page->mem_cgroup;
7237 	if (!memcg)
7238 		return false;
7239 
7240 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7241 		unsigned long usage = page_counter_read(&memcg->swap);
7242 
7243 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7244 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7245 			return true;
7246 	}
7247 
7248 	return false;
7249 }
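
/*
 * Example (hypothetical numbers): with swap.max = 200 pages, a memcg
 * counts as "swap full" once its swap usage reaches 100 pages, since
 * usage * 2 >= max; the same halfway test is applied to swap.high.
 */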
7250 
7251 static int __init setup_swap_account(char *s)
7252 {
7253 	if (!strcmp(s, "1"))
7254 		cgroup_memory_noswap = 0;
7255 	else if (!strcmp(s, "0"))
7256 		cgroup_memory_noswap = 1;
7257 	return 1;
7258 }
7259 __setup("swapaccount=", setup_swap_account);
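
/*
 * Example: booting with "swapaccount=0" disables swap accounting even
 * when CONFIG_MEMCG_SWAP is built in; without the parameter,
 * cgroup_memory_noswap stays false and accounting remains enabled.
 */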
7260 
7261 static u64 swap_current_read(struct cgroup_subsys_state *css,
7262 			     struct cftype *cft)
7263 {
7264 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7265 
7266 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7267 }
7268 
7269 static int swap_high_show(struct seq_file *m, void *v)
7270 {
7271 	return seq_puts_memcg_tunable(m,
7272 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7273 }
7274 
7275 static ssize_t swap_high_write(struct kernfs_open_file *of,
7276 			       char *buf, size_t nbytes, loff_t off)
7277 {
7278 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7279 	unsigned long high;
7280 	int err;
7281 
7282 	buf = strstrip(buf);
7283 	err = page_counter_memparse(buf, "max", &high);
7284 	if (err)
7285 		return err;
7286 
7287 	page_counter_set_high(&memcg->swap, high);
7288 
7289 	return nbytes;
7290 }
7291 
7292 static int swap_max_show(struct seq_file *m, void *v)
7293 {
7294 	return seq_puts_memcg_tunable(m,
7295 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7296 }
7297 
7298 static ssize_t swap_max_write(struct kernfs_open_file *of,
7299 			      char *buf, size_t nbytes, loff_t off)
7300 {
7301 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7302 	unsigned long max;
7303 	int err;
7304 
7305 	buf = strstrip(buf);
7306 	err = page_counter_memparse(buf, "max", &max);
7307 	if (err)
7308 		return err;
7309 
7310 	xchg(&memcg->swap.max, max);
7311 
7312 	return nbytes;
7313 }
7314 
7315 static int swap_events_show(struct seq_file *m, void *v)
7316 {
7317 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7318 
7319 	seq_printf(m, "high %lu\n",
7320 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7321 	seq_printf(m, "max %lu\n",
7322 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7323 	seq_printf(m, "fail %lu\n",
7324 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7325 
7326 	return 0;
7327 }
7328 
7329 static struct cftype swap_files[] = {
7330 	{
7331 		.name = "swap.current",
7332 		.flags = CFTYPE_NOT_ON_ROOT,
7333 		.read_u64 = swap_current_read,
7334 	},
7335 	{
7336 		.name = "swap.high",
7337 		.flags = CFTYPE_NOT_ON_ROOT,
7338 		.seq_show = swap_high_show,
7339 		.write = swap_high_write,
7340 	},
7341 	{
7342 		.name = "swap.max",
7343 		.flags = CFTYPE_NOT_ON_ROOT,
7344 		.seq_show = swap_max_show,
7345 		.write = swap_max_write,
7346 	},
7347 	{
7348 		.name = "swap.events",
7349 		.flags = CFTYPE_NOT_ON_ROOT,
7350 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7351 		.seq_show = swap_events_show,
7352 	},
7353 	{ }	/* terminate */
7354 };
7355 
7356 static struct cftype memsw_files[] = {
7357 	{
7358 		.name = "memsw.usage_in_bytes",
7359 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7360 		.read_u64 = mem_cgroup_read_u64,
7361 	},
7362 	{
7363 		.name = "memsw.max_usage_in_bytes",
7364 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7365 		.write = mem_cgroup_reset,
7366 		.read_u64 = mem_cgroup_read_u64,
7367 	},
7368 	{
7369 		.name = "memsw.limit_in_bytes",
7370 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7371 		.write = mem_cgroup_write,
7372 		.read_u64 = mem_cgroup_read_u64,
7373 	},
7374 	{
7375 		.name = "memsw.failcnt",
7376 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7377 		.write = mem_cgroup_reset,
7378 		.read_u64 = mem_cgroup_read_u64,
7379 	},
7380 	{ }	/* terminate */
7381 };
7382 
7383 /*
7384  * If mem_cgroup_swap_init() were implemented as a subsys_initcall()
7385  * instead of a core_initcall(), cgroup_memory_noswap could still
7386  * remain false even when memcg is disabled via the "cgroup_disable=memory"
7387  * boot parameter. This may result in a premature OOPS inside the
7388  * mem_cgroup_get_nr_swap_pages() function in corner cases.
7389  */
7390 static int __init mem_cgroup_swap_init(void)
7391 {
7392 	/* No memory control -> no swap control */
7393 	if (mem_cgroup_disabled())
7394 		cgroup_memory_noswap = true;
7395 
7396 	if (cgroup_memory_noswap)
7397 		return 0;
7398 
7399 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7400 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7401 
7402 	return 0;
7403 }
7404 core_initcall(mem_cgroup_swap_init);
7405 
7406 #endif /* CONFIG_MEMCG_SWAP */
7407