xref: /linux/mm/memcontrol.c (revision 7204df5e7e681238d457da03502f4b653403d7e7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include <linux/sched/isolation.h>
67 #include <linux/kmemleak.h>
68 #include "internal.h"
69 #include <net/sock.h>
70 #include <net/ip.h>
71 #include "slab.h"
72 #include "swap.h"
73 
74 #include <linux/uaccess.h>
75 
76 #include <trace/events/vmscan.h>
77 
78 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
79 EXPORT_SYMBOL(memory_cgrp_subsys);
80 
81 struct mem_cgroup *root_mem_cgroup __read_mostly;
82 
83 /* Active memory cgroup to use from an interrupt context */
84 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
85 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
86 
87 /* Socket memory accounting disabled? */
88 static bool cgroup_memory_nosocket __ro_after_init;
89 
90 /* Kernel memory accounting disabled? */
91 static bool cgroup_memory_nokmem __ro_after_init;
92 
93 /* BPF memory accounting disabled? */
94 static bool cgroup_memory_nobpf __ro_after_init;
95 
96 #ifdef CONFIG_CGROUP_WRITEBACK
97 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
98 #endif
99 
100 /* Whether legacy memory+swap accounting is active */
101 static bool do_memsw_account(void)
102 {
103 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
104 }
105 
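/*
 * Roughly how many per-CPU page (un)charge events may elapse between
 * successive threshold checks and soft-limit tree updates, respectively
 * (see mem_cgroup_event_ratelimit() below).
 */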
106 #define THRESHOLDS_EVENTS_TARGET 128
107 #define SOFTLIMIT_EVENTS_TARGET 1024
108 
109 /*
110  * Cgroups above their limits are maintained in an RB-tree, independent of
111  * their hierarchy representation.
112  */
113 
114 struct mem_cgroup_tree_per_node {
115 	struct rb_root rb_root;
116 	struct rb_node *rb_rightmost;
117 	spinlock_t lock;
118 };
119 
120 struct mem_cgroup_tree {
121 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
122 };
123 
124 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
125 
126 /* for OOM */
127 struct mem_cgroup_eventfd_list {
128 	struct list_head list;
129 	struct eventfd_ctx *eventfd;
130 };
131 
132 /*
133  * cgroup_event represents events which userspace wants to receive.
134  */
135 struct mem_cgroup_event {
136 	/*
137 	 * memcg which the event belongs to.
138 	 */
139 	struct mem_cgroup *memcg;
140 	/*
141 	 * eventfd to signal userspace about the event.
142 	 */
143 	struct eventfd_ctx *eventfd;
144 	/*
145 	 * Each of these is stored in a list by the cgroup.
146 	 */
147 	struct list_head list;
148 	/*
149 	 * The register_event() callback will be used to add a new userspace
150 	 * waiter for changes related to this event.  Use eventfd_signal()
151 	 * on eventfd to send notification to userspace.
152 	 */
153 	int (*register_event)(struct mem_cgroup *memcg,
154 			      struct eventfd_ctx *eventfd, const char *args);
155 	/*
156 	 * The unregister_event() callback will be called when userspace closes
157 	 * the eventfd or when the cgroup is removed.  This callback must be
158 	 * set if you want to provide notification functionality.
159 	 */
160 	void (*unregister_event)(struct mem_cgroup *memcg,
161 				 struct eventfd_ctx *eventfd);
162 	/*
163 	 * All fields below are needed to unregister the event when
164 	 * userspace closes the eventfd.
165 	 */
166 	poll_table pt;
167 	wait_queue_head_t *wqh;
168 	wait_queue_entry_t wait;
169 	struct work_struct remove;
170 };
171 
172 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
173 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
174 
175 /* Stuff for moving charges at task migration. */
176 /*
177  * Types of charges to be moved.
178  */
179 #define MOVE_ANON	0x1U
180 #define MOVE_FILE	0x2U
181 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
182 
183 /* "mc" and its members are protected by cgroup_mutex */
184 static struct move_charge_struct {
185 	spinlock_t	  lock; /* for from, to */
186 	struct mm_struct  *mm;
187 	struct mem_cgroup *from;
188 	struct mem_cgroup *to;
189 	unsigned long flags;
190 	unsigned long precharge;
191 	unsigned long moved_charge;
192 	unsigned long moved_swap;
193 	struct task_struct *moving_task;	/* a task moving charges */
194 	wait_queue_head_t waitq;		/* a waitq for other context */
195 } mc = {
196 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
197 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
198 };
199 
200 /*
201  * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
202  * limit reclaim to prevent infinite loops, if they ever occur.
203  */
204 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
205 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
206 
207 /* for encoding cft->private value on file */
208 enum res_type {
209 	_MEM,
210 	_MEMSWAP,
211 	_KMEM,
212 	_TCP,
213 };
214 
215 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
216 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
217 #define MEMFILE_ATTR(val)	((val) & 0xffff)
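/*
 * Illustrative example (not part of the original comments): a cgroup1 control
 * file for a memsw counter can set cft->private to
 * MEMFILE_PRIVATE(_MEMSWAP, <attr>); its read/write handlers then recover the
 * resource type with MEMFILE_TYPE() and the attribute with MEMFILE_ATTR().
 */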
218 
219 /*
220  * Iteration constructs for visiting all cgroups (under a tree).  If
221  * loops are exited prematurely (break), mem_cgroup_iter_break() must
222  * be used for reference counting.
223  */
224 #define for_each_mem_cgroup_tree(iter, root)		\
225 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
226 	     iter != NULL;				\
227 	     iter = mem_cgroup_iter(root, iter, NULL))
228 
229 #define for_each_mem_cgroup(iter)			\
230 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
231 	     iter != NULL;				\
232 	     iter = mem_cgroup_iter(NULL, iter, NULL))
233 
234 static inline bool task_is_dying(void)
235 {
236 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
237 		(current->flags & PF_EXITING);
238 }
239 
240 /* Some nice accessors for the vmpressure. */
241 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
242 {
243 	if (!memcg)
244 		memcg = root_mem_cgroup;
245 	return &memcg->vmpressure;
246 }
247 
248 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
249 {
250 	return container_of(vmpr, struct mem_cgroup, vmpressure);
251 }
252 
253 #define CURRENT_OBJCG_UPDATE_BIT 0
254 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
255 
256 #ifdef CONFIG_MEMCG_KMEM
257 static DEFINE_SPINLOCK(objcg_lock);
258 
259 bool mem_cgroup_kmem_disabled(void)
260 {
261 	return cgroup_memory_nokmem;
262 }
263 
264 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
265 				      unsigned int nr_pages);
266 
267 static void obj_cgroup_release(struct percpu_ref *ref)
268 {
269 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
270 	unsigned int nr_bytes;
271 	unsigned int nr_pages;
272 	unsigned long flags;
273 
274 	/*
275 	 * At this point all allocated objects are freed, and
276 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
277 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
278 	 *
279 	 * The following sequence can lead to it:
280 	 * 1) CPU0: objcg == stock->cached_objcg
281 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
282 	 *          PAGE_SIZE bytes are charged
283 	 * 3) CPU1: a process from another memcg is allocating something,
284 	 *          the stock is flushed,
285 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
286 	 * 4) CPU0: we release this object,
287 	 *          92 bytes are added to stock->nr_bytes
288 	 * 5) CPU0: stock is flushed,
289 	 *          92 bytes are added to objcg->nr_charged_bytes
290 	 *
291 	 * As a result, nr_charged_bytes == PAGE_SIZE.
292 	 * This page will be uncharged in obj_cgroup_release().
293 	 */
294 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
295 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
296 	nr_pages = nr_bytes >> PAGE_SHIFT;
297 
298 	if (nr_pages)
299 		obj_cgroup_uncharge_pages(objcg, nr_pages);
300 
301 	spin_lock_irqsave(&objcg_lock, flags);
302 	list_del(&objcg->list);
303 	spin_unlock_irqrestore(&objcg_lock, flags);
304 
305 	percpu_ref_exit(ref);
306 	kfree_rcu(objcg, rcu);
307 }
308 
309 static struct obj_cgroup *obj_cgroup_alloc(void)
310 {
311 	struct obj_cgroup *objcg;
312 	int ret;
313 
314 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
315 	if (!objcg)
316 		return NULL;
317 
318 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
319 			      GFP_KERNEL);
320 	if (ret) {
321 		kfree(objcg);
322 		return NULL;
323 	}
324 	INIT_LIST_HEAD(&objcg->list);
325 	return objcg;
326 }
327 
328 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
329 				  struct mem_cgroup *parent)
330 {
331 	struct obj_cgroup *objcg, *iter;
332 
333 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
334 
335 	spin_lock_irq(&objcg_lock);
336 
337 	/* 1) Ready to reparent active objcg. */
338 	list_add(&objcg->list, &memcg->objcg_list);
339 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
340 	list_for_each_entry(iter, &memcg->objcg_list, list)
341 		WRITE_ONCE(iter->memcg, parent);
342 	/* 3) Move already reparented objcgs to the parent's list */
343 	list_splice(&memcg->objcg_list, &parent->objcg_list);
344 
345 	spin_unlock_irq(&objcg_lock);
346 
347 	percpu_ref_kill(&objcg->refcnt);
348 }
349 
350 /*
351  * A lot of the calls to the cache allocation functions are expected to be
352  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
353  * conditional to this static branch, we'll have to allow modules that do
354  * kmem_cache_alloc and the like to see this symbol as well.
355  */
356 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
357 EXPORT_SYMBOL(memcg_kmem_online_key);
358 
359 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
360 EXPORT_SYMBOL(memcg_bpf_enabled_key);
361 #endif
362 
363 /**
364  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
365  * @folio: folio of interest
366  *
367  * If memcg is bound to the default hierarchy, css of the memcg associated
368  * with @folio is returned.  The returned css remains associated with @folio
369  * until it is released.
370  *
371  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
372  * is returned.
373  */
374 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
375 {
376 	struct mem_cgroup *memcg = folio_memcg(folio);
377 
378 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
379 		memcg = root_mem_cgroup;
380 
381 	return &memcg->css;
382 }
383 
384 /**
385  * page_cgroup_ino - return inode number of the memcg a page is charged to
386  * @page: the page
387  *
388  * Look up the closest online ancestor of the memory cgroup @page is charged to
389  * and return its inode number or 0 if @page is not charged to any cgroup. It
390  * is safe to call this function without holding a reference to @page.
391  *
392  * Note, this function is inherently racy, because there is nothing to prevent
393  * the cgroup inode from getting torn down and potentially reallocated a moment
394  * after page_cgroup_ino() returns, so it only should be used by callers that
395  * do not care (such as procfs interfaces).
396  */
397 ino_t page_cgroup_ino(struct page *page)
398 {
399 	struct mem_cgroup *memcg;
400 	unsigned long ino = 0;
401 
402 	rcu_read_lock();
403 	/* page_folio() is racy here, but the entire function is racy anyway */
404 	memcg = folio_memcg_check(page_folio(page));
405 
406 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
407 		memcg = parent_mem_cgroup(memcg);
408 	if (memcg)
409 		ino = cgroup_ino(memcg->css.cgroup);
410 	rcu_read_unlock();
411 	return ino;
412 }
413 
414 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
415 					 struct mem_cgroup_tree_per_node *mctz,
416 					 unsigned long new_usage_in_excess)
417 {
418 	struct rb_node **p = &mctz->rb_root.rb_node;
419 	struct rb_node *parent = NULL;
420 	struct mem_cgroup_per_node *mz_node;
421 	bool rightmost = true;
422 
423 	if (mz->on_tree)
424 		return;
425 
426 	mz->usage_in_excess = new_usage_in_excess;
427 	if (!mz->usage_in_excess)
428 		return;
429 	while (*p) {
430 		parent = *p;
431 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
432 					tree_node);
433 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
434 			p = &(*p)->rb_left;
435 			rightmost = false;
436 		} else {
437 			p = &(*p)->rb_right;
438 		}
439 	}
440 
441 	if (rightmost)
442 		mctz->rb_rightmost = &mz->tree_node;
443 
444 	rb_link_node(&mz->tree_node, parent, p);
445 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
446 	mz->on_tree = true;
447 }
448 
449 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
450 					 struct mem_cgroup_tree_per_node *mctz)
451 {
452 	if (!mz->on_tree)
453 		return;
454 
455 	if (&mz->tree_node == mctz->rb_rightmost)
456 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
457 
458 	rb_erase(&mz->tree_node, &mctz->rb_root);
459 	mz->on_tree = false;
460 }
461 
462 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
463 				       struct mem_cgroup_tree_per_node *mctz)
464 {
465 	unsigned long flags;
466 
467 	spin_lock_irqsave(&mctz->lock, flags);
468 	__mem_cgroup_remove_exceeded(mz, mctz);
469 	spin_unlock_irqrestore(&mctz->lock, flags);
470 }
471 
472 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
473 {
474 	unsigned long nr_pages = page_counter_read(&memcg->memory);
475 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
476 	unsigned long excess = 0;
477 
478 	if (nr_pages > soft_limit)
479 		excess = nr_pages - soft_limit;
480 
481 	return excess;
482 }
483 
484 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
485 {
486 	unsigned long excess;
487 	struct mem_cgroup_per_node *mz;
488 	struct mem_cgroup_tree_per_node *mctz;
489 
490 	if (lru_gen_enabled()) {
491 		if (soft_limit_excess(memcg))
492 			lru_gen_soft_reclaim(memcg, nid);
493 		return;
494 	}
495 
496 	mctz = soft_limit_tree.rb_tree_per_node[nid];
497 	if (!mctz)
498 		return;
499 	/*
500 	 * Necessary to update all ancestors when a hierarchy is used,
501 	 * because their event counters are not touched.
502 	 */
503 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
504 		mz = memcg->nodeinfo[nid];
505 		excess = soft_limit_excess(memcg);
506 		/*
507 		 * We have to update the tree if mz is on RB-tree or
508 		 * mem is over its softlimit.
509 		 */
510 		if (excess || mz->on_tree) {
511 			unsigned long flags;
512 
513 			spin_lock_irqsave(&mctz->lock, flags);
514 			/* if on-tree, remove it */
515 			if (mz->on_tree)
516 				__mem_cgroup_remove_exceeded(mz, mctz);
517 			/*
518 			 * Insert again. mz->usage_in_excess will be updated.
519 			 * If excess is 0, no tree ops.
520 			 */
521 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
522 			spin_unlock_irqrestore(&mctz->lock, flags);
523 		}
524 	}
525 }
526 
527 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
528 {
529 	struct mem_cgroup_tree_per_node *mctz;
530 	struct mem_cgroup_per_node *mz;
531 	int nid;
532 
533 	for_each_node(nid) {
534 		mz = memcg->nodeinfo[nid];
535 		mctz = soft_limit_tree.rb_tree_per_node[nid];
536 		if (mctz)
537 			mem_cgroup_remove_exceeded(mz, mctz);
538 	}
539 }
540 
541 static struct mem_cgroup_per_node *
542 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
543 {
544 	struct mem_cgroup_per_node *mz;
545 
546 retry:
547 	mz = NULL;
548 	if (!mctz->rb_rightmost)
549 		goto done;		/* Nothing to reclaim from */
550 
551 	mz = rb_entry(mctz->rb_rightmost,
552 		      struct mem_cgroup_per_node, tree_node);
553 	/*
554 	 * Remove the node now but someone else can add it back;
555 	 * we will add it back at the end of reclaim to its correct
556 	 * position in the tree.
557 	 */
558 	__mem_cgroup_remove_exceeded(mz, mctz);
559 	if (!soft_limit_excess(mz->memcg) ||
560 	    !css_tryget(&mz->memcg->css))
561 		goto retry;
562 done:
563 	return mz;
564 }
565 
566 static struct mem_cgroup_per_node *
567 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
568 {
569 	struct mem_cgroup_per_node *mz;
570 
571 	spin_lock_irq(&mctz->lock);
572 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
573 	spin_unlock_irq(&mctz->lock);
574 	return mz;
575 }
576 
577 /* Subset of vm_event_item to report for memcg event stats */
578 static const unsigned int memcg_vm_event_stat[] = {
579 	PGPGIN,
580 	PGPGOUT,
581 	PGSCAN_KSWAPD,
582 	PGSCAN_DIRECT,
583 	PGSCAN_KHUGEPAGED,
584 	PGSTEAL_KSWAPD,
585 	PGSTEAL_DIRECT,
586 	PGSTEAL_KHUGEPAGED,
587 	PGFAULT,
588 	PGMAJFAULT,
589 	PGREFILL,
590 	PGACTIVATE,
591 	PGDEACTIVATE,
592 	PGLAZYFREE,
593 	PGLAZYFREED,
594 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
595 	ZSWPIN,
596 	ZSWPOUT,
597 	ZSWPWB,
598 #endif
599 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
600 	THP_FAULT_ALLOC,
601 	THP_COLLAPSE_ALLOC,
602 	THP_SWPOUT,
603 	THP_SWPOUT_FALLBACK,
604 #endif
605 };
606 
607 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
608 static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
609 
610 static void init_memcg_events(void)
611 {
612 	int i;
613 
614 	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
615 		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
616 }
617 
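/*
 * init_memcg_events() stores index + 1 so that a zero entry means "event not
 * tracked by memcg"; memcg_events_index() undoes the offset and therefore
 * returns -1 for untracked events.
 */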
618 static inline int memcg_events_index(enum vm_event_item idx)
619 {
620 	return mem_cgroup_events_index[idx] - 1;
621 }
622 
623 struct memcg_vmstats_percpu {
624 	/* Local (CPU and cgroup) page state & events */
625 	long			state[MEMCG_NR_STAT];
626 	unsigned long		events[NR_MEMCG_EVENTS];
627 
628 	/* Delta calculation for lockless upward propagation */
629 	long			state_prev[MEMCG_NR_STAT];
630 	unsigned long		events_prev[NR_MEMCG_EVENTS];
631 
632 	/* Cgroup1: threshold notifications & softlimit tree updates */
633 	unsigned long		nr_page_events;
634 	unsigned long		targets[MEM_CGROUP_NTARGETS];
635 
636 	/* Stats updates since the last flush */
637 	unsigned int		stats_updates;
638 };
639 
640 struct memcg_vmstats {
641 	/* Aggregated (CPU and subtree) page state & events */
642 	long			state[MEMCG_NR_STAT];
643 	unsigned long		events[NR_MEMCG_EVENTS];
644 
645 	/* Non-hierarchical (CPU aggregated) page state & events */
646 	long			state_local[MEMCG_NR_STAT];
647 	unsigned long		events_local[NR_MEMCG_EVENTS];
648 
649 	/* Pending child counts during tree propagation */
650 	long			state_pending[MEMCG_NR_STAT];
651 	unsigned long		events_pending[NR_MEMCG_EVENTS];
652 
653 	/* Stats updates since the last flush */
654 	atomic64_t		stats_updates;
655 };
656 
657 /*
658  * memcg and lruvec stats flushing
659  *
660  * Many codepaths leading to stats update or read are performance sensitive and
661  * adding stats flushing in such codepaths is not desirable. So, to optimize the
662  * flushing the kernel does:
663  *
664  * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
665  *    rstat update tree grow unbounded.
666  *
667  * 2) Flush the stats synchronously on reader side only when there are more than
668  *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can let
669  *    stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) updates,
670  *    but only for 2 seconds due to (1).
671  */
672 static void flush_memcg_stats_dwork(struct work_struct *w);
673 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
674 static u64 flush_last_time;
675 
676 #define FLUSH_TIME (2UL*HZ)
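/*
 * Worked example (assuming MEMCG_CHARGE_BATCH is 64): on an 8-CPU machine, a
 * reader-side flush in memcg_should_flush_stats() only happens once more than
 * 64 * 8 = 512 page-sized updates have accumulated since the last flush.
 */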
677 
678 /*
679  * Accessors to ensure that preemption is disabled on PREEMPT_RT, because
680  * disabled preemption cannot be relied upon as a side effect of an acquired
681  * spinlock_t lock. These functions are never used in hardirq context on
682  * PREEMPT_RT and therefore disabling preemption is sufficient.
683  */
684 static void memcg_stats_lock(void)
685 {
686 	preempt_disable_nested();
687 	VM_WARN_ON_IRQS_ENABLED();
688 }
689 
690 static void __memcg_stats_lock(void)
691 {
692 	preempt_disable_nested();
693 }
694 
695 static void memcg_stats_unlock(void)
696 {
697 	preempt_enable_nested();
698 }
699 
700 
701 static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
702 {
703 	return atomic64_read(&memcg->vmstats->stats_updates) >
704 		MEMCG_CHARGE_BATCH * num_online_cpus();
705 }
706 
707 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
708 {
709 	int cpu = smp_processor_id();
710 	unsigned int x;
711 
712 	if (!val)
713 		return;
714 
715 	cgroup_rstat_updated(memcg->css.cgroup, cpu);
716 
717 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
718 		x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
719 					  abs(val));
720 
721 		if (x < MEMCG_CHARGE_BATCH)
722 			continue;
723 
724 		/*
725 		 * If @memcg is already flush-able, increasing stats_updates is
726 		 * redundant. Avoid the overhead of the atomic update.
727 		 */
728 		if (!memcg_should_flush_stats(memcg))
729 			atomic64_add(x, &memcg->vmstats->stats_updates);
730 		__this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
731 	}
732 }
733 
734 static void do_flush_stats(struct mem_cgroup *memcg)
735 {
736 	if (mem_cgroup_is_root(memcg))
737 		WRITE_ONCE(flush_last_time, jiffies_64);
738 
739 	cgroup_rstat_flush(memcg->css.cgroup);
740 }
741 
742 /*
743  * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
744  * @memcg: root of the subtree to flush
745  *
746  * Flushing is serialized by the underlying global rstat lock. There is also a
747  * minimum amount of work to be done even if there are no stat updates to flush.
748  * Hence, we only flush the stats if the updates delta exceeds a threshold. This
749  * avoids unnecessary work and contention on the underlying lock.
750  */
751 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
752 {
753 	if (mem_cgroup_disabled())
754 		return;
755 
756 	if (!memcg)
757 		memcg = root_mem_cgroup;
758 
759 	if (memcg_should_flush_stats(memcg))
760 		do_flush_stats(memcg);
761 }
762 
763 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
764 {
765 	/* Only flush if the periodic flusher is one full cycle late */
766 	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
767 		mem_cgroup_flush_stats(memcg);
768 }
769 
770 static void flush_memcg_stats_dwork(struct work_struct *w)
771 {
772 	/*
773 	 * Deliberately ignore memcg_should_flush_stats() here so that flushing
774 	 * in latency-sensitive paths is as cheap as possible.
775 	 */
776 	do_flush_stats(root_mem_cgroup);
777 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
778 }
779 
780 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
781 {
782 	long x = READ_ONCE(memcg->vmstats->state[idx]);
783 #ifdef CONFIG_SMP
784 	if (x < 0)
785 		x = 0;
786 #endif
787 	return x;
788 }
789 
790 static int memcg_page_state_unit(int item);
791 
792 /*
793  * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
794  * up non-zero sub-page updates to 1 page as zero page updates are ignored.
795  */
796 static int memcg_state_val_in_pages(int idx, int val)
797 {
798 	int unit = memcg_page_state_unit(idx);
799 
800 	if (!val || unit == PAGE_SIZE)
801 		return val;
802 	else
803 		return max(val * unit / PAGE_SIZE, 1UL);
804 }
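/*
 * For example: a 512-byte slab update (unit == 1) is rounded up to 1 page,
 * while a 16-KB kernel stack update (idx == NR_KERNEL_STACK_KB, val == 16,
 * unit == SZ_1K) becomes 16 * 1024 / 4096 = 4 pages on a 4-KB page system.
 */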
805 
806 /**
807  * __mod_memcg_state - update cgroup memory statistics
808  * @memcg: the memory cgroup
809  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
810  * @val: delta to add to the counter, can be negative
811  */
812 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
813 {
814 	if (mem_cgroup_disabled())
815 		return;
816 
817 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
818 	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
819 }
820 
821 /* idx can be of type enum memcg_stat_item or node_stat_item. */
822 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
823 {
824 	long x = READ_ONCE(memcg->vmstats->state_local[idx]);
825 
826 #ifdef CONFIG_SMP
827 	if (x < 0)
828 		x = 0;
829 #endif
830 	return x;
831 }
832 
833 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
834 			      int val)
835 {
836 	struct mem_cgroup_per_node *pn;
837 	struct mem_cgroup *memcg;
838 
839 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
840 	memcg = pn->memcg;
841 
842 	/*
843 	 * Callers from rmap rely on disabled preemption because they never
844 	 * update their counters from in-interrupt context. For these
845 	 * counters we check that the update is never performed from an
846 	 * interrupt context, while other callers need to have interrupts disabled.
847 	 */
848 	__memcg_stats_lock();
849 	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
850 		switch (idx) {
851 		case NR_ANON_MAPPED:
852 		case NR_FILE_MAPPED:
853 		case NR_ANON_THPS:
854 		case NR_SHMEM_PMDMAPPED:
855 		case NR_FILE_PMDMAPPED:
856 			WARN_ON_ONCE(!in_task());
857 			break;
858 		default:
859 			VM_WARN_ON_IRQS_ENABLED();
860 		}
861 	}
862 
863 	/* Update memcg */
864 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
865 
866 	/* Update lruvec */
867 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
868 
869 	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
870 	memcg_stats_unlock();
871 }
872 
873 /**
874  * __mod_lruvec_state - update lruvec memory statistics
875  * @lruvec: the lruvec
876  * @idx: the stat item
877  * @val: delta to add to the counter, can be negative
878  *
879  * The lruvec is the intersection of the NUMA node and a cgroup. This
880  * function updates all three counters that are affected by a
881  * change of state at this level: per-node, per-cgroup, per-lruvec.
882  */
883 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
884 			int val)
885 {
886 	/* Update node */
887 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
888 
889 	/* Update memcg and lruvec */
890 	if (!mem_cgroup_disabled())
891 		__mod_memcg_lruvec_state(lruvec, idx, val);
892 }
893 
894 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
895 			     int val)
896 {
897 	struct mem_cgroup *memcg;
898 	pg_data_t *pgdat = folio_pgdat(folio);
899 	struct lruvec *lruvec;
900 
901 	rcu_read_lock();
902 	memcg = folio_memcg(folio);
903 	/* Untracked pages have no memcg, no lruvec. Update only the node */
904 	if (!memcg) {
905 		rcu_read_unlock();
906 		__mod_node_page_state(pgdat, idx, val);
907 		return;
908 	}
909 
910 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
911 	__mod_lruvec_state(lruvec, idx, val);
912 	rcu_read_unlock();
913 }
914 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
915 
916 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
917 {
918 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
919 	struct mem_cgroup *memcg;
920 	struct lruvec *lruvec;
921 
922 	rcu_read_lock();
923 	memcg = mem_cgroup_from_slab_obj(p);
924 
925 	/*
926 	 * Untracked pages have no memcg, no lruvec. Update only the
927 	 * node. If we reparent the slab objects to the root memcg,
928 	 * when we free the slab object, we need to update the per-memcg
929 	 * vmstats to keep it correct for the root memcg.
930 	 */
931 	if (!memcg) {
932 		__mod_node_page_state(pgdat, idx, val);
933 	} else {
934 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
935 		__mod_lruvec_state(lruvec, idx, val);
936 	}
937 	rcu_read_unlock();
938 }
939 
940 /**
941  * __count_memcg_events - account VM events in a cgroup
942  * @memcg: the memory cgroup
943  * @idx: the event item
944  * @count: the number of events that occurred
945  */
946 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
947 			  unsigned long count)
948 {
949 	int index = memcg_events_index(idx);
950 
951 	if (mem_cgroup_disabled() || index < 0)
952 		return;
953 
954 	memcg_stats_lock();
955 	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
956 	memcg_rstat_updated(memcg, count);
957 	memcg_stats_unlock();
958 }
959 
960 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
961 {
962 	int index = memcg_events_index(event);
963 
964 	if (index < 0)
965 		return 0;
966 	return READ_ONCE(memcg->vmstats->events[index]);
967 }
968 
969 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
970 {
971 	int index = memcg_events_index(event);
972 
973 	if (index < 0)
974 		return 0;
975 
976 	return READ_ONCE(memcg->vmstats->events_local[index]);
977 }
978 
979 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
980 					 int nr_pages)
981 {
982 	/* pagein of a big page is an event. So, ignore page size */
983 	if (nr_pages > 0)
984 		__count_memcg_events(memcg, PGPGIN, 1);
985 	else {
986 		__count_memcg_events(memcg, PGPGOUT, 1);
987 		nr_pages = -nr_pages; /* for event */
988 	}
989 
990 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
991 }
992 
993 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
994 				       enum mem_cgroup_events_target target)
995 {
996 	unsigned long val, next;
997 
998 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
999 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
1000 	/* from time_after() in jiffies.h */
1001 	if ((long)(next - val) < 0) {
1002 		switch (target) {
1003 		case MEM_CGROUP_TARGET_THRESH:
1004 			next = val + THRESHOLDS_EVENTS_TARGET;
1005 			break;
1006 		case MEM_CGROUP_TARGET_SOFTLIMIT:
1007 			next = val + SOFTLIMIT_EVENTS_TARGET;
1008 			break;
1009 		default:
1010 			break;
1011 		}
1012 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
1013 		return true;
1014 	}
1015 	return false;
1016 }
1017 
1018 /*
1019  * Check events in order.
1020  *
1021  */
1022 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
1023 {
1024 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
1025 		return;
1026 
1027 	/* threshold event is triggered in finer grain than soft limit */
1028 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1029 						MEM_CGROUP_TARGET_THRESH))) {
1030 		bool do_softlimit;
1031 
1032 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
1033 						MEM_CGROUP_TARGET_SOFTLIMIT);
1034 		mem_cgroup_threshold(memcg);
1035 		if (unlikely(do_softlimit))
1036 			mem_cgroup_update_tree(memcg, nid);
1037 	}
1038 }
1039 
1040 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1041 {
1042 	/*
1043 	 * mm_update_next_owner() may clear mm->owner to NULL
1044 	 * if it races with swapoff, page migration, etc.
1045 	 * So this can be called with p == NULL.
1046 	 */
1047 	if (unlikely(!p))
1048 		return NULL;
1049 
1050 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1051 }
1052 EXPORT_SYMBOL(mem_cgroup_from_task);
1053 
1054 static __always_inline struct mem_cgroup *active_memcg(void)
1055 {
1056 	if (!in_task())
1057 		return this_cpu_read(int_active_memcg);
1058 	else
1059 		return current->active_memcg;
1060 }
1061 
1062 /**
1063  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1064  * @mm: mm from which memcg should be extracted. It can be NULL.
1065  *
1066  * Obtain a reference on mm->memcg and return it if successful. If mm
1067  * is NULL, then the memcg is chosen as follows:
1068  * 1) The active memcg, if set.
1069  * 2) current->mm->memcg, if available
1070  * 3) root memcg
1071  * If mem_cgroup is disabled, NULL is returned.
1072  */
1073 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1074 {
1075 	struct mem_cgroup *memcg;
1076 
1077 	if (mem_cgroup_disabled())
1078 		return NULL;
1079 
1080 	/*
1081 	 * Page cache insertions can happen without an
1082 	 * actual mm context, e.g. during disk probing
1083 	 * on boot, loopback IO, acct() writes etc.
1084 	 *
1085 	 * No need to css_get on root memcg as the reference
1086 	 * counting is disabled on the root level in the
1087 	 * cgroup core. See CSS_NO_REF.
1088 	 */
1089 	if (unlikely(!mm)) {
1090 		memcg = active_memcg();
1091 		if (unlikely(memcg)) {
1092 			/* remote memcg must hold a ref */
1093 			css_get(&memcg->css);
1094 			return memcg;
1095 		}
1096 		mm = current->mm;
1097 		if (unlikely(!mm))
1098 			return root_mem_cgroup;
1099 	}
1100 
1101 	rcu_read_lock();
1102 	do {
1103 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1104 		if (unlikely(!memcg))
1105 			memcg = root_mem_cgroup;
1106 	} while (!css_tryget(&memcg->css));
1107 	rcu_read_unlock();
1108 	return memcg;
1109 }
1110 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1111 
1112 /**
1113  * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1114  */
1115 struct mem_cgroup *get_mem_cgroup_from_current(void)
1116 {
1117 	struct mem_cgroup *memcg;
1118 
1119 	if (mem_cgroup_disabled())
1120 		return NULL;
1121 
1122 again:
1123 	rcu_read_lock();
1124 	memcg = mem_cgroup_from_task(current);
1125 	if (!css_tryget(&memcg->css)) {
1126 		rcu_read_unlock();
1127 		goto again;
1128 	}
1129 	rcu_read_unlock();
1130 	return memcg;
1131 }
1132 
1133 /**
1134  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1135  * @root: hierarchy root
1136  * @prev: previously returned memcg, NULL on first invocation
1137  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1138  *
1139  * Returns references to children of the hierarchy below @root, or
1140  * @root itself, or %NULL after a full round-trip.
1141  *
1142  * Caller must pass the return value in @prev on subsequent
1143  * invocations for reference counting, or use mem_cgroup_iter_break()
1144  * to cancel a hierarchy walk before the round-trip is complete.
1145  *
1146  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1147  * in the hierarchy among all concurrent reclaimers operating on the
1148  * same node.
1149  */
1150 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1151 				   struct mem_cgroup *prev,
1152 				   struct mem_cgroup_reclaim_cookie *reclaim)
1153 {
1154 	struct mem_cgroup_reclaim_iter *iter;
1155 	struct cgroup_subsys_state *css = NULL;
1156 	struct mem_cgroup *memcg = NULL;
1157 	struct mem_cgroup *pos = NULL;
1158 
1159 	if (mem_cgroup_disabled())
1160 		return NULL;
1161 
1162 	if (!root)
1163 		root = root_mem_cgroup;
1164 
1165 	rcu_read_lock();
1166 
1167 	if (reclaim) {
1168 		struct mem_cgroup_per_node *mz;
1169 
1170 		mz = root->nodeinfo[reclaim->pgdat->node_id];
1171 		iter = &mz->iter;
1172 
1173 		/*
1174 		 * On start, join the current reclaim iteration cycle.
1175 		 * Exit when a concurrent walker completes it.
1176 		 */
1177 		if (!prev)
1178 			reclaim->generation = iter->generation;
1179 		else if (reclaim->generation != iter->generation)
1180 			goto out_unlock;
1181 
1182 		while (1) {
1183 			pos = READ_ONCE(iter->position);
1184 			if (!pos || css_tryget(&pos->css))
1185 				break;
1186 			/*
1187 			 * css reference reached zero, so iter->position will
1188 			 * be cleared by ->css_released. However, we should not
1189 			 * rely on this happening soon, because ->css_released
1190 			 * is called from a work queue, and by busy-waiting we
1191 			 * might block it. So we clear iter->position right
1192 			 * away.
1193 			 */
1194 			(void)cmpxchg(&iter->position, pos, NULL);
1195 		}
1196 	} else if (prev) {
1197 		pos = prev;
1198 	}
1199 
1200 	if (pos)
1201 		css = &pos->css;
1202 
1203 	for (;;) {
1204 		css = css_next_descendant_pre(css, &root->css);
1205 		if (!css) {
1206 			/*
1207 			 * Reclaimers share the hierarchy walk, and a
1208 			 * new one might jump in right at the end of
1209 			 * the hierarchy - make sure they see at least
1210 			 * one group and restart from the beginning.
1211 			 */
1212 			if (!prev)
1213 				continue;
1214 			break;
1215 		}
1216 
1217 		/*
1218 		 * Verify the css and acquire a reference.  The root
1219 		 * is provided by the caller, so we know it's alive
1220 		 * and kicking, and don't take an extra reference.
1221 		 */
1222 		if (css == &root->css || css_tryget(css)) {
1223 			memcg = mem_cgroup_from_css(css);
1224 			break;
1225 		}
1226 	}
1227 
1228 	if (reclaim) {
1229 		/*
1230 		 * The position could have already been updated by a competing
1231 		 * thread, so check that the value hasn't changed since we read
1232 		 * it to avoid reclaiming from the same cgroup twice.
1233 		 */
1234 		(void)cmpxchg(&iter->position, pos, memcg);
1235 
1236 		if (pos)
1237 			css_put(&pos->css);
1238 
1239 		if (!memcg)
1240 			iter->generation++;
1241 	}
1242 
1243 out_unlock:
1244 	rcu_read_unlock();
1245 	if (prev && prev != root)
1246 		css_put(&prev->css);
1247 
1248 	return memcg;
1249 }
1250 
1251 /**
1252  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1253  * @root: hierarchy root
1254  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1255  */
1256 void mem_cgroup_iter_break(struct mem_cgroup *root,
1257 			   struct mem_cgroup *prev)
1258 {
1259 	if (!root)
1260 		root = root_mem_cgroup;
1261 	if (prev && prev != root)
1262 		css_put(&prev->css);
1263 }
1264 
1265 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1266 					struct mem_cgroup *dead_memcg)
1267 {
1268 	struct mem_cgroup_reclaim_iter *iter;
1269 	struct mem_cgroup_per_node *mz;
1270 	int nid;
1271 
1272 	for_each_node(nid) {
1273 		mz = from->nodeinfo[nid];
1274 		iter = &mz->iter;
1275 		cmpxchg(&iter->position, dead_memcg, NULL);
1276 	}
1277 }
1278 
1279 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1280 {
1281 	struct mem_cgroup *memcg = dead_memcg;
1282 	struct mem_cgroup *last;
1283 
1284 	do {
1285 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1286 		last = memcg;
1287 	} while ((memcg = parent_mem_cgroup(memcg)));
1288 
1289 	/*
1290 	 * When cgroup1 non-hierarchy mode is used,
1291 	 * parent_mem_cgroup() does not walk all the way up to the
1292 	 * cgroup root (root_mem_cgroup). So we have to handle
1293 	 * dead_memcg from cgroup root separately.
1294 	 */
1295 	if (!mem_cgroup_is_root(last))
1296 		__invalidate_reclaim_iterators(root_mem_cgroup,
1297 						dead_memcg);
1298 }
1299 
1300 /**
1301  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1302  * @memcg: hierarchy root
1303  * @fn: function to call for each task
1304  * @arg: argument passed to @fn
1305  *
1306  * This function iterates over tasks attached to @memcg or to any of its
1307  * descendants and calls @fn for each task. If @fn returns a non-zero
1308  * value, the function breaks the iteration loop. Otherwise, it will iterate
1309  * over all tasks and return 0.
1310  *
1311  * This function must not be called for the root memory cgroup.
1312  */
1313 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1314 			   int (*fn)(struct task_struct *, void *), void *arg)
1315 {
1316 	struct mem_cgroup *iter;
1317 	int ret = 0;
1318 
1319 	BUG_ON(mem_cgroup_is_root(memcg));
1320 
1321 	for_each_mem_cgroup_tree(iter, memcg) {
1322 		struct css_task_iter it;
1323 		struct task_struct *task;
1324 
1325 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1326 		while (!ret && (task = css_task_iter_next(&it)))
1327 			ret = fn(task, arg);
1328 		css_task_iter_end(&it);
1329 		if (ret) {
1330 			mem_cgroup_iter_break(memcg, iter);
1331 			break;
1332 		}
1333 	}
1334 }
1335 
1336 #ifdef CONFIG_DEBUG_VM
1337 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1338 {
1339 	struct mem_cgroup *memcg;
1340 
1341 	if (mem_cgroup_disabled())
1342 		return;
1343 
1344 	memcg = folio_memcg(folio);
1345 
1346 	if (!memcg)
1347 		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1348 	else
1349 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1350 }
1351 #endif
1352 
1353 /**
1354  * folio_lruvec_lock - Lock the lruvec for a folio.
1355  * @folio: Pointer to the folio.
1356  *
1357  * These functions are safe to use under any of the following conditions:
1358  * - folio locked
1359  * - folio_test_lru false
1360  * - folio_memcg_lock()
1361  * - folio frozen (refcount of 0)
1362  *
1363  * Return: The lruvec this folio is on with its lock held.
1364  */
1365 struct lruvec *folio_lruvec_lock(struct folio *folio)
1366 {
1367 	struct lruvec *lruvec = folio_lruvec(folio);
1368 
1369 	spin_lock(&lruvec->lru_lock);
1370 	lruvec_memcg_debug(lruvec, folio);
1371 
1372 	return lruvec;
1373 }
1374 
1375 /**
1376  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1377  * @folio: Pointer to the folio.
1378  *
1379  * These functions are safe to use under any of the following conditions:
1380  * - folio locked
1381  * - folio_test_lru false
1382  * - folio_memcg_lock()
1383  * - folio frozen (refcount of 0)
1384  *
1385  * Return: The lruvec this folio is on with its lock held and interrupts
1386  * disabled.
1387  */
1388 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1389 {
1390 	struct lruvec *lruvec = folio_lruvec(folio);
1391 
1392 	spin_lock_irq(&lruvec->lru_lock);
1393 	lruvec_memcg_debug(lruvec, folio);
1394 
1395 	return lruvec;
1396 }
1397 
1398 /**
1399  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1400  * @folio: Pointer to the folio.
1401  * @flags: Pointer to irqsave flags.
1402  *
1403  * These functions are safe to use under any of the following conditions:
1404  * - folio locked
1405  * - folio_test_lru false
1406  * - folio_memcg_lock()
1407  * - folio frozen (refcount of 0)
1408  *
1409  * Return: The lruvec this folio is on with its lock held and interrupts
1410  * disabled.
1411  */
1412 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1413 		unsigned long *flags)
1414 {
1415 	struct lruvec *lruvec = folio_lruvec(folio);
1416 
1417 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1418 	lruvec_memcg_debug(lruvec, folio);
1419 
1420 	return lruvec;
1421 }
1422 
1423 /**
1424  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1425  * @lruvec: mem_cgroup per zone lru vector
1426  * @lru: index of lru list the page is sitting on
1427  * @zid: zone id of the accounted pages
1428  * @nr_pages: positive when adding or negative when removing
1429  *
1430  * This function must be called under lru_lock, just before a page is added
1431  * to or just after a page is removed from an lru list.
1432  */
1433 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1434 				int zid, int nr_pages)
1435 {
1436 	struct mem_cgroup_per_node *mz;
1437 	unsigned long *lru_size;
1438 	long size;
1439 
1440 	if (mem_cgroup_disabled())
1441 		return;
1442 
1443 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1444 	lru_size = &mz->lru_zone_size[zid][lru];
1445 
1446 	if (nr_pages < 0)
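	/*
	 * Apply a removal before the sanity check and an addition after it,
	 * so that the WARN_ONCE below catches the case where removing pages
	 * would drive the zone lru size negative.
	 */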
1447 		*lru_size += nr_pages;
1448 
1449 	size = *lru_size;
1450 	if (WARN_ONCE(size < 0,
1451 		"%s(%p, %d, %d): lru_size %ld\n",
1452 		__func__, lruvec, lru, nr_pages, size)) {
1453 		VM_BUG_ON(1);
1454 		*lru_size = 0;
1455 	}
1456 
1457 	if (nr_pages > 0)
1458 		*lru_size += nr_pages;
1459 }
1460 
1461 /**
1462  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1463  * @memcg: the memory cgroup
1464  *
1465  * Returns the maximum amount of memory @memcg can be charged with, in
1466  * pages.
1467  */
1468 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1469 {
1470 	unsigned long margin = 0;
1471 	unsigned long count;
1472 	unsigned long limit;
1473 
1474 	count = page_counter_read(&memcg->memory);
1475 	limit = READ_ONCE(memcg->memory.max);
1476 	if (count < limit)
1477 		margin = limit - count;
1478 
1479 	if (do_memsw_account()) {
1480 		count = page_counter_read(&memcg->memsw);
1481 		limit = READ_ONCE(memcg->memsw.max);
1482 		if (count < limit)
1483 			margin = min(margin, limit - count);
1484 		else
1485 			margin = 0;
1486 	}
1487 
1488 	return margin;
1489 }
1490 
1491 /*
1492  * A routine for checking whether "mem" is under move_account() or not.
1493  *
1494  * Checks whether a cgroup is mc.from or mc.to or under the hierarchy of
1495  * moving cgroups. This is for waiting at high memory pressure
1496  * caused by "move".
1497  */
1498 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1499 {
1500 	struct mem_cgroup *from;
1501 	struct mem_cgroup *to;
1502 	bool ret = false;
1503 	/*
1504 	 * Unlike the task_move routines, we access mc.to and mc.from without
1505 	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1506 	 */
1507 	spin_lock(&mc.lock);
1508 	from = mc.from;
1509 	to = mc.to;
1510 	if (!from)
1511 		goto unlock;
1512 
1513 	ret = mem_cgroup_is_descendant(from, memcg) ||
1514 		mem_cgroup_is_descendant(to, memcg);
1515 unlock:
1516 	spin_unlock(&mc.lock);
1517 	return ret;
1518 }
1519 
1520 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1521 {
1522 	if (mc.moving_task && current != mc.moving_task) {
1523 		if (mem_cgroup_under_move(memcg)) {
1524 			DEFINE_WAIT(wait);
1525 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1526 			/* moving charge context might have finished. */
1527 			if (mc.moving_task)
1528 				schedule();
1529 			finish_wait(&mc.waitq, &wait);
1530 			return true;
1531 		}
1532 	}
1533 	return false;
1534 }
1535 
1536 struct memory_stat {
1537 	const char *name;
1538 	unsigned int idx;
1539 };
1540 
1541 static const struct memory_stat memory_stats[] = {
1542 	{ "anon",			NR_ANON_MAPPED			},
1543 	{ "file",			NR_FILE_PAGES			},
1544 	{ "kernel",			MEMCG_KMEM			},
1545 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1546 	{ "pagetables",			NR_PAGETABLE			},
1547 	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1548 	{ "percpu",			MEMCG_PERCPU_B			},
1549 	{ "sock",			MEMCG_SOCK			},
1550 	{ "vmalloc",			MEMCG_VMALLOC			},
1551 	{ "shmem",			NR_SHMEM			},
1552 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1553 	{ "zswap",			MEMCG_ZSWAP_B			},
1554 	{ "zswapped",			MEMCG_ZSWAPPED			},
1555 #endif
1556 	{ "file_mapped",		NR_FILE_MAPPED			},
1557 	{ "file_dirty",			NR_FILE_DIRTY			},
1558 	{ "file_writeback",		NR_WRITEBACK			},
1559 #ifdef CONFIG_SWAP
1560 	{ "swapcached",			NR_SWAPCACHE			},
1561 #endif
1562 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1563 	{ "anon_thp",			NR_ANON_THPS			},
1564 	{ "file_thp",			NR_FILE_THPS			},
1565 	{ "shmem_thp",			NR_SHMEM_THPS			},
1566 #endif
1567 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1568 	{ "active_anon",		NR_ACTIVE_ANON			},
1569 	{ "inactive_file",		NR_INACTIVE_FILE		},
1570 	{ "active_file",		NR_ACTIVE_FILE			},
1571 	{ "unevictable",		NR_UNEVICTABLE			},
1572 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1573 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1574 
1575 	/* The memory events */
1576 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1577 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1578 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1579 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1580 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1581 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1582 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1583 };
1584 
1585 /* The actual unit of the state item, not the same as the output unit */
1586 static int memcg_page_state_unit(int item)
1587 {
1588 	switch (item) {
1589 	case MEMCG_PERCPU_B:
1590 	case MEMCG_ZSWAP_B:
1591 	case NR_SLAB_RECLAIMABLE_B:
1592 	case NR_SLAB_UNRECLAIMABLE_B:
1593 		return 1;
1594 	case NR_KERNEL_STACK_KB:
1595 		return SZ_1K;
1596 	default:
1597 		return PAGE_SIZE;
1598 	}
1599 }
1600 
1601 /* Translate stat items to the correct unit for memory.stat output */
1602 static int memcg_page_state_output_unit(int item)
1603 {
1604 	/*
1605 	 * Workingset state is actually in pages, but we export it to userspace
1606 	 * as a scalar count of events, so special case it here.
1607 	 */
1608 	switch (item) {
1609 	case WORKINGSET_REFAULT_ANON:
1610 	case WORKINGSET_REFAULT_FILE:
1611 	case WORKINGSET_ACTIVATE_ANON:
1612 	case WORKINGSET_ACTIVATE_FILE:
1613 	case WORKINGSET_RESTORE_ANON:
1614 	case WORKINGSET_RESTORE_FILE:
1615 	case WORKINGSET_NODERECLAIM:
1616 		return 1;
1617 	default:
1618 		return memcg_page_state_unit(item);
1619 	}
1620 }
1621 
1622 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1623 						    int item)
1624 {
1625 	return memcg_page_state(memcg, item) *
1626 		memcg_page_state_output_unit(item);
1627 }
1628 
1629 static inline unsigned long memcg_page_state_local_output(
1630 		struct mem_cgroup *memcg, int item)
1631 {
1632 	return memcg_page_state_local(memcg, item) *
1633 		memcg_page_state_output_unit(item);
1634 }
1635 
1636 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1637 {
1638 	int i;
1639 
1640 	/*
1641 	 * Provide statistics on the state of the memory subsystem as
1642 	 * well as cumulative event counters that show past behavior.
1643 	 *
1644 	 * This list is ordered following a combination of these gradients:
1645 	 * 1) generic big picture -> specifics and details
1646 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1647 	 *
1648 	 * Current memory state:
1649 	 */
1650 	mem_cgroup_flush_stats(memcg);
1651 
1652 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1653 		u64 size;
1654 
1655 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1656 		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1657 
1658 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1659 			size += memcg_page_state_output(memcg,
1660 							NR_SLAB_RECLAIMABLE_B);
1661 			seq_buf_printf(s, "slab %llu\n", size);
1662 		}
1663 	}
1664 
1665 	/* Accumulated memory events */
1666 	seq_buf_printf(s, "pgscan %lu\n",
1667 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1668 		       memcg_events(memcg, PGSCAN_DIRECT) +
1669 		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1670 	seq_buf_printf(s, "pgsteal %lu\n",
1671 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1672 		       memcg_events(memcg, PGSTEAL_DIRECT) +
1673 		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1674 
1675 	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1676 		if (memcg_vm_event_stat[i] == PGPGIN ||
1677 		    memcg_vm_event_stat[i] == PGPGOUT)
1678 			continue;
1679 
1680 		seq_buf_printf(s, "%s %lu\n",
1681 			       vm_event_name(memcg_vm_event_stat[i]),
1682 			       memcg_events(memcg, memcg_vm_event_stat[i]));
1683 	}
1684 
1685 	/* The above should easily fit into one page */
1686 	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1687 }
1688 
1689 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1690 
1691 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1692 {
1693 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1694 		memcg_stat_format(memcg, s);
1695 	else
1696 		memcg1_stat_format(memcg, s);
1697 	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1698 }
1699 
1700 /**
1701  * mem_cgroup_print_oom_context: Print OOM information relevant to
1702  * memory controller.
1703  * @memcg: The memory cgroup that went over limit
1704  * @p: Task that is going to be killed
1705  *
1706  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1707  * enabled
1708  */
1709 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1710 {
1711 	rcu_read_lock();
1712 
1713 	if (memcg) {
1714 		pr_cont(",oom_memcg=");
1715 		pr_cont_cgroup_path(memcg->css.cgroup);
1716 	} else
1717 		pr_cont(",global_oom");
1718 	if (p) {
1719 		pr_cont(",task_memcg=");
1720 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1721 	}
1722 	rcu_read_unlock();
1723 }
1724 
1725 /**
1726  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1727  * memory controller.
1728  * @memcg: The memory cgroup that went over limit
1729  */
1730 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1731 {
1732 	/* Use a static buffer, as the caller is holding oom_lock. */
1733 	static char buf[PAGE_SIZE];
1734 	struct seq_buf s;
1735 
1736 	lockdep_assert_held(&oom_lock);
1737 
1738 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1739 		K((u64)page_counter_read(&memcg->memory)),
1740 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1741 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1742 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1743 			K((u64)page_counter_read(&memcg->swap)),
1744 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1745 	else {
1746 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1747 			K((u64)page_counter_read(&memcg->memsw)),
1748 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1749 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1750 			K((u64)page_counter_read(&memcg->kmem)),
1751 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1752 	}
1753 
1754 	pr_info("Memory cgroup stats for ");
1755 	pr_cont_cgroup_path(memcg->css.cgroup);
1756 	pr_cont(":");
1757 	seq_buf_init(&s, buf, sizeof(buf));
1758 	memory_stat_format(memcg, &s);
1759 	seq_buf_do_printk(&s, KERN_INFO);
1760 }
1761 
1762 /*
1763  * Return the memory (and swap, if configured) limit for a memcg.
1764  */
1765 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1766 {
1767 	unsigned long max = READ_ONCE(memcg->memory.max);
1768 
1769 	if (do_memsw_account()) {
1770 		if (mem_cgroup_swappiness(memcg)) {
1771 			/* Calculate swap excess capacity from memsw limit */
1772 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1773 
1774 			max += min(swap, (unsigned long)total_swap_pages);
1775 		}
1776 	} else {
1777 		if (mem_cgroup_swappiness(memcg))
1778 			max += min(READ_ONCE(memcg->swap.max),
1779 				   (unsigned long)total_swap_pages);
1780 	}
1781 	return max;
1782 }
1783 
1784 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1785 {
1786 	return page_counter_read(&memcg->memory);
1787 }
1788 
1789 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1790 				     int order)
1791 {
1792 	struct oom_control oc = {
1793 		.zonelist = NULL,
1794 		.nodemask = NULL,
1795 		.memcg = memcg,
1796 		.gfp_mask = gfp_mask,
1797 		.order = order,
1798 	};
1799 	bool ret = true;
1800 
1801 	if (mutex_lock_killable(&oom_lock))
1802 		return true;
1803 
1804 	if (mem_cgroup_margin(memcg) >= (1 << order))
1805 		goto unlock;
1806 
1807 	/*
1808 	 * A few threads which were not waiting at mutex_lock_killable() can
1809 	 * fail to bail out. Therefore, check again after holding oom_lock.
1810 	 */
1811 	ret = task_is_dying() || out_of_memory(&oc);
1812 
1813 unlock:
1814 	mutex_unlock(&oom_lock);
1815 	return ret;
1816 }
1817 
1818 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1819 				   pg_data_t *pgdat,
1820 				   gfp_t gfp_mask,
1821 				   unsigned long *total_scanned)
1822 {
1823 	struct mem_cgroup *victim = NULL;
1824 	int total = 0;
1825 	int loop = 0;
1826 	unsigned long excess;
1827 	unsigned long nr_scanned;
1828 	struct mem_cgroup_reclaim_cookie reclaim = {
1829 		.pgdat = pgdat,
1830 	};
1831 
1832 	excess = soft_limit_excess(root_memcg);
1833 
1834 	while (1) {
1835 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1836 		if (!victim) {
1837 			loop++;
1838 			if (loop >= 2) {
1839 				/*
1840 				 * If we have not been able to reclaim
1841 				 * anything, it might be because there are
1842 				 * no reclaimable pages under this hierarchy.
1843 				 */
1844 				if (!total)
1845 					break;
1846 				/*
1847 				 * We want to do more targeted reclaim:
1848 				 * excess >> 2 is large enough to avoid
1849 				 * reclaiming too much, yet small enough that
1850 				 * we don't keep coming back to this cgroup.
1851 				 */
1852 				if (total >= (excess >> 2) ||
1853 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1854 					break;
1855 			}
1856 			continue;
1857 		}
1858 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1859 					pgdat, &nr_scanned);
1860 		*total_scanned += nr_scanned;
1861 		if (!soft_limit_excess(root_memcg))
1862 			break;
1863 	}
1864 	mem_cgroup_iter_break(root_memcg, victim);
1865 	return total;
1866 }
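/*
 * Illustrative behaviour of the loop above (hypothetical numbers): with an
 * initial excess of 1024 pages, reclaim stops once roughly 256 pages
 * (excess >> 2) have been reclaimed after the hierarchy has been walked at
 * least twice, once the soft limit is no longer exceeded, or after
 * MEM_CGROUP_MAX_RECLAIM_LOOPS walks without enough progress.
 */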
1867 
1868 #ifdef CONFIG_LOCKDEP
1869 static struct lockdep_map memcg_oom_lock_dep_map = {
1870 	.name = "memcg_oom_lock",
1871 };
1872 #endif
1873 
1874 static DEFINE_SPINLOCK(memcg_oom_lock);
1875 
1876 /*
1877  * Check whether the OOM killer is already running under our hierarchy.
1878  * If someone else is already running it, return false.
1879  */
1880 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1881 {
1882 	struct mem_cgroup *iter, *failed = NULL;
1883 
1884 	spin_lock(&memcg_oom_lock);
1885 
1886 	for_each_mem_cgroup_tree(iter, memcg) {
1887 		if (iter->oom_lock) {
1888 			/*
1889 			 * This subtree of our hierarchy is already locked,
1890 			 * so we cannot take the lock.
1891 			 */
1892 			failed = iter;
1893 			mem_cgroup_iter_break(memcg, iter);
1894 			break;
1895 		} else
1896 			iter->oom_lock = true;
1897 	}
1898 
1899 	if (failed) {
1900 		/*
1901 		 * We failed to lock the whole subtree, so clean up what
1902 		 * we already set up, up to the failing subtree.
1903 		 */
1904 		for_each_mem_cgroup_tree(iter, memcg) {
1905 			if (iter == failed) {
1906 				mem_cgroup_iter_break(memcg, iter);
1907 				break;
1908 			}
1909 			iter->oom_lock = false;
1910 		}
1911 	} else
1912 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1913 
1914 	spin_unlock(&memcg_oom_lock);
1915 
1916 	return !failed;
1917 }
1918 
1919 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1920 {
1921 	struct mem_cgroup *iter;
1922 
1923 	spin_lock(&memcg_oom_lock);
1924 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1925 	for_each_mem_cgroup_tree(iter, memcg)
1926 		iter->oom_lock = false;
1927 	spin_unlock(&memcg_oom_lock);
1928 }
1929 
1930 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1931 {
1932 	struct mem_cgroup *iter;
1933 
1934 	spin_lock(&memcg_oom_lock);
1935 	for_each_mem_cgroup_tree(iter, memcg)
1936 		iter->under_oom++;
1937 	spin_unlock(&memcg_oom_lock);
1938 }
1939 
1940 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1941 {
1942 	struct mem_cgroup *iter;
1943 
1944 	/*
1945 	 * Be careful about under_oom underflows because a child memcg
1946 	 * could have been added after mem_cgroup_mark_under_oom.
1947 	 */
1948 	spin_lock(&memcg_oom_lock);
1949 	for_each_mem_cgroup_tree(iter, memcg)
1950 		if (iter->under_oom > 0)
1951 			iter->under_oom--;
1952 	spin_unlock(&memcg_oom_lock);
1953 }
1954 
1955 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1956 
1957 struct oom_wait_info {
1958 	struct mem_cgroup *memcg;
1959 	wait_queue_entry_t	wait;
1960 };
1961 
1962 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1963 	unsigned mode, int sync, void *arg)
1964 {
1965 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1966 	struct mem_cgroup *oom_wait_memcg;
1967 	struct oom_wait_info *oom_wait_info;
1968 
1969 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1970 	oom_wait_memcg = oom_wait_info->memcg;
1971 
1972 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1973 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1974 		return 0;
1975 	return autoremove_wake_function(wait, mode, sync, arg);
1976 }
1977 
1978 static void memcg_oom_recover(struct mem_cgroup *memcg)
1979 {
1980 	/*
1981 	 * For the following lockless ->under_oom test, the only required
1982 	 * guarantee is that it must see the state asserted by an OOM when
1983 	 * this function is called as a result of userland actions
1984 	 * triggered by the notification of the OOM.  This is trivially
1985 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1986 	 * triggering notification.
1987 	 */
1988 	if (memcg && memcg->under_oom)
1989 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1990 }
1991 
1992 /*
1993  * Returns true if one or more processes were successfully killed, though in
1994  * some corner cases it can return true even without killing any process.
1995  */
1996 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1997 {
1998 	bool locked, ret;
1999 
2000 	if (order > PAGE_ALLOC_COSTLY_ORDER)
2001 		return false;
2002 
2003 	memcg_memory_event(memcg, MEMCG_OOM);
2004 
2005 	/*
2006 	 * We are in the middle of the charge context here, so we
2007 	 * don't want to block when potentially sitting on a callstack
2008 	 * that holds all kinds of filesystem and mm locks.
2009 	 *
2010 	 * cgroup1 allows disabling the OOM killer and waiting for outside
2011 	 * handling until the charge can succeed; remember the context and put
2012 	 * the task to sleep at the end of the page fault when all locks are
2013 	 * released.
2014 	 *
2015 	 * On the other hand, in-kernel OOM killer allows for an async victim
2016 	 * memory reclaim (oom_reaper) and that means that we are not solely
2017 	 * relying on the oom victim to make a forward progress and we can
2018 	 * invoke the oom killer here.
2019 	 *
2020 	 * Please note that mem_cgroup_out_of_memory might fail to find a
2021 	 * victim and then we have to bail out from the charge path.
2022 	 */
2023 	if (READ_ONCE(memcg->oom_kill_disable)) {
2024 		if (current->in_user_fault) {
2025 			css_get(&memcg->css);
2026 			current->memcg_in_oom = memcg;
2027 			current->memcg_oom_gfp_mask = mask;
2028 			current->memcg_oom_order = order;
2029 		}
2030 		return false;
2031 	}
2032 
2033 	mem_cgroup_mark_under_oom(memcg);
2034 
2035 	locked = mem_cgroup_oom_trylock(memcg);
2036 
2037 	if (locked)
2038 		mem_cgroup_oom_notify(memcg);
2039 
2040 	mem_cgroup_unmark_under_oom(memcg);
2041 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
2042 
2043 	if (locked)
2044 		mem_cgroup_oom_unlock(memcg);
2045 
2046 	return ret;
2047 }
2048 
2049 /**
2050  * mem_cgroup_oom_synchronize - complete memcg OOM handling
2051  * @handle: actually kill/wait or just clean up the OOM state
2052  *
2053  * This has to be called at the end of a page fault if the memcg OOM
2054  * handler was enabled.
2055  *
2056  * Memcg supports userspace OOM handling where failed allocations must
2057  * sleep on a waitqueue until the userspace task resolves the
2058  * situation.  Sleeping directly in the charge context with all kinds
2059  * of locks held is not a good idea, instead we remember an OOM state
2060  * in the task and mem_cgroup_oom_synchronize() has to be called at
2061  * the end of the page fault to complete the OOM handling.
2062  *
2063  * Returns %true if an ongoing memcg OOM situation was detected and
2064  * completed, %false otherwise.
2065  */
2066 bool mem_cgroup_oom_synchronize(bool handle)
2067 {
2068 	struct mem_cgroup *memcg = current->memcg_in_oom;
2069 	struct oom_wait_info owait;
2070 	bool locked;
2071 
2072 	/* OOM is global, do not handle */
2073 	if (!memcg)
2074 		return false;
2075 
2076 	if (!handle)
2077 		goto cleanup;
2078 
2079 	owait.memcg = memcg;
2080 	owait.wait.flags = 0;
2081 	owait.wait.func = memcg_oom_wake_function;
2082 	owait.wait.private = current;
2083 	INIT_LIST_HEAD(&owait.wait.entry);
2084 
2085 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2086 	mem_cgroup_mark_under_oom(memcg);
2087 
2088 	locked = mem_cgroup_oom_trylock(memcg);
2089 
2090 	if (locked)
2091 		mem_cgroup_oom_notify(memcg);
2092 
2093 	schedule();
2094 	mem_cgroup_unmark_under_oom(memcg);
2095 	finish_wait(&memcg_oom_waitq, &owait.wait);
2096 
2097 	if (locked)
2098 		mem_cgroup_oom_unlock(memcg);
2099 cleanup:
2100 	current->memcg_in_oom = NULL;
2101 	css_put(&memcg->css);
2102 	return true;
2103 }
2104 
2105 /**
2106  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2107  * @victim: task to be killed by the OOM killer
2108  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2109  *
2110  * Returns a pointer to a memory cgroup, which has to be cleaned up
2111  * by killing all of its OOM-killable tasks.
2112  *
2113  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2114  */
2115 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2116 					    struct mem_cgroup *oom_domain)
2117 {
2118 	struct mem_cgroup *oom_group = NULL;
2119 	struct mem_cgroup *memcg;
2120 
2121 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2122 		return NULL;
2123 
2124 	if (!oom_domain)
2125 		oom_domain = root_mem_cgroup;
2126 
2127 	rcu_read_lock();
2128 
2129 	memcg = mem_cgroup_from_task(victim);
2130 	if (mem_cgroup_is_root(memcg))
2131 		goto out;
2132 
2133 	/*
2134 	 * If the victim task has been asynchronously moved to a different
2135 	 * memory cgroup, we might end up killing tasks outside oom_domain.
2136 	 * In this case it's better to ignore memory.group.oom.
2137 	 */
2138 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2139 		goto out;
2140 
2141 	/*
2142 	 * Traverse the memory cgroup hierarchy from the victim task's
2143 	 * cgroup up to the OOMing cgroup (or root) to find the
2144 	 * highest-level memory cgroup with oom.group set.
2145 	 */
2146 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2147 		if (READ_ONCE(memcg->oom_group))
2148 			oom_group = memcg;
2149 
2150 		if (memcg == oom_domain)
2151 			break;
2152 	}
2153 
2154 	if (oom_group)
2155 		css_get(&oom_group->css);
2156 out:
2157 	rcu_read_unlock();
2158 
2159 	return oom_group;
2160 }
2161 
2162 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2163 {
2164 	pr_info("Tasks in ");
2165 	pr_cont_cgroup_path(memcg->css.cgroup);
2166 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2167 }
2168 
2169 /**
2170  * folio_memcg_lock - Bind a folio to its memcg.
2171  * @folio: The folio.
2172  *
2173  * This function prevents unlocked LRU folios from being moved to
2174  * another cgroup.
2175  *
2176  * It ensures the lifetime of the bound memcg.  The caller is responsible
2177  * for the lifetime of the folio.
2178  */
2179 void folio_memcg_lock(struct folio *folio)
2180 {
2181 	struct mem_cgroup *memcg;
2182 	unsigned long flags;
2183 
2184 	/*
2185 	 * The RCU lock is held throughout the transaction.  The fast
2186 	 * path can get away without acquiring the memcg->move_lock
2187 	 * because page moving starts with an RCU grace period.
2188 	 */
2189 	rcu_read_lock();
2190 
2191 	if (mem_cgroup_disabled())
2192 		return;
2193 again:
2194 	memcg = folio_memcg(folio);
2195 	if (unlikely(!memcg))
2196 		return;
2197 
2198 #ifdef CONFIG_PROVE_LOCKING
2199 	local_irq_save(flags);
2200 	might_lock(&memcg->move_lock);
2201 	local_irq_restore(flags);
2202 #endif
2203 
2204 	if (atomic_read(&memcg->moving_account) <= 0)
2205 		return;
2206 
2207 	spin_lock_irqsave(&memcg->move_lock, flags);
2208 	if (memcg != folio_memcg(folio)) {
2209 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2210 		goto again;
2211 	}
2212 
2213 	/*
2214 	 * When charge migration first begins, we can have multiple
2215 	 * critical sections holding the fast-path RCU lock and one
2216 	 * holding the slowpath move_lock. Track the task that holds the
2217 	 * move_lock so folio_memcg_unlock() can release it.
2218 	 */
2219 	memcg->move_lock_task = current;
2220 	memcg->move_lock_flags = flags;
2221 }
2222 
2223 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2224 {
2225 	if (memcg && memcg->move_lock_task == current) {
2226 		unsigned long flags = memcg->move_lock_flags;
2227 
2228 		memcg->move_lock_task = NULL;
2229 		memcg->move_lock_flags = 0;
2230 
2231 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2232 	}
2233 
2234 	rcu_read_unlock();
2235 }
2236 
2237 /**
2238  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2239  * @folio: The folio.
2240  *
2241  * This releases the binding created by folio_memcg_lock().  This does
2242  * not change the accounting of this folio to its memcg, but it does
2243  * permit others to change it.
2244  */
2245 void folio_memcg_unlock(struct folio *folio)
2246 {
2247 	__folio_memcg_unlock(folio_memcg(folio));
2248 }
2249 
2250 struct memcg_stock_pcp {
2251 	local_lock_t stock_lock;
2252 	struct mem_cgroup *cached; /* must never be the root cgroup */
2253 	unsigned int nr_pages;
2254 
2255 #ifdef CONFIG_MEMCG_KMEM
2256 	struct obj_cgroup *cached_objcg;
2257 	struct pglist_data *cached_pgdat;
2258 	unsigned int nr_bytes;
2259 	int nr_slab_reclaimable_b;
2260 	int nr_slab_unreclaimable_b;
2261 #endif
2262 
2263 	struct work_struct work;
2264 	unsigned long flags;
2265 #define FLUSHING_CACHED_CHARGE	0
2266 };
2267 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2268 	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2269 };
2270 static DEFINE_MUTEX(percpu_charge_mutex);
2271 
2272 #ifdef CONFIG_MEMCG_KMEM
2273 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2274 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2275 				     struct mem_cgroup *root_memcg);
2276 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2277 
2278 #else
2279 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2280 {
2281 	return NULL;
2282 }
2283 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2284 				     struct mem_cgroup *root_memcg)
2285 {
2286 	return false;
2287 }
2288 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2289 {
2290 }
2291 #endif
2292 
2293 /**
2294  * consume_stock: Try to consume stocked charge on this cpu.
2295  * @memcg: memcg to consume from.
2296  * @nr_pages: how many pages to charge.
2297  *
2298  * The charges will only happen if @memcg matches the current cpu's memcg
2299  * stock, and at least @nr_pages are available in that stock.  If the stock
2300  * cannot service the request, the caller's slow path will refill it later.
2301  *
2302  * Returns true if successful, false otherwise.
2303  */
2304 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2305 {
2306 	struct memcg_stock_pcp *stock;
2307 	unsigned long flags;
2308 	bool ret = false;
2309 
2310 	if (nr_pages > MEMCG_CHARGE_BATCH)
2311 		return ret;
2312 
2313 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2314 
2315 	stock = this_cpu_ptr(&memcg_stock);
2316 	if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2317 		stock->nr_pages -= nr_pages;
2318 		ret = true;
2319 	}
2320 
2321 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2322 
2323 	return ret;
2324 }
2325 
2326 /*
2327  * Return the stock cached in the percpu area to the page counters and reset it.
2328  */
2329 static void drain_stock(struct memcg_stock_pcp *stock)
2330 {
2331 	struct mem_cgroup *old = READ_ONCE(stock->cached);
2332 
2333 	if (!old)
2334 		return;
2335 
2336 	if (stock->nr_pages) {
2337 		page_counter_uncharge(&old->memory, stock->nr_pages);
2338 		if (do_memsw_account())
2339 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2340 		stock->nr_pages = 0;
2341 	}
2342 
2343 	css_put(&old->css);
2344 	WRITE_ONCE(stock->cached, NULL);
2345 }
2346 
2347 static void drain_local_stock(struct work_struct *dummy)
2348 {
2349 	struct memcg_stock_pcp *stock;
2350 	struct obj_cgroup *old = NULL;
2351 	unsigned long flags;
2352 
2353 	/*
2354 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2355 	 * drain_stock races is that we always operate on local CPU stock
2356 	 * here with IRQs disabled.
2357 	 */
2358 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2359 
2360 	stock = this_cpu_ptr(&memcg_stock);
2361 	old = drain_obj_stock(stock);
2362 	drain_stock(stock);
2363 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2364 
2365 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2366 	if (old)
2367 		obj_cgroup_put(old);
2368 }
2369 
2370 /*
2371  * Cache @nr_pages worth of charges in the local per-cpu area.
2372  * They will be consumed by consume_stock() later.
2373  */
2374 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2375 {
2376 	struct memcg_stock_pcp *stock;
2377 
2378 	stock = this_cpu_ptr(&memcg_stock);
2379 	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2380 		drain_stock(stock);
2381 		css_get(&memcg->css);
2382 		WRITE_ONCE(stock->cached, memcg);
2383 	}
2384 	stock->nr_pages += nr_pages;
2385 
2386 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2387 		drain_stock(stock);
2388 }
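/*
 * For illustration, assuming MEMCG_CHARGE_BATCH is 64 pages: refilling 32
 * pages into a stock that already caches 40 pages for the same memcg pushes
 * nr_pages to 72, which exceeds the batch, so the whole stock is immediately
 * drained back to the page counters by drain_stock() above.
 */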
2389 
2390 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2391 {
2392 	unsigned long flags;
2393 
2394 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2395 	__refill_stock(memcg, nr_pages);
2396 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2397 }
2398 
2399 /*
2400  * Drain all per-CPU charge caches for the given root_memcg and the
2401  * subtree of the hierarchy under it.
2402  */
2403 static void drain_all_stock(struct mem_cgroup *root_memcg)
2404 {
2405 	int cpu, curcpu;
2406 
2407 	/* If someone's already draining, avoid adding more workers. */
2408 	if (!mutex_trylock(&percpu_charge_mutex))
2409 		return;
2410 	/*
2411 	 * Notify other cpus that a system-wide "drain" is running.
2412 	 * We do not care about races with the cpu hotplug because cpu down
2413 	 * as well as workers from this path always operate on the local
2414 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2415 	 */
2416 	migrate_disable();
2417 	curcpu = smp_processor_id();
2418 	for_each_online_cpu(cpu) {
2419 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2420 		struct mem_cgroup *memcg;
2421 		bool flush = false;
2422 
2423 		rcu_read_lock();
2424 		memcg = READ_ONCE(stock->cached);
2425 		if (memcg && stock->nr_pages &&
2426 		    mem_cgroup_is_descendant(memcg, root_memcg))
2427 			flush = true;
2428 		else if (obj_stock_flush_required(stock, root_memcg))
2429 			flush = true;
2430 		rcu_read_unlock();
2431 
2432 		if (flush &&
2433 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2434 			if (cpu == curcpu)
2435 				drain_local_stock(&stock->work);
2436 			else if (!cpu_is_isolated(cpu))
2437 				schedule_work_on(cpu, &stock->work);
2438 		}
2439 	}
2440 	migrate_enable();
2441 	mutex_unlock(&percpu_charge_mutex);
2442 }
2443 
2444 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2445 {
2446 	struct memcg_stock_pcp *stock;
2447 
2448 	stock = &per_cpu(memcg_stock, cpu);
2449 	drain_stock(stock);
2450 
2451 	return 0;
2452 }
2453 
2454 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2455 				  unsigned int nr_pages,
2456 				  gfp_t gfp_mask)
2457 {
2458 	unsigned long nr_reclaimed = 0;
2459 
2460 	do {
2461 		unsigned long pflags;
2462 
2463 		if (page_counter_read(&memcg->memory) <=
2464 		    READ_ONCE(memcg->memory.high))
2465 			continue;
2466 
2467 		memcg_memory_event(memcg, MEMCG_HIGH);
2468 
2469 		psi_memstall_enter(&pflags);
2470 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2471 							gfp_mask,
2472 							MEMCG_RECLAIM_MAY_SWAP);
2473 		psi_memstall_leave(&pflags);
2474 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2475 		 !mem_cgroup_is_root(memcg));
2476 
2477 	return nr_reclaimed;
2478 }
2479 
2480 static void high_work_func(struct work_struct *work)
2481 {
2482 	struct mem_cgroup *memcg;
2483 
2484 	memcg = container_of(work, struct mem_cgroup, high_work);
2485 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2486 }
2487 
2488 /*
2489  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2490  * enough to cause a significant slowdown in most cases, while still
2491  * allowing diagnostics and tracing to proceed without becoming stuck.
2492  */
2493 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2494 
2495 /*
2496  * When calculating the delay, we use these on either side of the exponentiation to
2497  * maintain precision and scale to a reasonable number of jiffies (see the table
2498  * below).
2499  *
2500  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2501  *   overage ratio to a delay.
2502  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2503  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2504  *   to produce a reasonable delay curve.
2505  *
2506  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2507  * reasonable delay curve compared to precision-adjusted overage, not
2508  * penalising heavily at first, but still making sure that growth beyond the
2509  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2510  * example, with a high of 100 megabytes:
2511  *
2512  *  +-------+------------------------+
2513  *  | usage | time to allocate in ms |
2514  *  +-------+------------------------+
2515  *  | 100M  |                      0 |
2516  *  | 101M  |                      6 |
2517  *  | 102M  |                     25 |
2518  *  | 103M  |                     57 |
2519  *  | 104M  |                    102 |
2520  *  | 105M  |                    159 |
2521  *  | 106M  |                    230 |
2522  *  | 107M  |                    313 |
2523  *  | 108M  |                    409 |
2524  *  | 109M  |                    518 |
2525  *  | 110M  |                    639 |
2526  *  | 111M  |                    774 |
2527  *  | 112M  |                    921 |
2528  *  | 113M  |                   1081 |
2529  *  | 114M  |                   1254 |
2530  *  | 115M  |                   1439 |
2531  *  | 116M  |                   1638 |
2532  *  | 117M  |                   1849 |
2533  *  | 118M  |                   2000 |
2534  *  | 119M  |                   2000 |
2535  *  | 120M  |                   2000 |
2536  *  +-------+------------------------+
2537  */
2538 #define MEMCG_DELAY_PRECISION_SHIFT 20
2539 #define MEMCG_DELAY_SCALING_SHIFT 14
2540 
2541 static u64 calculate_overage(unsigned long usage, unsigned long high)
2542 {
2543 	u64 overage;
2544 
2545 	if (usage <= high)
2546 		return 0;
2547 
2548 	/*
2549 	 * Prevent division by 0 in overage calculation by acting as if
2550 	 * it were a threshold of 1 page.
2551 	 */
2552 	high = max(high, 1UL);
2553 
2554 	overage = usage - high;
2555 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2556 	return div64_u64(overage, high);
2557 }
2558 
2559 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2560 {
2561 	u64 overage, max_overage = 0;
2562 
2563 	do {
2564 		overage = calculate_overage(page_counter_read(&memcg->memory),
2565 					    READ_ONCE(memcg->memory.high));
2566 		max_overage = max(overage, max_overage);
2567 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2568 		 !mem_cgroup_is_root(memcg));
2569 
2570 	return max_overage;
2571 }
2572 
2573 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2574 {
2575 	u64 overage, max_overage = 0;
2576 
2577 	do {
2578 		overage = calculate_overage(page_counter_read(&memcg->swap),
2579 					    READ_ONCE(memcg->swap.high));
2580 		if (overage)
2581 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2582 		max_overage = max(overage, max_overage);
2583 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2584 		 !mem_cgroup_is_root(memcg));
2585 
2586 	return max_overage;
2587 }
2588 
2589 /*
2590  * Get the number of jiffies that we should penalise a mischievous cgroup which
2591  * is exceeding its memory.high by checking both it and its ancestors.
2592  */
2593 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2594 					  unsigned int nr_pages,
2595 					  u64 max_overage)
2596 {
2597 	unsigned long penalty_jiffies;
2598 
2599 	if (!max_overage)
2600 		return 0;
2601 
2602 	/*
2603 	 * We use overage compared to memory.high to calculate the number of
2604 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2605 	 * fairly lenient on small overages, and increasingly harsh when the
2606 	 * memcg in question makes it clear that it has no intention of stopping
2607 	 * its crazy behaviour, so we exponentially increase the delay based on
2608 	 * overage amount.
2609 	 */
2610 	penalty_jiffies = max_overage * max_overage * HZ;
2611 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2612 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2613 
2614 	/*
2615 	 * Factor in the task's own contribution to the overage, such that four
2616 	 * N-sized allocations are throttled approximately the same as one
2617 	 * 4N-sized allocation.
2618 	 *
2619 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2620 	 * larger the current charge batch is than that.
2621 	 */
2622 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2623 }
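/*
 * Worked example (assuming HZ == 1000 and a full MEMCG_CHARGE_BATCH sized
 * charge): with usage at 110M against a 100M memory.high, calculate_overage()
 * yields roughly 0.1 << MEMCG_DELAY_PRECISION_SHIFT ~= 104857.  Squaring,
 * multiplying by HZ and shifting right by 20 + 14 gives ~639 jiffies
 * (~639ms at HZ == 1000), matching the 110M row of the table above.
 */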
2624 
2625 /*
2626  * Reclaims memory over the high limit. Called directly from
2627  * try_charge() (context permitting), as well as from the userland
2628  * return path where reclaim is always able to block.
2629  */
2630 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2631 {
2632 	unsigned long penalty_jiffies;
2633 	unsigned long pflags;
2634 	unsigned long nr_reclaimed;
2635 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2636 	int nr_retries = MAX_RECLAIM_RETRIES;
2637 	struct mem_cgroup *memcg;
2638 	bool in_retry = false;
2639 
2640 	if (likely(!nr_pages))
2641 		return;
2642 
2643 	memcg = get_mem_cgroup_from_mm(current->mm);
2644 	current->memcg_nr_pages_over_high = 0;
2645 
2646 retry_reclaim:
2647 	/*
2648 	 * Bail if the task is already exiting. Unlike memory.max,
2649 	 * memory.high enforcement isn't as strict, and there is no
2650 	 * OOM killer involved, which means the excess could already
2651 	 * be much bigger (and still growing) than it could for
2652 	 * memory.max; the dying task could get stuck in fruitless
2653 	 * reclaim for a long time, which isn't desirable.
2654 	 */
2655 	if (task_is_dying())
2656 		goto out;
2657 
2658 	/*
2659 	 * The allocating task should reclaim at least the batch size, but for
2660 	 * subsequent retries we only want to do what's necessary to prevent oom
2661 	 * or breaching resource isolation.
2662 	 *
2663 	 * This is distinct from memory.max or page allocator behaviour because
2664 	 * memory.high is currently batched, whereas memory.max and the page
2665 	 * allocator run every time an allocation is made.
2666 	 */
2667 	nr_reclaimed = reclaim_high(memcg,
2668 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2669 				    gfp_mask);
2670 
2671 	/*
2672 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2673 	 * allocators proactively to slow down excessive growth.
2674 	 */
2675 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2676 					       mem_find_max_overage(memcg));
2677 
2678 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2679 						swap_find_max_overage(memcg));
2680 
2681 	/*
2682 	 * Clamp the max delay per usermode return so as to still keep the
2683 	 * application moving forwards and also permit diagnostics, albeit
2684 	 * extremely slowly.
2685 	 */
2686 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2687 
2688 	/*
2689 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2690 	 * that it's not even worth doing, in an attempt to be nice to those who
2691 	 * go only a small amount over their memory.high value and maybe haven't
2692 	 * been aggressively reclaimed enough yet.
2693 	 */
2694 	if (penalty_jiffies <= HZ / 100)
2695 		goto out;
2696 
2697 	/*
2698 	 * If reclaim is making forward progress but we're still over
2699 	 * memory.high, we want to encourage that rather than doing allocator
2700 	 * throttling.
2701 	 */
2702 	if (nr_reclaimed || nr_retries--) {
2703 		in_retry = true;
2704 		goto retry_reclaim;
2705 	}
2706 
2707 	/*
2708 	 * Reclaim didn't manage to push usage below the limit, slow
2709 	 * this allocating task down.
2710 	 *
2711 	 * If we exit early, we're guaranteed to die (since
2712 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2713 	 * need to account for any ill-begotten jiffies to pay them off later.
2714 	 */
2715 	psi_memstall_enter(&pflags);
2716 	schedule_timeout_killable(penalty_jiffies);
2717 	psi_memstall_leave(&pflags);
2718 
2719 out:
2720 	css_put(&memcg->css);
2721 }
2722 
2723 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2724 			unsigned int nr_pages)
2725 {
2726 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2727 	int nr_retries = MAX_RECLAIM_RETRIES;
2728 	struct mem_cgroup *mem_over_limit;
2729 	struct page_counter *counter;
2730 	unsigned long nr_reclaimed;
2731 	bool passed_oom = false;
2732 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2733 	bool drained = false;
2734 	bool raised_max_event = false;
2735 	unsigned long pflags;
2736 
2737 retry:
2738 	if (consume_stock(memcg, nr_pages))
2739 		return 0;
2740 
2741 	if (!do_memsw_account() ||
2742 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2743 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2744 			goto done_restock;
2745 		if (do_memsw_account())
2746 			page_counter_uncharge(&memcg->memsw, batch);
2747 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2748 	} else {
2749 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2750 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2751 	}
2752 
2753 	if (batch > nr_pages) {
2754 		batch = nr_pages;
2755 		goto retry;
2756 	}
2757 
2758 	/*
2759 	 * Prevent unbounded recursion when reclaim operations need to
2760 	 * allocate memory. This might exceed the limits temporarily,
2761 	 * but we prefer facilitating memory reclaim and getting back
2762 	 * under the limit over triggering OOM kills in these cases.
2763 	 */
2764 	if (unlikely(current->flags & PF_MEMALLOC))
2765 		goto force;
2766 
2767 	if (unlikely(task_in_memcg_oom(current)))
2768 		goto nomem;
2769 
2770 	if (!gfpflags_allow_blocking(gfp_mask))
2771 		goto nomem;
2772 
2773 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2774 	raised_max_event = true;
2775 
2776 	psi_memstall_enter(&pflags);
2777 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2778 						    gfp_mask, reclaim_options);
2779 	psi_memstall_leave(&pflags);
2780 
2781 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2782 		goto retry;
2783 
2784 	if (!drained) {
2785 		drain_all_stock(mem_over_limit);
2786 		drained = true;
2787 		goto retry;
2788 	}
2789 
2790 	if (gfp_mask & __GFP_NORETRY)
2791 		goto nomem;
2792 	/*
2793 	 * Even though the limit is exceeded at this point, reclaim
2794 	 * may have been able to free some pages.  Retry the charge
2795 	 * before killing the task.
2796 	 *
2797 	 * Only for regular pages, though: huge pages are rather
2798 	 * unlikely to succeed so close to the limit, and we fall back
2799 	 * to regular pages anyway in case of failure.
2800 	 */
2801 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2802 		goto retry;
2803 	/*
2804 	 * During task move, charges can be temporarily double counted, so
2805 	 * it's better to wait until the move completes if one is in progress.
2806 	 */
2807 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2808 		goto retry;
2809 
2810 	if (nr_retries--)
2811 		goto retry;
2812 
2813 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2814 		goto nomem;
2815 
2816 	/* Avoid endless loop for tasks bypassed by the oom killer */
2817 	if (passed_oom && task_is_dying())
2818 		goto nomem;
2819 
2820 	/*
2821 	 * Keep retrying as long as the memcg oom killer is able to make
2822 	 * forward progress, or bypass the charge if the oom killer
2823 	 * couldn't make any progress.
2824 	 */
2825 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2826 			   get_order(nr_pages * PAGE_SIZE))) {
2827 		passed_oom = true;
2828 		nr_retries = MAX_RECLAIM_RETRIES;
2829 		goto retry;
2830 	}
2831 nomem:
2832 	/*
2833 	 * Memcg doesn't have a dedicated reserve for atomic
2834 	 * allocations. But like the global atomic pool, we need to
2835 	 * put the burden of reclaim on regular allocation requests
2836 	 * and let these go through as privileged allocations.
2837 	 */
2838 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2839 		return -ENOMEM;
2840 force:
2841 	/*
2842 	 * If the allocation has to be enforced, don't forget to raise
2843 	 * a MEMCG_MAX event.
2844 	 */
2845 	if (!raised_max_event)
2846 		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2847 
2848 	/*
2849 	 * The allocation either can't fail or will lead to more memory
2850 	 * being freed very soon.  Allow memory usage to go over the limit
2851 	 * temporarily by force charging it.
2852 	 */
2853 	page_counter_charge(&memcg->memory, nr_pages);
2854 	if (do_memsw_account())
2855 		page_counter_charge(&memcg->memsw, nr_pages);
2856 
2857 	return 0;
2858 
2859 done_restock:
2860 	if (batch > nr_pages)
2861 		refill_stock(memcg, batch - nr_pages);
2862 
2863 	/*
2864 	 * If the hierarchy is above the normal consumption range, schedule
2865 	 * reclaim on returning to userland.  We can perform reclaim here
2866 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2867 	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2868 	 * not recorded as it most likely matches current's and won't
2869 	 * change in the meantime.  As high limit is checked again before
2870 	 * reclaim, the cost of mismatch is negligible.
2871 	 */
2872 	do {
2873 		bool mem_high, swap_high;
2874 
2875 		mem_high = page_counter_read(&memcg->memory) >
2876 			READ_ONCE(memcg->memory.high);
2877 		swap_high = page_counter_read(&memcg->swap) >
2878 			READ_ONCE(memcg->swap.high);
2879 
2880 		/* Don't bother a random interrupted task */
2881 		if (!in_task()) {
2882 			if (mem_high) {
2883 				schedule_work(&memcg->high_work);
2884 				break;
2885 			}
2886 			continue;
2887 		}
2888 
2889 		if (mem_high || swap_high) {
2890 			/*
2891 			 * The allocating tasks in this cgroup will need to do
2892 			 * reclaim or be throttled to prevent further growth
2893 			 * of the memory or swap footprints.
2894 			 *
2895 			 * Target some best-effort fairness between the tasks,
2896 			 * and distribute reclaim work and delay penalties
2897 			 * based on how much each task is actually allocating.
2898 			 */
2899 			current->memcg_nr_pages_over_high += batch;
2900 			set_notify_resume(current);
2901 			break;
2902 		}
2903 	} while ((memcg = parent_mem_cgroup(memcg)));
2904 
2905 	/*
2906 	 * Reclaim is set up above to be called from the userland
2907 	 * return path. But also attempt synchronous reclaim to avoid
2908 	 * excessive overrun while the task is still inside the
2909 	 * kernel. If this is successful, the return path will see it
2910 	 * when it rechecks the overage and simply bail out.
2911 	 */
2912 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2913 	    !(current->flags & PF_MEMALLOC) &&
2914 	    gfpflags_allow_blocking(gfp_mask))
2915 		mem_cgroup_handle_over_high(gfp_mask);
2916 	return 0;
2917 }
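/*
 * Rough summary of the slow path above: consume the per-cpu stock, then try
 * the page counters in MEMCG_CHARGE_BATCH sized chunks, retry with the exact
 * request, direct-reclaim, drain all stocks, and finally invoke the memcg OOM
 * killer; __GFP_NOFAIL and __GFP_HIGH requests are force-charged instead of
 * failing.
 */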
2918 
2919 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2920 			     unsigned int nr_pages)
2921 {
2922 	if (mem_cgroup_is_root(memcg))
2923 		return 0;
2924 
2925 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2926 }
2927 
2928 /**
2929  * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2930  * @memcg: memcg previously charged.
2931  * @nr_pages: number of pages previously charged.
2932  */
2933 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2934 {
2935 	if (mem_cgroup_is_root(memcg))
2936 		return;
2937 
2938 	page_counter_uncharge(&memcg->memory, nr_pages);
2939 	if (do_memsw_account())
2940 		page_counter_uncharge(&memcg->memsw, nr_pages);
2941 }
2942 
2943 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2944 {
2945 	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2946 	/*
2947 	 * Any of the following ensures the folio's memcg stability:
2948 	 *
2949 	 * - the page lock
2950 	 * - LRU isolation
2951 	 * - folio_memcg_lock()
2952 	 * - exclusive reference
2953 	 * - mem_cgroup_trylock_pages()
2954 	 */
2955 	folio->memcg_data = (unsigned long)memcg;
2956 }
2957 
2958 /**
2959  * mem_cgroup_commit_charge - commit a previously successful try_charge().
2960  * @folio: folio to commit the charge to.
2961  * @memcg: memcg previously charged.
2962  */
2963 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2964 {
2965 	css_get(&memcg->css);
2966 	commit_charge(folio, memcg);
2967 
2968 	local_irq_disable();
2969 	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2970 	memcg_check_events(memcg, folio_nid(folio));
2971 	local_irq_enable();
2972 }
2973 
2974 #ifdef CONFIG_MEMCG_KMEM
2975 /*
2976  * The allocated objcg pointers array is not accounted directly.
2977  * Moreover, it should not come from a DMA buffer and is not readily
2978  * reclaimable. So those GFP bits should be masked off.
2979  */
2980 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2981 				 __GFP_ACCOUNT | __GFP_NOFAIL)
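/*
 * For example, a slab created with GFP_KERNEL_ACCOUNT will have its objcg
 * pointers array allocated below with plain GFP_KERNEL semantics:
 * __GFP_ACCOUNT is masked off so the vector itself is not charged.
 */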
2982 
2983 /*
2984  * mod_objcg_mlstate() may be called with irq enabled, so
2985  * mod_memcg_lruvec_state() should be used.
2986  */
2987 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2988 				     struct pglist_data *pgdat,
2989 				     enum node_stat_item idx, int nr)
2990 {
2991 	struct mem_cgroup *memcg;
2992 	struct lruvec *lruvec;
2993 
2994 	rcu_read_lock();
2995 	memcg = obj_cgroup_memcg(objcg);
2996 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2997 	mod_memcg_lruvec_state(lruvec, idx, nr);
2998 	rcu_read_unlock();
2999 }
3000 
3001 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3002 				 gfp_t gfp, bool new_slab)
3003 {
3004 	unsigned int objects = objs_per_slab(s, slab);
3005 	unsigned long memcg_data;
3006 	void *vec;
3007 
3008 	gfp &= ~OBJCGS_CLEAR_MASK;
3009 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
3010 			   slab_nid(slab));
3011 	if (!vec)
3012 		return -ENOMEM;
3013 
3014 	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
3015 	if (new_slab) {
3016 		/*
3017 		 * If the slab is brand new and nobody can yet access its
3018 		 * memcg_data, no synchronization is required and memcg_data can
3019 		 * be simply assigned.
3020 		 */
3021 		slab->memcg_data = memcg_data;
3022 	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
3023 		/*
3024 		 * If the slab is already in use, somebody can allocate and
3025 		 * assign obj_cgroups in parallel. In this case the existing
3026 		 * objcg vector should be reused.
3027 		 */
3028 		kfree(vec);
3029 		return 0;
3030 	}
3031 
3032 	kmemleak_not_leak(vec);
3033 	return 0;
3034 }
3035 
3036 static __always_inline
3037 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3038 {
3039 	/*
3040 	 * Slab objects are accounted individually, not per-page.
3041 	 * Memcg membership data for each individual object is saved in
3042 	 * slab->memcg_data.
3043 	 */
3044 	if (folio_test_slab(folio)) {
3045 		struct obj_cgroup **objcgs;
3046 		struct slab *slab;
3047 		unsigned int off;
3048 
3049 		slab = folio_slab(folio);
3050 		objcgs = slab_objcgs(slab);
3051 		if (!objcgs)
3052 			return NULL;
3053 
3054 		off = obj_to_index(slab->slab_cache, slab, p);
3055 		if (objcgs[off])
3056 			return obj_cgroup_memcg(objcgs[off]);
3057 
3058 		return NULL;
3059 	}
3060 
3061 	/*
3062 	 * folio_memcg_check() is used here, because in theory we can encounter
3063 	 * a folio where the slab flag has been cleared already, but
3064 	 * slab->memcg_data has not been freed yet.
3065 	 * folio_memcg_check() will guarantee that a proper memory
3066 	 * cgroup pointer or NULL will be returned.
3067 	 */
3068 	return folio_memcg_check(folio);
3069 }
3070 
3071 /*
3072  * Returns a pointer to the memory cgroup to which the kernel object is charged.
3073  *
3074  * A passed kernel object can be a slab object, vmalloc object or a generic
3075  * kernel page, so different mechanisms for getting the memory cgroup pointer
3076  * should be used.
3077  *
3078  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3079  * cannot know for sure how the kernel object is implemented.
3080  * mem_cgroup_from_obj() can be safely used in such cases.
3081  *
3082  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3083  * cgroup_mutex, etc.
3084  */
3085 struct mem_cgroup *mem_cgroup_from_obj(void *p)
3086 {
3087 	struct folio *folio;
3088 
3089 	if (mem_cgroup_disabled())
3090 		return NULL;
3091 
3092 	if (unlikely(is_vmalloc_addr(p)))
3093 		folio = page_folio(vmalloc_to_page(p));
3094 	else
3095 		folio = virt_to_folio(p);
3096 
3097 	return mem_cgroup_from_obj_folio(folio, p);
3098 }
3099 
3100 /*
3101  * Returns a pointer to the memory cgroup to which the kernel object is charged.
3102  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3103  * allocated using vmalloc().
3104  *
3105  * A passed kernel object must be a slab object or a generic kernel page.
3106  *
3107  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3108  * cgroup_mutex, etc.
3109  */
3110 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3111 {
3112 	if (mem_cgroup_disabled())
3113 		return NULL;
3114 
3115 	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3116 }
3117 
3118 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3119 {
3120 	struct obj_cgroup *objcg = NULL;
3121 
3122 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3123 		objcg = rcu_dereference(memcg->objcg);
3124 		if (likely(objcg && obj_cgroup_tryget(objcg)))
3125 			break;
3126 		objcg = NULL;
3127 	}
3128 	return objcg;
3129 }
3130 
3131 static struct obj_cgroup *current_objcg_update(void)
3132 {
3133 	struct mem_cgroup *memcg;
3134 	struct obj_cgroup *old, *objcg = NULL;
3135 
3136 	do {
3137 		/* Atomically drop the update bit. */
3138 		old = xchg(&current->objcg, NULL);
3139 		if (old) {
3140 			old = (struct obj_cgroup *)
3141 				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3142 			if (old)
3143 				obj_cgroup_put(old);
3144 
3145 			old = NULL;
3146 		}
3147 
3148 		/* If new objcg is NULL, no reason for the second atomic update. */
3149 		if (!current->mm || (current->flags & PF_KTHREAD))
3150 			return NULL;
3151 
3152 		/*
3153 		 * Release the objcg pointer from the previous iteration,
3154 		 * if try_cmpxchg() below fails.
3155 		 */
3156 		if (unlikely(objcg)) {
3157 			obj_cgroup_put(objcg);
3158 			objcg = NULL;
3159 		}
3160 
3161 		/*
3162 		 * Obtain the new objcg pointer. The current task can be
3163 		 * asynchronously moved to another memcg and the previous
3164 		 * memcg can be offlined. So let's get the memcg pointer
3165 		 * and try to get a reference to the objcg under an rcu read lock.
3166 		 */
3167 
3168 		rcu_read_lock();
3169 		memcg = mem_cgroup_from_task(current);
3170 		objcg = __get_obj_cgroup_from_memcg(memcg);
3171 		rcu_read_unlock();
3172 
3173 		/*
3174 		 * Try to set up a new objcg pointer atomically. If it
3175 		 * fails, it means the update flag was set concurrently, so
3176 		 * the whole procedure should be repeated.
3177 		 */
3178 	} while (!try_cmpxchg(&current->objcg, &old, objcg));
3179 
3180 	return objcg;
3181 }
3182 
3183 __always_inline struct obj_cgroup *current_obj_cgroup(void)
3184 {
3185 	struct mem_cgroup *memcg;
3186 	struct obj_cgroup *objcg;
3187 
3188 	if (in_task()) {
3189 		memcg = current->active_memcg;
3190 		if (unlikely(memcg))
3191 			goto from_memcg;
3192 
3193 		objcg = READ_ONCE(current->objcg);
3194 		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3195 			objcg = current_objcg_update();
3196 		/*
3197 		 * The objcg reference is kept by the task, so it's safe
3198 		 * for the current task to use the objcg.
3199 		 */
3200 		return objcg;
3201 	}
3202 
3203 	memcg = this_cpu_read(int_active_memcg);
3204 	if (unlikely(memcg))
3205 		goto from_memcg;
3206 
3207 	return NULL;
3208 
3209 from_memcg:
3210 	objcg = NULL;
3211 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3212 		/*
3213 		 * Memcg pointer is protected by scope (see set_active_memcg())
3214 		 * and is pinning the corresponding objcg, so objcg can't go
3215 		 * away and can be used within the scope without any additional
3216 		 * protection.
3217 		 */
3218 		objcg = rcu_dereference_check(memcg->objcg, 1);
3219 		if (likely(objcg))
3220 			break;
3221 	}
3222 
3223 	return objcg;
3224 }
3225 
3226 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3227 {
3228 	struct obj_cgroup *objcg;
3229 
3230 	if (!memcg_kmem_online())
3231 		return NULL;
3232 
3233 	if (folio_memcg_kmem(folio)) {
3234 		objcg = __folio_objcg(folio);
3235 		obj_cgroup_get(objcg);
3236 	} else {
3237 		struct mem_cgroup *memcg;
3238 
3239 		rcu_read_lock();
3240 		memcg = __folio_memcg(folio);
3241 		if (memcg)
3242 			objcg = __get_obj_cgroup_from_memcg(memcg);
3243 		else
3244 			objcg = NULL;
3245 		rcu_read_unlock();
3246 	}
3247 	return objcg;
3248 }
3249 
3250 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3251 {
3252 	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3253 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3254 		if (nr_pages > 0)
3255 			page_counter_charge(&memcg->kmem, nr_pages);
3256 		else
3257 			page_counter_uncharge(&memcg->kmem, -nr_pages);
3258 	}
3259 }
3260 
3261 
3262 /*
3263  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3264  * @objcg: object cgroup to uncharge
3265  * @nr_pages: number of pages to uncharge
3266  */
3267 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3268 				      unsigned int nr_pages)
3269 {
3270 	struct mem_cgroup *memcg;
3271 
3272 	memcg = get_mem_cgroup_from_objcg(objcg);
3273 
3274 	memcg_account_kmem(memcg, -nr_pages);
3275 	refill_stock(memcg, nr_pages);
3276 
3277 	css_put(&memcg->css);
3278 }
3279 
3280 /*
3281  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3282  * @objcg: object cgroup to charge
3283  * @gfp: reclaim mode
3284  * @nr_pages: number of pages to charge
3285  *
3286  * Returns 0 on success, an error code on failure.
3287  */
3288 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3289 				   unsigned int nr_pages)
3290 {
3291 	struct mem_cgroup *memcg;
3292 	int ret;
3293 
3294 	memcg = get_mem_cgroup_from_objcg(objcg);
3295 
3296 	ret = try_charge_memcg(memcg, gfp, nr_pages);
3297 	if (ret)
3298 		goto out;
3299 
3300 	memcg_account_kmem(memcg, nr_pages);
3301 out:
3302 	css_put(&memcg->css);
3303 
3304 	return ret;
3305 }
3306 
3307 /**
3308  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3309  * @page: page to charge
3310  * @gfp: reclaim mode
3311  * @order: allocation order
3312  *
3313  * Returns 0 on success, an error code on failure.
3314  */
3315 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3316 {
3317 	struct obj_cgroup *objcg;
3318 	int ret = 0;
3319 
3320 	objcg = current_obj_cgroup();
3321 	if (objcg) {
3322 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3323 		if (!ret) {
3324 			obj_cgroup_get(objcg);
3325 			page->memcg_data = (unsigned long)objcg |
3326 				MEMCG_DATA_KMEM;
3327 			return 0;
3328 		}
3329 	}
3330 	return ret;
3331 }
3332 
3333 /**
3334  * __memcg_kmem_uncharge_page: uncharge a kmem page
3335  * @page: page to uncharge
3336  * @order: allocation order
3337  */
3338 void __memcg_kmem_uncharge_page(struct page *page, int order)
3339 {
3340 	struct folio *folio = page_folio(page);
3341 	struct obj_cgroup *objcg;
3342 	unsigned int nr_pages = 1 << order;
3343 
3344 	if (!folio_memcg_kmem(folio))
3345 		return;
3346 
3347 	objcg = __folio_objcg(folio);
3348 	obj_cgroup_uncharge_pages(objcg, nr_pages);
3349 	folio->memcg_data = 0;
3350 	obj_cgroup_put(objcg);
3351 }
3352 
3353 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3354 		     enum node_stat_item idx, int nr)
3355 {
3356 	struct memcg_stock_pcp *stock;
3357 	struct obj_cgroup *old = NULL;
3358 	unsigned long flags;
3359 	int *bytes;
3360 
3361 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3362 	stock = this_cpu_ptr(&memcg_stock);
3363 
3364 	/*
3365 	 * Save vmstat data in the stock and skip the vmstat array update
3366 	 * until a page's worth of data has accumulated or pgdat or idx
3367 	 * changes.
3368 	 */
3369 	if (READ_ONCE(stock->cached_objcg) != objcg) {
3370 		old = drain_obj_stock(stock);
3371 		obj_cgroup_get(objcg);
3372 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3373 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3374 		WRITE_ONCE(stock->cached_objcg, objcg);
3375 		stock->cached_pgdat = pgdat;
3376 	} else if (stock->cached_pgdat != pgdat) {
3377 		/* Flush the existing cached vmstat data */
3378 		struct pglist_data *oldpg = stock->cached_pgdat;
3379 
3380 		if (stock->nr_slab_reclaimable_b) {
3381 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3382 					  stock->nr_slab_reclaimable_b);
3383 			stock->nr_slab_reclaimable_b = 0;
3384 		}
3385 		if (stock->nr_slab_unreclaimable_b) {
3386 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3387 					  stock->nr_slab_unreclaimable_b);
3388 			stock->nr_slab_unreclaimable_b = 0;
3389 		}
3390 		stock->cached_pgdat = pgdat;
3391 	}
3392 
3393 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3394 					       : &stock->nr_slab_unreclaimable_b;
3395 	/*
3396 	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3397 	 * cached locally at least once before pushing it out.
3398 	 */
3399 	if (!*bytes) {
3400 		*bytes = nr;
3401 		nr = 0;
3402 	} else {
3403 		*bytes += nr;
3404 		if (abs(*bytes) > PAGE_SIZE) {
3405 			nr = *bytes;
3406 			*bytes = 0;
3407 		} else {
3408 			nr = 0;
3409 		}
3410 	}
3411 	if (nr)
3412 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3413 
3414 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3415 	if (old)
3416 		obj_cgroup_put(old);
3417 }
3418 
3419 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3420 {
3421 	struct memcg_stock_pcp *stock;
3422 	unsigned long flags;
3423 	bool ret = false;
3424 
3425 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3426 
3427 	stock = this_cpu_ptr(&memcg_stock);
3428 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3429 		stock->nr_bytes -= nr_bytes;
3430 		ret = true;
3431 	}
3432 
3433 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3434 
3435 	return ret;
3436 }
3437 
3438 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3439 {
3440 	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3441 
3442 	if (!old)
3443 		return NULL;
3444 
3445 	if (stock->nr_bytes) {
3446 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3447 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3448 
3449 		if (nr_pages) {
3450 			struct mem_cgroup *memcg;
3451 
3452 			memcg = get_mem_cgroup_from_objcg(old);
3453 
3454 			memcg_account_kmem(memcg, -nr_pages);
3455 			__refill_stock(memcg, nr_pages);
3456 
3457 			css_put(&memcg->css);
3458 		}
3459 
3460 		/*
3461 		 * The leftover is flushed to the centralized per-memcg value.
3462 		 * On the next attempt to refill obj stock it will be moved
3463 	 * to a per-cpu stock (probably on another CPU), see
3464 		 * refill_obj_stock().
3465 		 *
3466 		 * How often it's flushed is a trade-off between the memory
3467 		 * limit enforcement accuracy and potential CPU contention,
3468 		 * so it might be changed in the future.
3469 		 */
3470 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3471 		stock->nr_bytes = 0;
3472 	}
3473 
3474 	/*
3475 	 * Flush the vmstat data in the current stock.
3476 	 */
3477 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3478 		if (stock->nr_slab_reclaimable_b) {
3479 			mod_objcg_mlstate(old, stock->cached_pgdat,
3480 					  NR_SLAB_RECLAIMABLE_B,
3481 					  stock->nr_slab_reclaimable_b);
3482 			stock->nr_slab_reclaimable_b = 0;
3483 		}
3484 		if (stock->nr_slab_unreclaimable_b) {
3485 			mod_objcg_mlstate(old, stock->cached_pgdat,
3486 					  NR_SLAB_UNRECLAIMABLE_B,
3487 					  stock->nr_slab_unreclaimable_b);
3488 			stock->nr_slab_unreclaimable_b = 0;
3489 		}
3490 		stock->cached_pgdat = NULL;
3491 	}
3492 
3493 	WRITE_ONCE(stock->cached_objcg, NULL);
3494 	/*
3495 	 * The `old' objcg needs to be released by the caller via
3496 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3497 	 */
3498 	return old;
3499 }
3500 
3501 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3502 				     struct mem_cgroup *root_memcg)
3503 {
3504 	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3505 	struct mem_cgroup *memcg;
3506 
3507 	if (objcg) {
3508 		memcg = obj_cgroup_memcg(objcg);
3509 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3510 			return true;
3511 	}
3512 
3513 	return false;
3514 }
3515 
3516 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3517 			     bool allow_uncharge)
3518 {
3519 	struct memcg_stock_pcp *stock;
3520 	struct obj_cgroup *old = NULL;
3521 	unsigned long flags;
3522 	unsigned int nr_pages = 0;
3523 
3524 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3525 
3526 	stock = this_cpu_ptr(&memcg_stock);
3527 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3528 		old = drain_obj_stock(stock);
3529 		obj_cgroup_get(objcg);
3530 		WRITE_ONCE(stock->cached_objcg, objcg);
3531 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3532 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3533 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3534 	}
3535 	stock->nr_bytes += nr_bytes;
3536 
3537 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3538 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3539 		stock->nr_bytes &= (PAGE_SIZE - 1);
3540 	}
3541 
3542 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3543 	if (old)
3544 		obj_cgroup_put(old);
3545 
3546 	if (nr_pages)
3547 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3548 }
3549 
3550 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3551 {
3552 	unsigned int nr_pages, nr_bytes;
3553 	int ret;
3554 
3555 	if (consume_obj_stock(objcg, size))
3556 		return 0;
3557 
3558 	/*
3559 	 * In theory, objcg->nr_charged_bytes can have enough
3560 	 * pre-charged bytes to satisfy the allocation. However,
3561 	 * flushing objcg->nr_charged_bytes requires two atomic
3562 	 * operations, and objcg->nr_charged_bytes can't be big.
3563 	 * The shared objcg->nr_charged_bytes can also become a
3564 	 * performance bottleneck if all tasks of the same memcg are
3565 	 * trying to update it. So it's better to ignore it and try
3566 	 * grab some new pages. The stock's nr_bytes will be flushed to
3567 	 * objcg->nr_charged_bytes later on when objcg changes.
3568 	 *
3569 	 * The stock's nr_bytes may contain enough pre-charged bytes
3570 	 * to allow one less page from being charged, but we can't rely
3571 	 * on the pre-charged bytes not being changed outside of
3572 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3573 	 * pre-charged bytes as well when charging pages. To avoid a
3574 	 * page uncharge right after a page charge, we set the
3575 	 * allow_uncharge flag to false when calling refill_obj_stock()
3576 	 * to temporarily allow the pre-charged bytes to exceed the page
3577 	 * size limit. The maximum reachable value of the pre-charged
3578 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3579 	 * race.
3580 	 */
3581 	nr_pages = size >> PAGE_SHIFT;
3582 	nr_bytes = size & (PAGE_SIZE - 1);
3583 
3584 	if (nr_bytes)
3585 		nr_pages += 1;
3586 
3587 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3588 	if (!ret && nr_bytes)
3589 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3590 
3591 	return ret;
3592 }
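/*
 * Illustrative example (hypothetical size): charging size == 2 * PAGE_SIZE +
 * 100 bytes above computes nr_pages == 3; on success the unused remainder of
 * the third page, PAGE_SIZE - 100 bytes, is stashed in the per-cpu objcg
 * stock via refill_obj_stock() with uncharging disallowed.
 */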
3593 
3594 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3595 {
3596 	refill_obj_stock(objcg, size, true);
3597 }
3598 
3599 #endif /* CONFIG_MEMCG_KMEM */
3600 
3601 /*
3602  * Because memcg_data is not set on tail pages, copy it from the head now.
3603  */
3604 void split_page_memcg(struct page *head, unsigned int nr)
3605 {
3606 	struct folio *folio = page_folio(head);
3607 	struct mem_cgroup *memcg = folio_memcg(folio);
3608 	int i;
3609 
3610 	if (mem_cgroup_disabled() || !memcg)
3611 		return;
3612 
3613 	for (i = 1; i < nr; i++)
3614 		folio_page(folio, i)->memcg_data = folio->memcg_data;
3615 
3616 	if (folio_memcg_kmem(folio))
3617 		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3618 	else
3619 		css_get_many(&memcg->css, nr - 1);
3620 }
3621 
3622 #ifdef CONFIG_SWAP
3623 /**
3624  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3625  * @entry: swap entry to be moved
3626  * @from:  mem_cgroup which the entry is moved from
3627  * @to:  mem_cgroup which the entry is moved to
3628  *
3629  * It succeeds only when the swap_cgroup's record for this entry is the same
3630  * as the mem_cgroup's id of @from.
3631  *
3632  * Returns 0 on success, -EINVAL on failure.
3633  *
3634  * The caller must have charged @to, IOW, called page_counter_charge() for
3635  * both res and memsw, and called css_get().
3636  */
3637 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3638 				struct mem_cgroup *from, struct mem_cgroup *to)
3639 {
3640 	unsigned short old_id, new_id;
3641 
3642 	old_id = mem_cgroup_id(from);
3643 	new_id = mem_cgroup_id(to);
3644 
3645 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3646 		mod_memcg_state(from, MEMCG_SWAP, -1);
3647 		mod_memcg_state(to, MEMCG_SWAP, 1);
3648 		return 0;
3649 	}
3650 	return -EINVAL;
3651 }
3652 #else
3653 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3654 				struct mem_cgroup *from, struct mem_cgroup *to)
3655 {
3656 	return -EINVAL;
3657 }
3658 #endif
3659 
3660 static DEFINE_MUTEX(memcg_max_mutex);
3661 
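/*
 * Update the hard limit (memory.max or memsw.max) of @memcg while keeping
 * the invariant memory.max <= memsw.max.  If the counter cannot be shrunk
 * immediately, the per-CPU stocks are drained once and memory is reclaimed
 * until the new limit fits, a signal is pending, or reclaim fails.
 */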
3662 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3663 				 unsigned long max, bool memsw)
3664 {
3665 	bool enlarge = false;
3666 	bool drained = false;
3667 	int ret;
3668 	bool limits_invariant;
3669 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3670 
3671 	do {
3672 		if (signal_pending(current)) {
3673 			ret = -EINTR;
3674 			break;
3675 		}
3676 
3677 		mutex_lock(&memcg_max_mutex);
3678 		/*
3679 		 * Make sure that the new limit (memsw or memory limit) doesn't
3680 		 * break our basic invariant rule memory.max <= memsw.max.
3681 		 */
3682 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3683 					   max <= memcg->memsw.max;
3684 		if (!limits_invariant) {
3685 			mutex_unlock(&memcg_max_mutex);
3686 			ret = -EINVAL;
3687 			break;
3688 		}
3689 		if (max > counter->max)
3690 			enlarge = true;
3691 		ret = page_counter_set_max(counter, max);
3692 		mutex_unlock(&memcg_max_mutex);
3693 
3694 		if (!ret)
3695 			break;
3696 
3697 		if (!drained) {
3698 			drain_all_stock(memcg);
3699 			drained = true;
3700 			continue;
3701 		}
3702 
3703 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3704 					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3705 			ret = -EBUSY;
3706 			break;
3707 		}
3708 	} while (true);
3709 
3710 	if (!ret && enlarge)
3711 		memcg_oom_recover(memcg);
3712 
3713 	return ret;
3714 }
3715 
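/*
 * Reclaim pages from the cgroups on @pgdat that exceed their soft limit
 * the most, walking the per-node soft limit tree.  Only used for order-0
 * allocations and when MGLRU is disabled.  Returns the number of pages
 * reclaimed; @total_scanned is updated by the underlying reclaim.
 */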
3716 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3717 					    gfp_t gfp_mask,
3718 					    unsigned long *total_scanned)
3719 {
3720 	unsigned long nr_reclaimed = 0;
3721 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3722 	unsigned long reclaimed;
3723 	int loop = 0;
3724 	struct mem_cgroup_tree_per_node *mctz;
3725 	unsigned long excess;
3726 
3727 	if (lru_gen_enabled())
3728 		return 0;
3729 
3730 	if (order > 0)
3731 		return 0;
3732 
3733 	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3734 
3735 	/*
3736 	 * Do not even bother to check the largest node if the root
3737 	 * is empty. Do it lockless to prevent lock bouncing. Races
3738 	 * are acceptable as soft limit is best effort anyway.
3739 	 */
3740 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3741 		return 0;
3742 
3743 	/*
3744 	 * This loop can run for a while, especially if mem_cgroups
3745 	 * continuously keep exceeding their soft limit and putting the
3746 	 * system under pressure.
3747 	 */
3748 	do {
3749 		if (next_mz)
3750 			mz = next_mz;
3751 		else
3752 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3753 		if (!mz)
3754 			break;
3755 
3756 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3757 						    gfp_mask, total_scanned);
3758 		nr_reclaimed += reclaimed;
3759 		spin_lock_irq(&mctz->lock);
3760 
3761 		/*
3762 	 * If we failed to reclaim anything from this memory cgroup,
3763 	 * it is time to move on to the next cgroup.
3764 		 */
3765 		next_mz = NULL;
3766 		if (!reclaimed)
3767 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3768 
3769 		excess = soft_limit_excess(mz->memcg);
3770 		/*
3771 		 * One school of thought says that we should not add
3772 		 * back the node to the tree if reclaim returns 0.
3773 	 * But our reclaim could return 0 simply because, due
3774 	 * to the reclaim priority, we are exposing a smaller
3775 	 * subset of memory to reclaim from. Consider this a
3776 	 * longer-term TODO.
3777 		 */
3778 		/* If excess == 0, no tree ops */
3779 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3780 		spin_unlock_irq(&mctz->lock);
3781 		css_put(&mz->memcg->css);
3782 		loop++;
3783 		/*
3784 		 * Could not reclaim anything and there are no more
3785 		 * mem cgroups to try or we seem to be looping without
3786 		 * reclaiming anything.
3787 		 */
3788 		if (!nr_reclaimed &&
3789 			(next_mz == NULL ||
3790 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3791 			break;
3792 	} while (!nr_reclaimed);
3793 	if (next_mz)
3794 		css_put(&next_mz->memcg->css);
3795 	return nr_reclaimed;
3796 }
3797 
3798 /*
3799  * Reclaims as many pages from the given memcg as possible.
3800  *
3801  * Caller is responsible for holding css reference for memcg.
3802  */
3803 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3804 {
3805 	int nr_retries = MAX_RECLAIM_RETRIES;
3806 
3807 	/* we call try-to-free pages to make this cgroup empty */
3808 	lru_add_drain_all();
3809 
3810 	drain_all_stock(memcg);
3811 
3812 	/* try to free all pages in this cgroup */
3813 	while (nr_retries && page_counter_read(&memcg->memory)) {
3814 		if (signal_pending(current))
3815 			return -EINTR;
3816 
3817 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3818 						  MEMCG_RECLAIM_MAY_SWAP))
3819 			nr_retries--;
3820 	}
3821 
3822 	return 0;
3823 }
3824 
3825 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3826 					    char *buf, size_t nbytes,
3827 					    loff_t off)
3828 {
3829 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3830 
3831 	if (mem_cgroup_is_root(memcg))
3832 		return -EINVAL;
3833 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3834 }
3835 
3836 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3837 				     struct cftype *cft)
3838 {
3839 	return 1;
3840 }
3841 
3842 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3843 				      struct cftype *cft, u64 val)
3844 {
3845 	if (val == 1)
3846 		return 0;
3847 
3848 	pr_warn_once("Non-hierarchical mode is deprecated. "
3849 		     "Please report your usecase to linux-mm@kvack.org if you "
3850 		     "depend on this functionality.\n");
3851 
3852 	return -EINVAL;
3853 }
3854 
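/*
 * Return @memcg's memory (or memory+swap) usage in pages.  The root
 * cgroup's usage is approximated from global VM counters.
 */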
3855 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3856 {
3857 	unsigned long val;
3858 
3859 	if (mem_cgroup_is_root(memcg)) {
3860 		/*
3861 		 * Approximate root's usage from global state. This isn't
3862 		 * perfect, but the root usage was always an approximation.
3863 		 */
3864 		val = global_node_page_state(NR_FILE_PAGES) +
3865 			global_node_page_state(NR_ANON_MAPPED);
3866 		if (swap)
3867 			val += total_swap_pages - get_nr_swap_pages();
3868 	} else {
3869 		if (!swap)
3870 			val = page_counter_read(&memcg->memory);
3871 		else
3872 			val = page_counter_read(&memcg->memsw);
3873 	}
3874 	return val;
3875 }
3876 
3877 enum {
3878 	RES_USAGE,
3879 	RES_LIMIT,
3880 	RES_MAX_USAGE,
3881 	RES_FAILCNT,
3882 	RES_SOFT_LIMIT,
3883 };
3884 
3885 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
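/*
 * Read handler for the cgroup1 usage/limit/watermark/failcnt files.  The
 * page counter and attribute are encoded in cft->private; all values
 * except failcnt are reported in bytes.
 */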
3886 			       struct cftype *cft)
3887 {
3888 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3889 	struct page_counter *counter;
3890 
3891 	switch (MEMFILE_TYPE(cft->private)) {
3892 	case _MEM:
3893 		counter = &memcg->memory;
3894 		break;
3895 	case _MEMSWAP:
3896 		counter = &memcg->memsw;
3897 		break;
3898 	case _KMEM:
3899 		counter = &memcg->kmem;
3900 		break;
3901 	case _TCP:
3902 		counter = &memcg->tcpmem;
3903 		break;
3904 	default:
3905 		BUG();
3906 	}
3907 
3908 	switch (MEMFILE_ATTR(cft->private)) {
3909 	case RES_USAGE:
3910 		if (counter == &memcg->memory)
3911 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3912 		if (counter == &memcg->memsw)
3913 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3914 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3915 	case RES_LIMIT:
3916 		return (u64)counter->max * PAGE_SIZE;
3917 	case RES_MAX_USAGE:
3918 		return (u64)counter->watermark * PAGE_SIZE;
3919 	case RES_FAILCNT:
3920 		return counter->failcnt;
3921 	case RES_SOFT_LIMIT:
3922 		return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3923 	default:
3924 		BUG();
3925 	}
3926 }
3927 
3928 /*
3929  * This function doesn't do anything useful. Its only job is to provide a read
3930  * handler for a file so that cgroup_file_mode() will add read permissions.
3931  */
3932 static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3933 				     __always_unused void *v)
3934 {
3935 	return -EINVAL;
3936 }
3937 
3938 #ifdef CONFIG_MEMCG_KMEM
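/*
 * Enable kernel memory accounting for a newly onlined @memcg: allocate an
 * obj_cgroup, publish it via memcg->objcg, and turn on the
 * memcg_kmem_online static key.  The root cgroup is skipped, as is
 * everything when kmem accounting is disabled.
 */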
3939 static int memcg_online_kmem(struct mem_cgroup *memcg)
3940 {
3941 	struct obj_cgroup *objcg;
3942 
3943 	if (mem_cgroup_kmem_disabled())
3944 		return 0;
3945 
3946 	if (unlikely(mem_cgroup_is_root(memcg)))
3947 		return 0;
3948 
3949 	objcg = obj_cgroup_alloc();
3950 	if (!objcg)
3951 		return -ENOMEM;
3952 
3953 	objcg->memcg = memcg;
3954 	rcu_assign_pointer(memcg->objcg, objcg);
3955 	obj_cgroup_get(objcg);
3956 	memcg->orig_objcg = objcg;
3957 
3958 	static_branch_enable(&memcg_kmem_online_key);
3959 
3960 	memcg->kmemcg_id = memcg->id.id;
3961 
3962 	return 0;
3963 }
3964 
3965 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3966 {
3967 	struct mem_cgroup *parent;
3968 
3969 	if (mem_cgroup_kmem_disabled())
3970 		return;
3971 
3972 	if (unlikely(mem_cgroup_is_root(memcg)))
3973 		return;
3974 
3975 	parent = parent_mem_cgroup(memcg);
3976 	if (!parent)
3977 		parent = root_mem_cgroup;
3978 
3979 	memcg_reparent_objcgs(memcg, parent);
3980 
3981 	/*
3982 	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3983 	 * corresponding to this cgroup are guaranteed to remain empty.
3984 	 * The ordering is imposed by list_lru_node->lock taken by
3985 	 * memcg_reparent_list_lrus().
3986 	 */
3987 	memcg_reparent_list_lrus(memcg, parent);
3988 }
3989 #else
3990 static int memcg_online_kmem(struct mem_cgroup *memcg)
3991 {
3992 	return 0;
3993 }
3994 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3995 {
3996 }
3997 #endif /* CONFIG_MEMCG_KMEM */
3998 
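/*
 * Set the cgroup1 tcp memory limit.  The first successful write also
 * marks the memcg as tcpmem_active and enables the socket accounting
 * static key; see the ordering comment below.
 */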
3999 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
4000 {
4001 	int ret;
4002 
4003 	mutex_lock(&memcg_max_mutex);
4004 
4005 	ret = page_counter_set_max(&memcg->tcpmem, max);
4006 	if (ret)
4007 		goto out;
4008 
4009 	if (!memcg->tcpmem_active) {
4010 		/*
4011 		 * The active flag needs to be written after the static_key
4012 		 * update. This is what guarantees that the socket activation
4013 		 * function is the last one to run. See mem_cgroup_sk_alloc()
4014 		 * for details, and note that we don't mark any socket as
4015 		 * belonging to this memcg until that flag is up.
4016 		 *
4017 		 * We need to do this, because static_keys will span multiple
4018 		 * sites, but we can't control their order. If we mark a socket
4019 		 * as accounted, but the accounting functions are not patched in
4020 		 * yet, we'll lose accounting.
4021 		 *
4022 		 * We never race with the readers in mem_cgroup_sk_alloc(),
4023 		 * because when this value changes, the code to process it is not
4024 		 * patched in yet.
4025 		 */
4026 		static_branch_inc(&memcg_sockets_enabled_key);
4027 		memcg->tcpmem_active = true;
4028 	}
4029 out:
4030 	mutex_unlock(&memcg_max_mutex);
4031 	return ret;
4032 }
4033 
4034 /*
4035  * Write handler for the cgroup1 limit files (RES_LIMIT for memory, memsw,
4036  * kmem and tcp) and for the soft limit (RES_SOFT_LIMIT).
4037  */
4038 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4039 				char *buf, size_t nbytes, loff_t off)
4040 {
4041 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4042 	unsigned long nr_pages;
4043 	int ret;
4044 
4045 	buf = strstrip(buf);
4046 	ret = page_counter_memparse(buf, "-1", &nr_pages);
4047 	if (ret)
4048 		return ret;
4049 
4050 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4051 	case RES_LIMIT:
4052 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4053 			ret = -EINVAL;
4054 			break;
4055 		}
4056 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
4057 		case _MEM:
4058 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
4059 			break;
4060 		case _MEMSWAP:
4061 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
4062 			break;
4063 		case _KMEM:
4064 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4065 				     "Writing any value to this file has no effect. "
4066 				     "Please report your usecase to linux-mm@kvack.org if you "
4067 				     "depend on this functionality.\n");
4068 			ret = 0;
4069 			break;
4070 		case _TCP:
4071 			ret = memcg_update_tcp_max(memcg, nr_pages);
4072 			break;
4073 		}
4074 		break;
4075 	case RES_SOFT_LIMIT:
4076 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4077 			ret = -EOPNOTSUPP;
4078 		} else {
4079 			WRITE_ONCE(memcg->soft_limit, nr_pages);
4080 			ret = 0;
4081 		}
4082 		break;
4083 	}
4084 	return ret ?: nbytes;
4085 }
4086 
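/*
 * Write handler for the cgroup1 *.max_usage_in_bytes and *.failcnt files:
 * resets the selected counter's watermark or failure count.
 */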
4087 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4088 				size_t nbytes, loff_t off)
4089 {
4090 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4091 	struct page_counter *counter;
4092 
4093 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
4094 	case _MEM:
4095 		counter = &memcg->memory;
4096 		break;
4097 	case _MEMSWAP:
4098 		counter = &memcg->memsw;
4099 		break;
4100 	case _KMEM:
4101 		counter = &memcg->kmem;
4102 		break;
4103 	case _TCP:
4104 		counter = &memcg->tcpmem;
4105 		break;
4106 	default:
4107 		BUG();
4108 	}
4109 
4110 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4111 	case RES_MAX_USAGE:
4112 		page_counter_reset_watermark(counter);
4113 		break;
4114 	case RES_FAILCNT:
4115 		counter->failcnt = 0;
4116 		break;
4117 	default:
4118 		BUG();
4119 	}
4120 
4121 	return nbytes;
4122 }
4123 
4124 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4125 					struct cftype *cft)
4126 {
4127 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4128 }
4129 
4130 #ifdef CONFIG_MMU
4131 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4132 					struct cftype *cft, u64 val)
4133 {
4134 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4135 
4136 	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4137 		     "Please report your usecase to linux-mm@kvack.org if you "
4138 		     "depend on this functionality.\n");
4139 
4140 	if (val & ~MOVE_MASK)
4141 		return -EINVAL;
4142 
4143 	/*
4144 	 * No kind of locking is needed in here, because ->can_attach() will
4145 	 * check this value once in the beginning of the process, and then carry
4146 	 * on with stale data. This means that changes to this value will only
4147 	 * affect task migrations starting after the change.
4148 	 */
4149 	memcg->move_charge_at_immigrate = val;
4150 	return 0;
4151 }
4152 #else
4153 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4154 					struct cftype *cft, u64 val)
4155 {
4156 	return -ENOSYS;
4157 }
4158 #endif
4159 
4160 #ifdef CONFIG_NUMA
4161 
4162 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4163 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4164 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
4165 
4166 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4167 				int nid, unsigned int lru_mask, bool tree)
4168 {
4169 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4170 	unsigned long nr = 0;
4171 	enum lru_list lru;
4172 
4173 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
4174 
4175 	for_each_lru(lru) {
4176 		if (!(BIT(lru) & lru_mask))
4177 			continue;
4178 		if (tree)
4179 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4180 		else
4181 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4182 	}
4183 	return nr;
4184 }
4185 
4186 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4187 					     unsigned int lru_mask,
4188 					     bool tree)
4189 {
4190 	unsigned long nr = 0;
4191 	enum lru_list lru;
4192 
4193 	for_each_lru(lru) {
4194 		if (!(BIT(lru) & lru_mask))
4195 			continue;
4196 		if (tree)
4197 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4198 		else
4199 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4200 	}
4201 	return nr;
4202 }
4203 
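/*
 * Show memory.numa_stat: per-node LRU page counts for this cgroup, first
 * local to the cgroup and then including its descendants
 * ("hierarchical_*").
 */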
4204 static int memcg_numa_stat_show(struct seq_file *m, void *v)
4205 {
4206 	struct numa_stat {
4207 		const char *name;
4208 		unsigned int lru_mask;
4209 	};
4210 
4211 	static const struct numa_stat stats[] = {
4212 		{ "total", LRU_ALL },
4213 		{ "file", LRU_ALL_FILE },
4214 		{ "anon", LRU_ALL_ANON },
4215 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4216 	};
4217 	const struct numa_stat *stat;
4218 	int nid;
4219 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4220 
4221 	mem_cgroup_flush_stats(memcg);
4222 
4223 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4224 		seq_printf(m, "%s=%lu", stat->name,
4225 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4226 						   false));
4227 		for_each_node_state(nid, N_MEMORY)
4228 			seq_printf(m, " N%d=%lu", nid,
4229 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4230 							stat->lru_mask, false));
4231 		seq_putc(m, '\n');
4232 	}
4233 
4234 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4235 
4236 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4237 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4238 						   true));
4239 		for_each_node_state(nid, N_MEMORY)
4240 			seq_printf(m, " N%d=%lu", nid,
4241 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4242 							stat->lru_mask, true));
4243 		seq_putc(m, '\n');
4244 	}
4245 
4246 	return 0;
4247 }
4248 #endif /* CONFIG_NUMA */
4249 
4250 static const unsigned int memcg1_stats[] = {
4251 	NR_FILE_PAGES,
4252 	NR_ANON_MAPPED,
4253 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4254 	NR_ANON_THPS,
4255 #endif
4256 	NR_SHMEM,
4257 	NR_FILE_MAPPED,
4258 	NR_FILE_DIRTY,
4259 	NR_WRITEBACK,
4260 	WORKINGSET_REFAULT_ANON,
4261 	WORKINGSET_REFAULT_FILE,
4262 #ifdef CONFIG_SWAP
4263 	MEMCG_SWAP,
4264 	NR_SWAPCACHE,
4265 #endif
4266 };
4267 
4268 static const char *const memcg1_stat_names[] = {
4269 	"cache",
4270 	"rss",
4271 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4272 	"rss_huge",
4273 #endif
4274 	"shmem",
4275 	"mapped_file",
4276 	"dirty",
4277 	"writeback",
4278 	"workingset_refault_anon",
4279 	"workingset_refault_file",
4280 #ifdef CONFIG_SWAP
4281 	"swap",
4282 	"swapcached",
4283 #endif
4284 };
4285 
4286 /* Universal VM events cgroup1 shows, original sort order */
4287 static const unsigned int memcg1_events[] = {
4288 	PGPGIN,
4289 	PGPGOUT,
4290 	PGFAULT,
4291 	PGMAJFAULT,
4292 };
4293 
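/*
 * Format the cgroup1 memory.stat output: local stats, events and LRU
 * sizes, the hierarchical limits, and the hierarchical "total_*"
 * counterparts of the same counters.
 */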
4294 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4295 {
4296 	unsigned long memory, memsw;
4297 	struct mem_cgroup *mi;
4298 	unsigned int i;
4299 
4300 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4301 
4302 	mem_cgroup_flush_stats(memcg);
4303 
4304 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4305 		unsigned long nr;
4306 
4307 		nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4308 		seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
4309 	}
4310 
4311 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4312 		seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4313 			       memcg_events_local(memcg, memcg1_events[i]));
4314 
4315 	for (i = 0; i < NR_LRU_LISTS; i++)
4316 		seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4317 			       memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4318 			       PAGE_SIZE);
4319 
4320 	/* Hierarchical information */
4321 	memory = memsw = PAGE_COUNTER_MAX;
4322 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4323 		memory = min(memory, READ_ONCE(mi->memory.max));
4324 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4325 	}
4326 	seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4327 		       (u64)memory * PAGE_SIZE);
4328 	seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4329 		       (u64)memsw * PAGE_SIZE);
4330 
4331 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4332 		unsigned long nr;
4333 
4334 		nr = memcg_page_state_output(memcg, memcg1_stats[i]);
4335 		seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4336 			       (u64)nr);
4337 	}
4338 
4339 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4340 		seq_buf_printf(s, "total_%s %llu\n",
4341 			       vm_event_name(memcg1_events[i]),
4342 			       (u64)memcg_events(memcg, memcg1_events[i]));
4343 
4344 	for (i = 0; i < NR_LRU_LISTS; i++)
4345 		seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4346 			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4347 			       PAGE_SIZE);
4348 
4349 #ifdef CONFIG_DEBUG_VM
4350 	{
4351 		pg_data_t *pgdat;
4352 		struct mem_cgroup_per_node *mz;
4353 		unsigned long anon_cost = 0;
4354 		unsigned long file_cost = 0;
4355 
4356 		for_each_online_pgdat(pgdat) {
4357 			mz = memcg->nodeinfo[pgdat->node_id];
4358 
4359 			anon_cost += mz->lruvec.anon_cost;
4360 			file_cost += mz->lruvec.file_cost;
4361 		}
4362 		seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4363 		seq_buf_printf(s, "file_cost %lu\n", file_cost);
4364 	}
4365 #endif
4366 }
4367 
4368 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4369 				      struct cftype *cft)
4370 {
4371 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4372 
4373 	return mem_cgroup_swappiness(memcg);
4374 }
4375 
4376 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4377 				       struct cftype *cft, u64 val)
4378 {
4379 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4380 
4381 	if (val > 200)
4382 		return -EINVAL;
4383 
4384 	if (!mem_cgroup_is_root(memcg))
4385 		WRITE_ONCE(memcg->swappiness, val);
4386 	else
4387 		WRITE_ONCE(vm_swappiness, val);
4388 
4389 	return 0;
4390 }
4391 
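/*
 * Signal the eventfds of all thresholds crossed (in either direction)
 * since the previous invocation and update current_threshold to point at
 * the highest threshold that is at or below the current usage.
 */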
4392 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4393 {
4394 	struct mem_cgroup_threshold_ary *t;
4395 	unsigned long usage;
4396 	int i;
4397 
4398 	rcu_read_lock();
4399 	if (!swap)
4400 		t = rcu_dereference(memcg->thresholds.primary);
4401 	else
4402 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4403 
4404 	if (!t)
4405 		goto unlock;
4406 
4407 	usage = mem_cgroup_usage(memcg, swap);
4408 
4409 	/*
4410 	 * current_threshold points to the threshold just below or equal to
4411 	 * usage. If that is not the case, a threshold was crossed after the
4412 	 * last call of __mem_cgroup_threshold().
4413 	 */
4414 	i = t->current_threshold;
4415 
4416 	/*
4417 	 * Iterate backward over array of thresholds starting from
4418 	 * current_threshold and check if a threshold is crossed.
4419 	 * If none of the thresholds below usage is crossed, we read
4420 	 * only one element of the array here.
4421 	 */
4422 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4423 		eventfd_signal(t->entries[i].eventfd);
4424 
4425 	/* i = current_threshold + 1 */
4426 	i++;
4427 
4428 	/*
4429 	 * Iterate forward over array of thresholds starting from
4430 	 * current_threshold+1 and check if a threshold is crossed.
4431 	 * If none of the thresholds above usage is crossed, we read
4432 	 * only one element of the array here.
4433 	 */
4434 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4435 		eventfd_signal(t->entries[i].eventfd);
4436 
4437 	/* Update current_threshold */
4438 	t->current_threshold = i - 1;
4439 unlock:
4440 	rcu_read_unlock();
4441 }
4442 
4443 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4444 {
4445 	while (memcg) {
4446 		__mem_cgroup_threshold(memcg, false);
4447 		if (do_memsw_account())
4448 			__mem_cgroup_threshold(memcg, true);
4449 
4450 		memcg = parent_mem_cgroup(memcg);
4451 	}
4452 }
4453 
4454 static int compare_thresholds(const void *a, const void *b)
4455 {
4456 	const struct mem_cgroup_threshold *_a = a;
4457 	const struct mem_cgroup_threshold *_b = b;
4458 
4459 	if (_a->threshold > _b->threshold)
4460 		return 1;
4461 
4462 	if (_a->threshold < _b->threshold)
4463 		return -1;
4464 
4465 	return 0;
4466 }
4467 
4468 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4469 {
4470 	struct mem_cgroup_eventfd_list *ev;
4471 
4472 	spin_lock(&memcg_oom_lock);
4473 
4474 	list_for_each_entry(ev, &memcg->oom_notify, list)
4475 		eventfd_signal(ev->eventfd);
4476 
4477 	spin_unlock(&memcg_oom_lock);
4478 	return 0;
4479 }
4480 
4481 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4482 {
4483 	struct mem_cgroup *iter;
4484 
4485 	for_each_mem_cgroup_tree(iter, memcg)
4486 		mem_cgroup_oom_notify_cb(iter);
4487 }
4488 
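/*
 * Register a usage threshold event: allocate a new thresholds array with
 * room for the extra entry, copy in the existing entries, sort, recompute
 * current_threshold for the present usage, and publish the array with
 * rcu_assign_pointer().  The previous array is kept as the spare buffer.
 */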
4489 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4490 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4491 {
4492 	struct mem_cgroup_thresholds *thresholds;
4493 	struct mem_cgroup_threshold_ary *new;
4494 	unsigned long threshold;
4495 	unsigned long usage;
4496 	int i, size, ret;
4497 
4498 	ret = page_counter_memparse(args, "-1", &threshold);
4499 	if (ret)
4500 		return ret;
4501 
4502 	mutex_lock(&memcg->thresholds_lock);
4503 
4504 	if (type == _MEM) {
4505 		thresholds = &memcg->thresholds;
4506 		usage = mem_cgroup_usage(memcg, false);
4507 	} else if (type == _MEMSWAP) {
4508 		thresholds = &memcg->memsw_thresholds;
4509 		usage = mem_cgroup_usage(memcg, true);
4510 	} else
4511 		BUG();
4512 
4513 	/* Check if a threshold crossed before adding a new one */
4514 	if (thresholds->primary)
4515 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4516 
4517 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4518 
4519 	/* Allocate memory for new array of thresholds */
4520 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4521 	if (!new) {
4522 		ret = -ENOMEM;
4523 		goto unlock;
4524 	}
4525 	new->size = size;
4526 
4527 	/* Copy thresholds (if any) to new array */
4528 	if (thresholds->primary)
4529 		memcpy(new->entries, thresholds->primary->entries,
4530 		       flex_array_size(new, entries, size - 1));
4531 
4532 	/* Add new threshold */
4533 	new->entries[size - 1].eventfd = eventfd;
4534 	new->entries[size - 1].threshold = threshold;
4535 
4536 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4537 	sort(new->entries, size, sizeof(*new->entries),
4538 			compare_thresholds, NULL);
4539 
4540 	/* Find current threshold */
4541 	new->current_threshold = -1;
4542 	for (i = 0; i < size; i++) {
4543 		if (new->entries[i].threshold <= usage) {
4544 			/*
4545 			 * new->current_threshold will not be used until
4546 			 * rcu_assign_pointer(), so it's safe to increment
4547 			 * it here.
4548 			 */
4549 			++new->current_threshold;
4550 		} else
4551 			break;
4552 	}
4553 
4554 	/* Free old spare buffer and save old primary buffer as spare */
4555 	kfree(thresholds->spare);
4556 	thresholds->spare = thresholds->primary;
4557 
4558 	rcu_assign_pointer(thresholds->primary, new);
4559 
4560 	/* To be sure that nobody uses thresholds */
4561 	synchronize_rcu();
4562 
4563 unlock:
4564 	mutex_unlock(&memcg->thresholds_lock);
4565 
4566 	return ret;
4567 }
4568 
4569 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4570 	struct eventfd_ctx *eventfd, const char *args)
4571 {
4572 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4573 }
4574 
4575 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4576 	struct eventfd_ctx *eventfd, const char *args)
4577 {
4578 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4579 }
4580 
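/*
 * Unregister a usage threshold event: rebuild the array in the spare
 * buffer without the entries belonging to @eventfd and swap it in under
 * RCU.  If no thresholds remain, the primary pointer is set to NULL and
 * the spare buffer is freed.
 */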
4581 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4582 	struct eventfd_ctx *eventfd, enum res_type type)
4583 {
4584 	struct mem_cgroup_thresholds *thresholds;
4585 	struct mem_cgroup_threshold_ary *new;
4586 	unsigned long usage;
4587 	int i, j, size, entries;
4588 
4589 	mutex_lock(&memcg->thresholds_lock);
4590 
4591 	if (type == _MEM) {
4592 		thresholds = &memcg->thresholds;
4593 		usage = mem_cgroup_usage(memcg, false);
4594 	} else if (type == _MEMSWAP) {
4595 		thresholds = &memcg->memsw_thresholds;
4596 		usage = mem_cgroup_usage(memcg, true);
4597 	} else
4598 		BUG();
4599 
4600 	if (!thresholds->primary)
4601 		goto unlock;
4602 
4603 	/* Check if a threshold crossed before removing */
4604 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4605 
4606 	/* Calculate the new number of thresholds */
4607 	size = entries = 0;
4608 	for (i = 0; i < thresholds->primary->size; i++) {
4609 		if (thresholds->primary->entries[i].eventfd != eventfd)
4610 			size++;
4611 		else
4612 			entries++;
4613 	}
4614 
4615 	new = thresholds->spare;
4616 
4617 	/* If no items related to eventfd have been cleared, nothing to do */
4618 	if (!entries)
4619 		goto unlock;
4620 
4621 	/* Set thresholds array to NULL if we don't have thresholds */
4622 	if (!size) {
4623 		kfree(new);
4624 		new = NULL;
4625 		goto swap_buffers;
4626 	}
4627 
4628 	new->size = size;
4629 
4630 	/* Copy thresholds and find current threshold */
4631 	new->current_threshold = -1;
4632 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4633 		if (thresholds->primary->entries[i].eventfd == eventfd)
4634 			continue;
4635 
4636 		new->entries[j] = thresholds->primary->entries[i];
4637 		if (new->entries[j].threshold <= usage) {
4638 			/*
4639 			 * new->current_threshold will not be used
4640 			 * until rcu_assign_pointer(), so it's safe to increment
4641 			 * it here.
4642 			 */
4643 			++new->current_threshold;
4644 		}
4645 		j++;
4646 	}
4647 
4648 swap_buffers:
4649 	/* Swap primary and spare array */
4650 	thresholds->spare = thresholds->primary;
4651 
4652 	rcu_assign_pointer(thresholds->primary, new);
4653 
4654 	/* To be sure that nobody uses thresholds */
4655 	synchronize_rcu();
4656 
4657 	/* If all events are unregistered, free the spare array */
4658 	if (!new) {
4659 		kfree(thresholds->spare);
4660 		thresholds->spare = NULL;
4661 	}
4662 unlock:
4663 	mutex_unlock(&memcg->thresholds_lock);
4664 }
4665 
4666 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4667 	struct eventfd_ctx *eventfd)
4668 {
4669 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4670 }
4671 
4672 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4673 	struct eventfd_ctx *eventfd)
4674 {
4675 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4676 }
4677 
4678 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4679 	struct eventfd_ctx *eventfd, const char *args)
4680 {
4681 	struct mem_cgroup_eventfd_list *event;
4682 
4683 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4684 	if (!event)
4685 		return -ENOMEM;
4686 
4687 	spin_lock(&memcg_oom_lock);
4688 
4689 	event->eventfd = eventfd;
4690 	list_add(&event->list, &memcg->oom_notify);
4691 
4692 	/* already in OOM ? */
4693 	if (memcg->under_oom)
4694 		eventfd_signal(eventfd);
4695 	spin_unlock(&memcg_oom_lock);
4696 
4697 	return 0;
4698 }
4699 
4700 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4701 	struct eventfd_ctx *eventfd)
4702 {
4703 	struct mem_cgroup_eventfd_list *ev, *tmp;
4704 
4705 	spin_lock(&memcg_oom_lock);
4706 
4707 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4708 		if (ev->eventfd == eventfd) {
4709 			list_del(&ev->list);
4710 			kfree(ev);
4711 		}
4712 	}
4713 
4714 	spin_unlock(&memcg_oom_lock);
4715 }
4716 
4717 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4718 {
4719 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4720 
4721 	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4722 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4723 	seq_printf(sf, "oom_kill %lu\n",
4724 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4725 	return 0;
4726 }
4727 
4728 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4729 	struct cftype *cft, u64 val)
4730 {
4731 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4732 
4733 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4734 	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4735 		return -EINVAL;
4736 
4737 	WRITE_ONCE(memcg->oom_kill_disable, val);
4738 	if (!val)
4739 		memcg_oom_recover(memcg);
4740 
4741 	return 0;
4742 }
4743 
4744 #ifdef CONFIG_CGROUP_WRITEBACK
4745 
4746 #include <trace/events/writeback.h>
4747 
4748 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4749 {
4750 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4751 }
4752 
4753 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4754 {
4755 	wb_domain_exit(&memcg->cgwb_domain);
4756 }
4757 
4758 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4759 {
4760 	wb_domain_size_changed(&memcg->cgwb_domain);
4761 }
4762 
4763 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4764 {
4765 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4766 
4767 	if (!memcg->css.parent)
4768 		return NULL;
4769 
4770 	return &memcg->cgwb_domain;
4771 }
4772 
4773 /**
4774  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4775  * @wb: bdi_writeback in question
4776  * @pfilepages: out parameter for number of file pages
4777  * @pheadroom: out parameter for number of allocatable pages according to memcg
4778  * @pdirty: out parameter for number of dirty pages
4779  * @pwriteback: out parameter for number of pages under writeback
4780  *
4781  * Determine the numbers of file, headroom, dirty, and writeback pages in
4782  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4783  * is a bit more involved.
4784  *
4785  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4786  * headroom is calculated as the lowest headroom of itself and the
4787  * ancestors.  Note that this doesn't consider the actual amount of
4788  * available memory in the system.  The caller should further cap
4789  * *@pheadroom accordingly.
4790  */
4791 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4792 			 unsigned long *pheadroom, unsigned long *pdirty,
4793 			 unsigned long *pwriteback)
4794 {
4795 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4796 	struct mem_cgroup *parent;
4797 
4798 	mem_cgroup_flush_stats(memcg);
4799 
4800 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4801 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4802 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4803 			memcg_page_state(memcg, NR_ACTIVE_FILE);
4804 
4805 	*pheadroom = PAGE_COUNTER_MAX;
4806 	while ((parent = parent_mem_cgroup(memcg))) {
4807 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4808 					    READ_ONCE(memcg->memory.high));
4809 		unsigned long used = page_counter_read(&memcg->memory);
4810 
4811 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4812 		memcg = parent;
4813 	}
4814 }
4815 
4816 /*
4817  * Foreign dirty flushing
4818  *
4819  * There's an inherent mismatch between memcg and writeback.  The former
4820  * tracks ownership per-page while the latter per-inode.  This was a
4821  * deliberate design decision because honoring per-page ownership in the
4822  * writeback path is complicated, may lead to higher CPU and IO overheads
4823  * writeback path is complicated, may lead to higher CPU and IO overheads,
4824  * and was deemed unnecessary given that write-sharing an inode across
4825  *
4826  * Combined with inode majority-writer ownership switching, this works well
4827  * enough in most cases but there are some pathological cases.  For
4828  * example, let's say there are two cgroups A and B which keep writing to
4829  * different but confined parts of the same inode.  B owns the inode and
4830  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4831  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4832  * triggering background writeback.  A will be slowed down without a way to
4833  * make writeback of the dirty pages happen.
4834  *
4835  * Conditions like the above can lead to a cgroup getting repeatedly and
4836  * severely throttled after making some progress after each
4837  * dirty_expire_interval while the underlying IO device is almost
4838  * completely idle.
4839  *
4840  * Solving this problem completely requires matching the ownership tracking
4841  * granularities between memcg and writeback in either direction.  However,
4842  * the more egregious behaviors can be avoided by simply remembering the
4843  * most recent foreign dirtying events and initiating remote flushes on
4844  * them when local writeback isn't enough to keep the memory clean enough.
4845  *
4846  * The following two functions implement such mechanism.  When a foreign
4847  * page - a page whose memcg and writeback ownerships don't match - is
4848  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4849  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4850  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4851  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4852  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4853  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4854  * limited to MEMCG_CGWB_FRN_CNT.
4855  *
4856  * The mechanism only remembers IDs and doesn't hold any object references.
4857  * As being wrong occasionally doesn't matter, updates and accesses to the
4858  * records are lockless and racy.
4859  */
4860 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4861 					     struct bdi_writeback *wb)
4862 {
4863 	struct mem_cgroup *memcg = folio_memcg(folio);
4864 	struct memcg_cgwb_frn *frn;
4865 	u64 now = get_jiffies_64();
4866 	u64 oldest_at = now;
4867 	int oldest = -1;
4868 	int i;
4869 
4870 	trace_track_foreign_dirty(folio, wb);
4871 
4872 	/*
4873 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4874 	 * using it.  If not replace the oldest one which isn't being
4875 	 * using it.  If not, replace the oldest one which isn't being
4876 	 */
4877 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4878 		frn = &memcg->cgwb_frn[i];
4879 		if (frn->bdi_id == wb->bdi->id &&
4880 		    frn->memcg_id == wb->memcg_css->id)
4881 			break;
4882 		if (time_before64(frn->at, oldest_at) &&
4883 		    atomic_read(&frn->done.cnt) == 1) {
4884 			oldest = i;
4885 			oldest_at = frn->at;
4886 		}
4887 	}
4888 
4889 	if (i < MEMCG_CGWB_FRN_CNT) {
4890 		/*
4891 		 * Re-using an existing one.  Update timestamp lazily to
4892 		 * avoid making the cacheline hot.  We want them to be
4893 		 * reasonably up-to-date and significantly shorter than
4894 		 * dirty_expire_interval as that's what expires the record.
4895 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4896 		 */
4897 		unsigned long update_intv =
4898 			min_t(unsigned long, HZ,
4899 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4900 
4901 		if (time_before64(frn->at, now - update_intv))
4902 			frn->at = now;
4903 	} else if (oldest >= 0) {
4904 		/* replace the oldest free one */
4905 		frn = &memcg->cgwb_frn[oldest];
4906 		frn->bdi_id = wb->bdi->id;
4907 		frn->memcg_id = wb->memcg_css->id;
4908 		frn->at = now;
4909 	}
4910 }
4911 
4912 /* issue foreign writeback flushes for recorded foreign dirtying events */
4913 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4914 {
4915 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4916 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4917 	u64 now = jiffies_64;
4918 	int i;
4919 
4920 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4921 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4922 
4923 		/*
4924 		 * If the record is older than dirty_expire_interval,
4925 		 * writeback on it has already started.  No need to kick it
4926 		 * off again.  Also, don't start a new one if there's
4927 		 * already one in flight.
4928 		 */
4929 		if (time_after64(frn->at, now - intv) &&
4930 		    atomic_read(&frn->done.cnt) == 1) {
4931 			frn->at = 0;
4932 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4933 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4934 					       WB_REASON_FOREIGN_FLUSH,
4935 					       &frn->done);
4936 		}
4937 	}
4938 }
4939 
4940 #else	/* CONFIG_CGROUP_WRITEBACK */
4941 
4942 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4943 {
4944 	return 0;
4945 }
4946 
4947 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4948 {
4949 }
4950 
4951 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4952 {
4953 }
4954 
4955 #endif	/* CONFIG_CGROUP_WRITEBACK */
4956 
4957 /*
4958  * DO NOT USE IN NEW FILES.
4959  *
4960  * "cgroup.event_control" implementation.
4961  *
4962  * This is way over-engineered.  It tries to support fully configurable
4963  * events for each user.  Such level of flexibility is completely
4964  * events for each user.  Such a level of flexibility is completely
4965  * unnecessary, especially in light of the planned unified hierarchy.
4966  * Please deprecate this and replace with something simpler if at all
4967  * possible.
4968  */
4969 
4970 /*
4971  * Unregister event and free resources.
4972  *
4973  * Gets called from workqueue.
4974  */
4975 static void memcg_event_remove(struct work_struct *work)
4976 {
4977 	struct mem_cgroup_event *event =
4978 		container_of(work, struct mem_cgroup_event, remove);
4979 	struct mem_cgroup *memcg = event->memcg;
4980 
4981 	remove_wait_queue(event->wqh, &event->wait);
4982 
4983 	event->unregister_event(memcg, event->eventfd);
4984 
4985 	/* Notify userspace the event is going away. */
4986 	eventfd_signal(event->eventfd);
4987 
4988 	eventfd_ctx_put(event->eventfd);
4989 	kfree(event);
4990 	css_put(&memcg->css);
4991 }
4992 
4993 /*
4994  * Gets called on EPOLLHUP on eventfd when user closes it.
4995  *
4996  * Called with wqh->lock held and interrupts disabled.
4997  */
4998 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4999 			    int sync, void *key)
5000 {
5001 	struct mem_cgroup_event *event =
5002 		container_of(wait, struct mem_cgroup_event, wait);
5003 	struct mem_cgroup *memcg = event->memcg;
5004 	__poll_t flags = key_to_poll(key);
5005 
5006 	if (flags & EPOLLHUP) {
5007 		/*
5008 		 * If the event has been detached at cgroup removal, we
5009 		 * can simply return knowing the other side will clean up
5010 		 * for us.
5011 		 *
5012 		 * We can't race against event freeing since the other
5013 		 * side will require wqh->lock via remove_wait_queue(),
5014 		 * which we hold.
5015 		 */
5016 		spin_lock(&memcg->event_list_lock);
5017 		if (!list_empty(&event->list)) {
5018 			list_del_init(&event->list);
5019 			/*
5020 			 * We are in atomic context, but cgroup_event_remove()
5021 			 * We are in atomic context, but memcg_event_remove()
5022 			 * may sleep, so we have to call it from a workqueue.
5023 			schedule_work(&event->remove);
5024 		}
5025 		spin_unlock(&memcg->event_list_lock);
5026 	}
5027 
5028 	return 0;
5029 }
5030 
5031 static void memcg_event_ptable_queue_proc(struct file *file,
5032 		wait_queue_head_t *wqh, poll_table *pt)
5033 {
5034 	struct mem_cgroup_event *event =
5035 		container_of(pt, struct mem_cgroup_event, pt);
5036 
5037 	event->wqh = wqh;
5038 	add_wait_queue(wqh, &event->wait);
5039 }
5040 
5041 /*
5042  * DO NOT USE IN NEW FILES.
5043  *
5044  * Parse input and register new cgroup event handler.
5045  *
5046  * Input must be in format '<event_fd> <control_fd> <args>'.
5047  * Interpretation of args is defined by control file implementation.
5048  */
5049 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5050 					 char *buf, size_t nbytes, loff_t off)
5051 {
5052 	struct cgroup_subsys_state *css = of_css(of);
5053 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5054 	struct mem_cgroup_event *event;
5055 	struct cgroup_subsys_state *cfile_css;
5056 	unsigned int efd, cfd;
5057 	struct fd efile;
5058 	struct fd cfile;
5059 	struct dentry *cdentry;
5060 	const char *name;
5061 	char *endp;
5062 	int ret;
5063 
5064 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
5065 		return -EOPNOTSUPP;
5066 
5067 	buf = strstrip(buf);
5068 
5069 	efd = simple_strtoul(buf, &endp, 10);
5070 	if (*endp != ' ')
5071 		return -EINVAL;
5072 	buf = endp + 1;
5073 
5074 	cfd = simple_strtoul(buf, &endp, 10);
5075 	if ((*endp != ' ') && (*endp != '\0'))
5076 		return -EINVAL;
5077 	buf = endp + 1;
5078 
5079 	event = kzalloc(sizeof(*event), GFP_KERNEL);
5080 	if (!event)
5081 		return -ENOMEM;
5082 
5083 	event->memcg = memcg;
5084 	INIT_LIST_HEAD(&event->list);
5085 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5086 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5087 	INIT_WORK(&event->remove, memcg_event_remove);
5088 
5089 	efile = fdget(efd);
5090 	if (!efile.file) {
5091 		ret = -EBADF;
5092 		goto out_kfree;
5093 	}
5094 
5095 	event->eventfd = eventfd_ctx_fileget(efile.file);
5096 	if (IS_ERR(event->eventfd)) {
5097 		ret = PTR_ERR(event->eventfd);
5098 		goto out_put_efile;
5099 	}
5100 
5101 	cfile = fdget(cfd);
5102 	if (!cfile.file) {
5103 		ret = -EBADF;
5104 		goto out_put_eventfd;
5105 	}
5106 
5107 	/* the process needs read permission on the control file */
5108 	/* AV: shouldn't we check that it's been opened for read instead? */
5109 	ret = file_permission(cfile.file, MAY_READ);
5110 	if (ret < 0)
5111 		goto out_put_cfile;
5112 
5113 	/*
5114 	 * The control file must be a regular cgroup1 file. As a regular cgroup
5115 	 * file can't be renamed, it's safe to access its name afterwards.
5116 	 */
5117 	cdentry = cfile.file->f_path.dentry;
5118 	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5119 		ret = -EINVAL;
5120 		goto out_put_cfile;
5121 	}
5122 
5123 	/*
5124 	 * Determine the event callbacks and set them in @event.  This used
5125 	 * to be done via struct cftype but cgroup core no longer knows
5126 	 * about these events.  The following is crude but the whole thing
5127 	 * is for compatibility anyway.
5128 	 *
5129 	 * DO NOT ADD NEW FILES.
5130 	 */
5131 	name = cdentry->d_name.name;
5132 
5133 	if (!strcmp(name, "memory.usage_in_bytes")) {
5134 		event->register_event = mem_cgroup_usage_register_event;
5135 		event->unregister_event = mem_cgroup_usage_unregister_event;
5136 	} else if (!strcmp(name, "memory.oom_control")) {
5137 		event->register_event = mem_cgroup_oom_register_event;
5138 		event->unregister_event = mem_cgroup_oom_unregister_event;
5139 	} else if (!strcmp(name, "memory.pressure_level")) {
5140 		event->register_event = vmpressure_register_event;
5141 		event->unregister_event = vmpressure_unregister_event;
5142 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5143 		event->register_event = memsw_cgroup_usage_register_event;
5144 		event->unregister_event = memsw_cgroup_usage_unregister_event;
5145 	} else {
5146 		ret = -EINVAL;
5147 		goto out_put_cfile;
5148 	}
5149 
5150 	/*
5151 	 * Verify that @cfile belongs to @css.  Also, remaining events are
5152 	 * automatically removed on cgroup destruction but the removal is
5153 	 * asynchronous, so take an extra ref on @css.
5154 	 */
5155 	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5156 					       &memory_cgrp_subsys);
5157 	ret = -EINVAL;
5158 	if (IS_ERR(cfile_css))
5159 		goto out_put_cfile;
5160 	if (cfile_css != css) {
5161 		css_put(cfile_css);
5162 		goto out_put_cfile;
5163 	}
5164 
5165 	ret = event->register_event(memcg, event->eventfd, buf);
5166 	if (ret)
5167 		goto out_put_css;
5168 
5169 	vfs_poll(efile.file, &event->pt);
5170 
5171 	spin_lock_irq(&memcg->event_list_lock);
5172 	list_add(&event->list, &memcg->event_list);
5173 	spin_unlock_irq(&memcg->event_list_lock);
5174 
5175 	fdput(cfile);
5176 	fdput(efile);
5177 
5178 	return nbytes;
5179 
5180 out_put_css:
5181 	css_put(css);
5182 out_put_cfile:
5183 	fdput(cfile);
5184 out_put_eventfd:
5185 	eventfd_ctx_put(event->eventfd);
5186 out_put_efile:
5187 	fdput(efile);
5188 out_kfree:
5189 	kfree(event);
5190 
5191 	return ret;
5192 }
5193 
5194 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5195 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5196 {
5197 	/*
5198 	 * Deprecated.
5199 	 * Please take a look at tools/cgroup/memcg_slabinfo.py.
5200 	 */
5201 	return 0;
5202 }
5203 #endif
5204 
5205 static int memory_stat_show(struct seq_file *m, void *v);
5206 
5207 static struct cftype mem_cgroup_legacy_files[] = {
5208 	{
5209 		.name = "usage_in_bytes",
5210 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5211 		.read_u64 = mem_cgroup_read_u64,
5212 	},
5213 	{
5214 		.name = "max_usage_in_bytes",
5215 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5216 		.write = mem_cgroup_reset,
5217 		.read_u64 = mem_cgroup_read_u64,
5218 	},
5219 	{
5220 		.name = "limit_in_bytes",
5221 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5222 		.write = mem_cgroup_write,
5223 		.read_u64 = mem_cgroup_read_u64,
5224 	},
5225 	{
5226 		.name = "soft_limit_in_bytes",
5227 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5228 		.write = mem_cgroup_write,
5229 		.read_u64 = mem_cgroup_read_u64,
5230 	},
5231 	{
5232 		.name = "failcnt",
5233 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5234 		.write = mem_cgroup_reset,
5235 		.read_u64 = mem_cgroup_read_u64,
5236 	},
5237 	{
5238 		.name = "stat",
5239 		.seq_show = memory_stat_show,
5240 	},
5241 	{
5242 		.name = "force_empty",
5243 		.write = mem_cgroup_force_empty_write,
5244 	},
5245 	{
5246 		.name = "use_hierarchy",
5247 		.write_u64 = mem_cgroup_hierarchy_write,
5248 		.read_u64 = mem_cgroup_hierarchy_read,
5249 	},
5250 	{
5251 		.name = "cgroup.event_control",		/* XXX: for compat */
5252 		.write = memcg_write_event_control,
5253 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5254 	},
5255 	{
5256 		.name = "swappiness",
5257 		.read_u64 = mem_cgroup_swappiness_read,
5258 		.write_u64 = mem_cgroup_swappiness_write,
5259 	},
5260 	{
5261 		.name = "move_charge_at_immigrate",
5262 		.read_u64 = mem_cgroup_move_charge_read,
5263 		.write_u64 = mem_cgroup_move_charge_write,
5264 	},
5265 	{
5266 		.name = "oom_control",
5267 		.seq_show = mem_cgroup_oom_control_read,
5268 		.write_u64 = mem_cgroup_oom_control_write,
5269 	},
5270 	{
5271 		.name = "pressure_level",
5272 		.seq_show = mem_cgroup_dummy_seq_show,
5273 	},
5274 #ifdef CONFIG_NUMA
5275 	{
5276 		.name = "numa_stat",
5277 		.seq_show = memcg_numa_stat_show,
5278 	},
5279 #endif
5280 	{
5281 		.name = "kmem.limit_in_bytes",
5282 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5283 		.write = mem_cgroup_write,
5284 		.read_u64 = mem_cgroup_read_u64,
5285 	},
5286 	{
5287 		.name = "kmem.usage_in_bytes",
5288 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5289 		.read_u64 = mem_cgroup_read_u64,
5290 	},
5291 	{
5292 		.name = "kmem.failcnt",
5293 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5294 		.write = mem_cgroup_reset,
5295 		.read_u64 = mem_cgroup_read_u64,
5296 	},
5297 	{
5298 		.name = "kmem.max_usage_in_bytes",
5299 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5300 		.write = mem_cgroup_reset,
5301 		.read_u64 = mem_cgroup_read_u64,
5302 	},
5303 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5304 	{
5305 		.name = "kmem.slabinfo",
5306 		.seq_show = mem_cgroup_slab_show,
5307 	},
5308 #endif
5309 	{
5310 		.name = "kmem.tcp.limit_in_bytes",
5311 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5312 		.write = mem_cgroup_write,
5313 		.read_u64 = mem_cgroup_read_u64,
5314 	},
5315 	{
5316 		.name = "kmem.tcp.usage_in_bytes",
5317 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5318 		.read_u64 = mem_cgroup_read_u64,
5319 	},
5320 	{
5321 		.name = "kmem.tcp.failcnt",
5322 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5323 		.write = mem_cgroup_reset,
5324 		.read_u64 = mem_cgroup_read_u64,
5325 	},
5326 	{
5327 		.name = "kmem.tcp.max_usage_in_bytes",
5328 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5329 		.write = mem_cgroup_reset,
5330 		.read_u64 = mem_cgroup_read_u64,
5331 	},
5332 	{ },	/* terminate */
5333 };
5334 
5335 /*
5336  * Private memory cgroup IDR
5337  *
5338  * Swap-out records and page cache shadow entries need to store memcg
5339  * references in constrained space, so we maintain an ID space that is
5340  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5341  * memory-controlled cgroups to 64k.
5342  *
5343  * However, there usually are many references to the offline CSS after
5344  * the cgroup has been destroyed, such as page cache or reclaimable
5345  * slab objects, that don't need to hang on to the ID. We want to keep
5346  * those dead CSS from occupying IDs, or we might quickly exhaust the
5347  * relatively small ID space and prevent the creation of new cgroups
5348  * even when there are much fewer than 64k cgroups - possibly none.
5349  *
5350  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5351  * be freed and recycled when it's no longer needed, which is usually
5352  * when the CSS is offlined.
5353  *
5354  * The only exception to that are records of swapped out tmpfs/shmem
5355  * pages that need to be attributed to live ancestors on swapin. But
5356  * those references are manageable from userspace.
5357  */
5358 
5359 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5360 static DEFINE_IDR(mem_cgroup_idr);
5361 
5362 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5363 {
5364 	if (memcg->id.id > 0) {
5365 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5366 		memcg->id.id = 0;
5367 	}
5368 }
5369 
5370 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5371 						  unsigned int n)
5372 {
5373 	refcount_add(n, &memcg->id.ref);
5374 }
5375 
5376 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5377 {
5378 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5379 		mem_cgroup_id_remove(memcg);
5380 
5381 		/* Memcg ID pins CSS */
5382 		css_put(&memcg->css);
5383 	}
5384 }
5385 
5386 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5387 {
5388 	mem_cgroup_id_put_many(memcg, 1);
5389 }
5390 
5391 /**
5392  * mem_cgroup_from_id - look up a memcg from a memcg id
5393  * @id: the memcg id to look up
5394  *
5395  * Caller must hold rcu_read_lock().
5396  */
5397 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5398 {
5399 	WARN_ON_ONCE(!rcu_read_lock_held());
5400 	return idr_find(&mem_cgroup_idr, id);
5401 }
5402 
5403 #ifdef CONFIG_SHRINKER_DEBUG
5404 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5405 {
5406 	struct cgroup *cgrp;
5407 	struct cgroup_subsys_state *css;
5408 	struct mem_cgroup *memcg;
5409 
5410 	cgrp = cgroup_get_from_id(ino);
5411 	if (IS_ERR(cgrp))
5412 		return ERR_CAST(cgrp);
5413 
5414 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5415 	if (css)
5416 		memcg = container_of(css, struct mem_cgroup, css);
5417 	else
5418 		memcg = ERR_PTR(-ENOENT);
5419 
5420 	cgroup_put(cgrp);
5421 
5422 	return memcg;
5423 }
5424 #endif
5425 
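/*
 * Allocate and initialise the per-node state of @memcg on @node: the
 * lruvec and its per-CPU statistics.  Returns 0 on success, 1 on failure.
 */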
5426 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5427 {
5428 	struct mem_cgroup_per_node *pn;
5429 
5430 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5431 	if (!pn)
5432 		return 1;
5433 
5434 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5435 						   GFP_KERNEL_ACCOUNT);
5436 	if (!pn->lruvec_stats_percpu) {
5437 		kfree(pn);
5438 		return 1;
5439 	}
5440 
5441 	lruvec_init(&pn->lruvec);
5442 	pn->memcg = memcg;
5443 
5444 	memcg->nodeinfo[node] = pn;
5445 	return 0;
5446 }
5447 
5448 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5449 {
5450 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5451 
5452 	if (!pn)
5453 		return;
5454 
5455 	free_percpu(pn->lruvec_stats_percpu);
5456 	kfree(pn);
5457 }
5458 
5459 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5460 {
5461 	int node;
5462 
5463 	if (memcg->orig_objcg)
5464 		obj_cgroup_put(memcg->orig_objcg);
5465 
5466 	for_each_node(node)
5467 		free_mem_cgroup_per_node_info(memcg, node);
5468 	kfree(memcg->vmstats);
5469 	free_percpu(memcg->vmstats_percpu);
5470 	kfree(memcg);
5471 }
5472 
5473 static void mem_cgroup_free(struct mem_cgroup *memcg)
5474 {
5475 	lru_gen_exit_memcg(memcg);
5476 	memcg_wb_domain_exit(memcg);
5477 	__mem_cgroup_free(memcg);
5478 }
5479 
5480 static struct mem_cgroup *mem_cgroup_alloc(void)
5481 {
5482 	struct mem_cgroup *memcg;
5483 	int node;
5484 	int __maybe_unused i;
5485 	long error = -ENOMEM;
5486 
5487 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5488 	if (!memcg)
5489 		return ERR_PTR(error);
5490 
5491 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5492 				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5493 	if (memcg->id.id < 0) {
5494 		error = memcg->id.id;
5495 		goto fail;
5496 	}
5497 
5498 	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5499 	if (!memcg->vmstats)
5500 		goto fail;
5501 
5502 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5503 						 GFP_KERNEL_ACCOUNT);
5504 	if (!memcg->vmstats_percpu)
5505 		goto fail;
5506 
5507 	for_each_node(node)
5508 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5509 			goto fail;
5510 
5511 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5512 		goto fail;
5513 
5514 	INIT_WORK(&memcg->high_work, high_work_func);
5515 	INIT_LIST_HEAD(&memcg->oom_notify);
5516 	mutex_init(&memcg->thresholds_lock);
5517 	spin_lock_init(&memcg->move_lock);
5518 	vmpressure_init(&memcg->vmpressure);
5519 	INIT_LIST_HEAD(&memcg->event_list);
5520 	spin_lock_init(&memcg->event_list_lock);
5521 	memcg->socket_pressure = jiffies;
5522 #ifdef CONFIG_MEMCG_KMEM
5523 	memcg->kmemcg_id = -1;
5524 	INIT_LIST_HEAD(&memcg->objcg_list);
5525 #endif
5526 #ifdef CONFIG_CGROUP_WRITEBACK
5527 	INIT_LIST_HEAD(&memcg->cgwb_list);
5528 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5529 		memcg->cgwb_frn[i].done =
5530 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5531 #endif
5532 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5533 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5534 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5535 	memcg->deferred_split_queue.split_queue_len = 0;
5536 #endif
5537 	lru_gen_init_memcg(memcg);
5538 	return memcg;
5539 fail:
5540 	mem_cgroup_id_remove(memcg);
5541 	__mem_cgroup_free(memcg);
5542 	return ERR_PTR(error);
5543 }
5544 
5545 static struct cgroup_subsys_state * __ref
5546 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5547 {
5548 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5549 	struct mem_cgroup *memcg, *old_memcg;
5550 
5551 	old_memcg = set_active_memcg(parent);
5552 	memcg = mem_cgroup_alloc();
5553 	set_active_memcg(old_memcg);
5554 	if (IS_ERR(memcg))
5555 		return ERR_CAST(memcg);
5556 
5557 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5558 	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5559 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5560 	memcg->zswap_max = PAGE_COUNTER_MAX;
5561 	WRITE_ONCE(memcg->zswap_writeback,
5562 		!parent || READ_ONCE(parent->zswap_writeback));
5563 #endif
5564 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5565 	if (parent) {
5566 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5567 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5568 
5569 		page_counter_init(&memcg->memory, &parent->memory);
5570 		page_counter_init(&memcg->swap, &parent->swap);
5571 		page_counter_init(&memcg->kmem, &parent->kmem);
5572 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5573 	} else {
5574 		init_memcg_events();
5575 		page_counter_init(&memcg->memory, NULL);
5576 		page_counter_init(&memcg->swap, NULL);
5577 		page_counter_init(&memcg->kmem, NULL);
5578 		page_counter_init(&memcg->tcpmem, NULL);
5579 
5580 		root_mem_cgroup = memcg;
5581 		return &memcg->css;
5582 	}
5583 
5584 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5585 		static_branch_inc(&memcg_sockets_enabled_key);
5586 
5587 #if defined(CONFIG_MEMCG_KMEM)
5588 	if (!cgroup_memory_nobpf)
5589 		static_branch_inc(&memcg_bpf_enabled_key);
5590 #endif
5591 
5592 	return &memcg->css;
5593 }
5594 
5595 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5596 {
5597 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5598 
5599 	if (memcg_online_kmem(memcg))
5600 		goto remove_id;
5601 
5602 	/*
5603 	 * A memcg must be visible to expand_shrinker_info()
5604 	 * by the time the shrinker maps are allocated. So, we allocate
5605 	 * the maps here, where for_each_mem_cgroup() can't skip it.
5606 	 */
5607 	if (alloc_shrinker_info(memcg))
5608 		goto offline_kmem;
5609 
5610 	if (unlikely(mem_cgroup_is_root(memcg)))
5611 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5612 				   FLUSH_TIME);
5613 	lru_gen_online_memcg(memcg);
5614 
5615 	/* Online state pins memcg ID, memcg ID pins CSS */
5616 	refcount_set(&memcg->id.ref, 1);
5617 	css_get(css);
5618 
5619 	/*
5620 	 * Ensure mem_cgroup_from_id() works once we're fully online.
5621 	 *
5622 	 * We could do this earlier and require callers to filter with
5623 	 * css_tryget_online(). But right now there are no users that
5624 	 * need earlier access, and the workingset code relies on the
5625 	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5626 	 * publish it here at the end of onlining. This matches the
5627 	 * regular ID destruction during offlining.
5628 	 */
5629 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5630 
5631 	return 0;
5632 offline_kmem:
5633 	memcg_offline_kmem(memcg);
5634 remove_id:
5635 	mem_cgroup_id_remove(memcg);
5636 	return -ENOMEM;
5637 }
5638 
5639 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5640 {
5641 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5642 	struct mem_cgroup_event *event, *tmp;
5643 
5644 	/*
5645 	 * Unregister events and notify userspace.
5646 	 * Notify userspace about cgroup removing only after rmdir of cgroup
5647 	 * directory to avoid race between userspace and kernelspace.
5648 	 */
5649 	spin_lock_irq(&memcg->event_list_lock);
5650 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5651 		list_del_init(&event->list);
5652 		schedule_work(&event->remove);
5653 	}
5654 	spin_unlock_irq(&memcg->event_list_lock);
5655 
5656 	page_counter_set_min(&memcg->memory, 0);
5657 	page_counter_set_low(&memcg->memory, 0);
5658 
5659 	zswap_memcg_offline_cleanup(memcg);
5660 
5661 	memcg_offline_kmem(memcg);
5662 	reparent_shrinker_deferred(memcg);
5663 	wb_memcg_offline(memcg);
5664 	lru_gen_offline_memcg(memcg);
5665 
5666 	drain_all_stock(memcg);
5667 
5668 	mem_cgroup_id_put(memcg);
5669 }
5670 
5671 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5672 {
5673 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5674 
5675 	invalidate_reclaim_iterators(memcg);
5676 	lru_gen_release_memcg(memcg);
5677 }
5678 
5679 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5680 {
5681 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5682 	int __maybe_unused i;
5683 
5684 #ifdef CONFIG_CGROUP_WRITEBACK
5685 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5686 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5687 #endif
5688 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5689 		static_branch_dec(&memcg_sockets_enabled_key);
5690 
5691 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5692 		static_branch_dec(&memcg_sockets_enabled_key);
5693 
5694 #if defined(CONFIG_MEMCG_KMEM)
5695 	if (!cgroup_memory_nobpf)
5696 		static_branch_dec(&memcg_bpf_enabled_key);
5697 #endif
5698 
5699 	vmpressure_cleanup(&memcg->vmpressure);
5700 	cancel_work_sync(&memcg->high_work);
5701 	mem_cgroup_remove_from_trees(memcg);
5702 	free_shrinker_info(memcg);
5703 	mem_cgroup_free(memcg);
5704 }
5705 
5706 /**
5707  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5708  * @css: the target css
5709  *
5710  * Reset the states of the mem_cgroup associated with @css.  This is
5711  * invoked when the userland requests disabling on the default hierarchy
5712  * but the memcg is pinned through dependency.  The memcg should stop
5713  * applying policies and should revert to the vanilla state as it may be
5714  * made visible again.
5715  *
5716  * The current implementation only resets the essential configurations.
5717  * This needs to be expanded to cover all the visible parts.
5718  */
5719 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5720 {
5721 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5722 
5723 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5724 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5725 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5726 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5727 	page_counter_set_min(&memcg->memory, 0);
5728 	page_counter_set_low(&memcg->memory, 0);
5729 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5730 	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5731 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5732 	memcg_wb_domain_size_changed(memcg);
5733 }
5734 
5735 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5736 {
5737 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5738 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5739 	struct memcg_vmstats_percpu *statc;
5740 	long delta, delta_cpu, v;
5741 	int i, nid;
5742 
5743 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5744 
5745 	for (i = 0; i < MEMCG_NR_STAT; i++) {
5746 		/*
5747 		 * Collect the aggregated propagation counts of groups
5748 		 * below us. We're in a per-cpu loop here and this is
5749 		 * a global counter, so the first cycle will get them.
5750 		 */
5751 		delta = memcg->vmstats->state_pending[i];
5752 		if (delta)
5753 			memcg->vmstats->state_pending[i] = 0;
5754 
5755 		/* Add CPU changes on this level since the last flush */
5756 		delta_cpu = 0;
5757 		v = READ_ONCE(statc->state[i]);
5758 		if (v != statc->state_prev[i]) {
5759 			delta_cpu = v - statc->state_prev[i];
5760 			delta += delta_cpu;
5761 			statc->state_prev[i] = v;
5762 		}
5763 
5764 		/* Aggregate counts on this level and propagate upwards */
5765 		if (delta_cpu)
5766 			memcg->vmstats->state_local[i] += delta_cpu;
5767 
5768 		if (delta) {
5769 			memcg->vmstats->state[i] += delta;
5770 			if (parent)
5771 				parent->vmstats->state_pending[i] += delta;
5772 		}
5773 	}
5774 
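	/* Now do the same for the event counters. */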
5775 	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5776 		delta = memcg->vmstats->events_pending[i];
5777 		if (delta)
5778 			memcg->vmstats->events_pending[i] = 0;
5779 
5780 		delta_cpu = 0;
5781 		v = READ_ONCE(statc->events[i]);
5782 		if (v != statc->events_prev[i]) {
5783 			delta_cpu = v - statc->events_prev[i];
5784 			delta += delta_cpu;
5785 			statc->events_prev[i] = v;
5786 		}
5787 
5788 		if (delta_cpu)
5789 			memcg->vmstats->events_local[i] += delta_cpu;
5790 
5791 		if (delta) {
5792 			memcg->vmstats->events[i] += delta;
5793 			if (parent)
5794 				parent->vmstats->events_pending[i] += delta;
5795 		}
5796 	}
5797 
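	/* Finally, propagate the per-node lruvec stats in the same way. */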
5798 	for_each_node_state(nid, N_MEMORY) {
5799 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5800 		struct mem_cgroup_per_node *ppn = NULL;
5801 		struct lruvec_stats_percpu *lstatc;
5802 
5803 		if (parent)
5804 			ppn = parent->nodeinfo[nid];
5805 
5806 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5807 
5808 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5809 			delta = pn->lruvec_stats.state_pending[i];
5810 			if (delta)
5811 				pn->lruvec_stats.state_pending[i] = 0;
5812 
5813 			delta_cpu = 0;
5814 			v = READ_ONCE(lstatc->state[i]);
5815 			if (v != lstatc->state_prev[i]) {
5816 				delta_cpu = v - lstatc->state_prev[i];
5817 				delta += delta_cpu;
5818 				lstatc->state_prev[i] = v;
5819 			}
5820 
5821 			if (delta_cpu)
5822 				pn->lruvec_stats.state_local[i] += delta_cpu;
5823 
5824 			if (delta) {
5825 				pn->lruvec_stats.state[i] += delta;
5826 				if (ppn)
5827 					ppn->lruvec_stats.state_pending[i] += delta;
5828 			}
5829 		}
5830 	}
5831 	statc->stats_updates = 0;
5832 	/* We are in a per-cpu loop here, only do the atomic write once */
5833 	if (atomic64_read(&memcg->vmstats->stats_updates))
5834 		atomic64_set(&memcg->vmstats->stats_updates, 0);
5835 }
5836 
5837 #ifdef CONFIG_MMU
5838 /* Handlers for move charge at task migration. */
5839 static int mem_cgroup_do_precharge(unsigned long count)
5840 {
5841 	int ret;
5842 
5843 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5844 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5845 	if (!ret) {
5846 		mc.precharge += count;
5847 		return ret;
5848 	}
5849 
5850 	/* Try charges one by one with reclaim, but do not retry */
5851 	while (count--) {
5852 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5853 		if (ret)
5854 			return ret;
5855 		mc.precharge++;
5856 		cond_resched();
5857 	}
5858 	return 0;
5859 }
5860 
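/* A charge-move target is either a page or a swap entry. */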
5861 union mc_target {
5862 	struct page	*page;
5863 	swp_entry_t	ent;
5864 };
5865 
5866 enum mc_target_type {
5867 	MC_TARGET_NONE = 0,
5868 	MC_TARGET_PAGE,
5869 	MC_TARGET_SWAP,
5870 	MC_TARGET_DEVICE,
5871 };
5872 
5873 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5874 						unsigned long addr, pte_t ptent)
5875 {
5876 	struct page *page = vm_normal_page(vma, addr, ptent);
5877 
5878 	if (!page)
5879 		return NULL;
5880 	if (PageAnon(page)) {
5881 		if (!(mc.flags & MOVE_ANON))
5882 			return NULL;
5883 	} else {
5884 		if (!(mc.flags & MOVE_FILE))
5885 			return NULL;
5886 	}
5887 	get_page(page);
5888 
5889 	return page;
5890 }
5891 
5892 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5893 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5894 			pte_t ptent, swp_entry_t *entry)
5895 {
5896 	struct page *page = NULL;
5897 	swp_entry_t ent = pte_to_swp_entry(ptent);
5898 
5899 	if (!(mc.flags & MOVE_ANON))
5900 		return NULL;
5901 
5902 	/*
5903 	 * Handle device private pages that are not accessible by the CPU, but
5904 	 * stored as special swap entries in the page table.
5905 	 */
5906 	if (is_device_private_entry(ent)) {
5907 		page = pfn_swap_entry_to_page(ent);
5908 		if (!get_page_unless_zero(page))
5909 			return NULL;
5910 		return page;
5911 	}
5912 
5913 	if (non_swap_entry(ent))
5914 		return NULL;
5915 
5916 	/*
5917 	 * Because swap_cache_get_folio() updates some statistics counters,
5918 	 * we call find_get_page() with swapper_space directly.
5919 	 */
5920 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5921 	entry->val = ent.val;
5922 
5923 	return page;
5924 }
5925 #else
5926 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5927 			pte_t ptent, swp_entry_t *entry)
5928 {
5929 	return NULL;
5930 }
5931 #endif
5932 
5933 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5934 			unsigned long addr, pte_t ptent)
5935 {
5936 	unsigned long index;
5937 	struct folio *folio;
5938 
5939 	if (!vma->vm_file) /* anonymous vma */
5940 		return NULL;
5941 	if (!(mc.flags & MOVE_FILE))
5942 		return NULL;
5943 
5944 	/* folio is moved even if it's not RSS of this task (page-faulted). */
5945 	/* shmem/tmpfs may report page out on swap: account for that too. */
5946 	index = linear_page_index(vma, addr);
5947 	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5948 	if (IS_ERR(folio))
5949 		return NULL;
5950 	return folio_file_page(folio, index);
5951 }
5952 
5953 /**
5954  * mem_cgroup_move_account - move account of the page
5955  * @page: the page
5956  * @compound: charge the page as compound or small page
5957  * @from: mem_cgroup which the page is moved from.
5958  * @to:	mem_cgroup which the page is moved to. @from != @to.
5959  *
5960  * The page must be locked and not on the LRU.
5961  *
5962  * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
5963  * from the old cgroup.
5964  */
5965 static int mem_cgroup_move_account(struct page *page,
5966 				   bool compound,
5967 				   struct mem_cgroup *from,
5968 				   struct mem_cgroup *to)
5969 {
5970 	struct folio *folio = page_folio(page);
5971 	struct lruvec *from_vec, *to_vec;
5972 	struct pglist_data *pgdat;
5973 	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5974 	int nid, ret;
5975 
5976 	VM_BUG_ON(from == to);
5977 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5978 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5979 	VM_BUG_ON(compound && !folio_test_large(folio));
5980 
5981 	ret = -EINVAL;
5982 	if (folio_memcg(folio) != from)
5983 		goto out;
5984 
5985 	pgdat = folio_pgdat(folio);
5986 	from_vec = mem_cgroup_lruvec(from, pgdat);
5987 	to_vec = mem_cgroup_lruvec(to, pgdat);
5988 
5989 	folio_memcg_lock(folio);
5990 
5991 	if (folio_test_anon(folio)) {
5992 		if (folio_mapped(folio)) {
5993 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5994 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5995 			if (folio_test_pmd_mappable(folio)) {
5996 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5997 						   -nr_pages);
5998 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5999 						   nr_pages);
6000 			}
6001 		}
6002 	} else {
6003 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6004 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6005 
6006 		if (folio_test_swapbacked(folio)) {
6007 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6008 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6009 		}
6010 
6011 		if (folio_mapped(folio)) {
6012 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6013 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6014 		}
6015 
6016 		if (folio_test_dirty(folio)) {
6017 			struct address_space *mapping = folio_mapping(folio);
6018 
6019 			if (mapping_can_writeback(mapping)) {
6020 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6021 						   -nr_pages);
6022 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6023 						   nr_pages);
6024 			}
6025 		}
6026 	}
6027 
6028 #ifdef CONFIG_SWAP
6029 	if (folio_test_swapcache(folio)) {
6030 		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6031 		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6032 	}
6033 #endif
6034 	if (folio_test_writeback(folio)) {
6035 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6036 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
6037 	}
6038 
6039 	/*
6040 	 * All state has been migrated, let's switch to the new memcg.
6041 	 *
6042 	 * It is safe to change page's memcg here because the page
6043 	 * is referenced, charged, isolated, and locked: we can't race
6044 	 * with (un)charging, migration, LRU putback, or anything else
6045 	 * that would rely on a stable page's memory cgroup.
6046 	 *
6047 	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
6048 	 * to save space. As soon as we switch page's memory cgroup to a
6049 	 * new memcg that isn't locked, the above state can change
6050 	 * concurrently again. Make sure we're truly done with it.
6051 	 */
6052 	smp_mb();
6053 
6054 	css_get(&to->css);
6055 	css_put(&from->css);
6056 
6057 	folio->memcg_data = (unsigned long)to;
6058 
6059 	__folio_memcg_unlock(from);
6060 
6061 	ret = 0;
6062 	nid = folio_nid(folio);
6063 
6064 	local_irq_disable();
6065 	mem_cgroup_charge_statistics(to, nr_pages);
6066 	memcg_check_events(to, nid);
6067 	mem_cgroup_charge_statistics(from, -nr_pages);
6068 	memcg_check_events(from, nid);
6069 	local_irq_enable();
6070 out:
6071 	return ret;
6072 }
6073 
6074 /**
6075  * get_mctgt_type - get target type of moving charge
6076  * @vma: the vma the pte to be checked belongs to
6077  * @addr: the address corresponding to the pte to be checked
6078  * @ptent: the pte to be checked
6079  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
6080  *
6081  * Context: Called with pte lock held.
6082  * Return:
6083  * * MC_TARGET_NONE - If the pte is not a target for move charge.
6084  * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6085  *   move charge. If @target is not NULL, the page is stored in target->page
6086  *   with extra refcnt taken (Caller should release it).
6087  * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6088  *   target for charge migration.  If @target is not NULL, the entry is
6089  *   stored in target->ent.
6090  * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6091  *   thus not on the lru.  For now such page is charged like a regular page
6092  *   would be as it is just special memory taking the place of a regular page.
6093  *   See Documentation/vm/hmm.txt and include/linux/hmm.h
6094  */
6095 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6096 		unsigned long addr, pte_t ptent, union mc_target *target)
6097 {
6098 	struct page *page = NULL;
6099 	enum mc_target_type ret = MC_TARGET_NONE;
6100 	swp_entry_t ent = { .val = 0 };
6101 
6102 	if (pte_present(ptent))
6103 		page = mc_handle_present_pte(vma, addr, ptent);
6104 	else if (pte_none_mostly(ptent))
6105 		/*
6106 		 * PTE markers should be treated as a none pte here, separated
6107 		 * from other swap handling below.
6108 		 */
6109 		page = mc_handle_file_pte(vma, addr, ptent);
6110 	else if (is_swap_pte(ptent))
6111 		page = mc_handle_swap_pte(vma, ptent, &ent);
6112 
6113 	if (target && page) {
6114 		if (!trylock_page(page)) {
6115 			put_page(page);
6116 			return ret;
6117 		}
6118 		/*
6119 		 * page_mapped() must be stable during the move. This
6120 		 * pte is locked, so if it's present, the page cannot
6121 		 * become unmapped. If it isn't, we have only partial
6122 		 * control over the mapped state: the page lock will
6123 		 * prevent new faults against pagecache and swapcache,
6124 		 * so an unmapped page cannot become mapped. However,
6125 		 * if the page is already mapped elsewhere, it can
6126 		 * unmap, and there is nothing we can do about it.
6127 		 * Alas, skip moving the page in this case.
6128 		 */
6129 		if (!pte_present(ptent) && page_mapped(page)) {
6130 			unlock_page(page);
6131 			put_page(page);
6132 			return ret;
6133 		}
6134 	}
6135 
6136 	if (!page && !ent.val)
6137 		return ret;
6138 	if (page) {
6139 		/*
6140 		 * Do only a loose check w/o serialization.
6141 		 * mem_cgroup_move_account() checks whether the page is valid
6142 		 * under LRU exclusion.
6143 		 */
6144 		if (page_memcg(page) == mc.from) {
6145 			ret = MC_TARGET_PAGE;
6146 			if (is_device_private_page(page) ||
6147 			    is_device_coherent_page(page))
6148 				ret = MC_TARGET_DEVICE;
6149 			if (target)
6150 				target->page = page;
6151 		}
6152 		if (!ret || !target) {
6153 			if (target)
6154 				unlock_page(page);
6155 			put_page(page);
6156 		}
6157 	}
6158 	/*
6159 	 * There is a swap entry and the page doesn't exist or isn't charged.
6160 	 * But we cannot move a tail-page in a THP.
6161 	 */
6162 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6163 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6164 		ret = MC_TARGET_SWAP;
6165 		if (target)
6166 			target->ent = ent;
6167 	}
6168 	return ret;
6169 }
6170 
6171 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6172 /*
6173  * We don't consider PMD mapped swapping or file mapped pages because THP does
6174  * not support them for now.
6175  * Caller should make sure that pmd_trans_huge(pmd) is true.
6176  */
6177 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6178 		unsigned long addr, pmd_t pmd, union mc_target *target)
6179 {
6180 	struct page *page = NULL;
6181 	enum mc_target_type ret = MC_TARGET_NONE;
6182 
6183 	if (unlikely(is_swap_pmd(pmd))) {
6184 		VM_BUG_ON(thp_migration_supported() &&
6185 				  !is_pmd_migration_entry(pmd));
6186 		return ret;
6187 	}
6188 	page = pmd_page(pmd);
6189 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6190 	if (!(mc.flags & MOVE_ANON))
6191 		return ret;
6192 	if (page_memcg(page) == mc.from) {
6193 		ret = MC_TARGET_PAGE;
6194 		if (target) {
6195 			get_page(page);
6196 			if (!trylock_page(page)) {
6197 				put_page(page);
6198 				return MC_TARGET_NONE;
6199 			}
6200 			target->page = page;
6201 		}
6202 	}
6203 	return ret;
6204 }
6205 #else
6206 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6207 		unsigned long addr, pmd_t pmd, union mc_target *target)
6208 {
6209 	return MC_TARGET_NONE;
6210 }
6211 #endif
6212 
6213 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6214 					unsigned long addr, unsigned long end,
6215 					struct mm_walk *walk)
6216 {
6217 	struct vm_area_struct *vma = walk->vma;
6218 	pte_t *pte;
6219 	spinlock_t *ptl;
6220 
6221 	ptl = pmd_trans_huge_lock(pmd, vma);
6222 	if (ptl) {
6223 		/*
6224 		 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
6225 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
6226 		 * this might change.
6227 		 */
6228 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6229 			mc.precharge += HPAGE_PMD_NR;
6230 		spin_unlock(ptl);
6231 		return 0;
6232 	}
6233 
6234 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6235 	if (!pte)
6236 		return 0;
6237 	for (; addr != end; pte++, addr += PAGE_SIZE)
6238 		if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6239 			mc.precharge++;	/* increment precharge temporarily */
6240 	pte_unmap_unlock(pte - 1, ptl);
6241 	cond_resched();
6242 
6243 	return 0;
6244 }
6245 
6246 static const struct mm_walk_ops precharge_walk_ops = {
6247 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
6248 	.walk_lock	= PGWALK_RDLOCK,
6249 };
6250 
6251 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6252 {
6253 	unsigned long precharge;
6254 
6255 	mmap_read_lock(mm);
6256 	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6257 	mmap_read_unlock(mm);
6258 
6259 	precharge = mc.precharge;
6260 	mc.precharge = 0;
6261 
6262 	return precharge;
6263 }
6264 
6265 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6266 {
6267 	unsigned long precharge = mem_cgroup_count_precharge(mm);
6268 
6269 	VM_BUG_ON(mc.moving_task);
6270 	mc.moving_task = current;
6271 	return mem_cgroup_do_precharge(precharge);
6272 }
6273 
6274 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6275 static void __mem_cgroup_clear_mc(void)
6276 {
6277 	struct mem_cgroup *from = mc.from;
6278 	struct mem_cgroup *to = mc.to;
6279 
6280 	/* we must uncharge all the leftover precharges from mc.to */
6281 	if (mc.precharge) {
6282 		mem_cgroup_cancel_charge(mc.to, mc.precharge);
6283 		mc.precharge = 0;
6284 	}
6285 	/*
6286 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6287 	 * we must uncharge here.
6288 	 */
6289 	if (mc.moved_charge) {
6290 		mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6291 		mc.moved_charge = 0;
6292 	}
6293 	/* we must fixup refcnts and charges */
6294 	if (mc.moved_swap) {
6295 		/* uncharge swap account from the old cgroup */
6296 		if (!mem_cgroup_is_root(mc.from))
6297 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6298 
6299 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6300 
6301 		/*
6302 		 * we charged both to->memory and to->memsw, so we
6303 		 * should uncharge to->memory.
6304 		 */
6305 		if (!mem_cgroup_is_root(mc.to))
6306 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6307 
6308 		mc.moved_swap = 0;
6309 	}
6310 	memcg_oom_recover(from);
6311 	memcg_oom_recover(to);
6312 	wake_up_all(&mc.waitq);
6313 }
6314 
6315 static void mem_cgroup_clear_mc(void)
6316 {
6317 	struct mm_struct *mm = mc.mm;
6318 
6319 	/*
6320 	 * we must clear moving_task before waking up waiters at the end of
6321 	 * task migration.
6322 	 */
6323 	mc.moving_task = NULL;
6324 	__mem_cgroup_clear_mc();
6325 	spin_lock(&mc.lock);
6326 	mc.from = NULL;
6327 	mc.to = NULL;
6328 	mc.mm = NULL;
6329 	spin_unlock(&mc.lock);
6330 
6331 	mmput(mm);
6332 }
6333 
6334 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6335 {
6336 	struct cgroup_subsys_state *css;
6337 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6338 	struct mem_cgroup *from;
6339 	struct task_struct *leader, *p;
6340 	struct mm_struct *mm;
6341 	unsigned long move_flags;
6342 	int ret = 0;
6343 
6344 	/* charge immigration isn't supported on the default hierarchy */
6345 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6346 		return 0;
6347 
6348 	/*
6349 	 * Multi-process migrations only happen on the default hierarchy
6350 	 * where charge immigration is not used.  Perform charge
6351 	 * immigration if @tset contains a leader and whine if there are
6352 	 * multiple.
6353 	 */
6354 	p = NULL;
6355 	cgroup_taskset_for_each_leader(leader, css, tset) {
6356 		WARN_ON_ONCE(p);
6357 		p = leader;
6358 		memcg = mem_cgroup_from_css(css);
6359 	}
6360 	if (!p)
6361 		return 0;
6362 
6363 	/*
6364 	 * We are now committed to this value whatever it is. Changes in this
6365 	 * tunable will only affect upcoming migrations, not the current one.
6366 	 * So we need to save it, and keep it going.
6367 	 */
6368 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6369 	if (!move_flags)
6370 		return 0;
6371 
6372 	from = mem_cgroup_from_task(p);
6373 
6374 	VM_BUG_ON(from == memcg);
6375 
6376 	mm = get_task_mm(p);
6377 	if (!mm)
6378 		return 0;
6379 	/* We move charges only when we move an owner of the mm */
6380 	if (mm->owner == p) {
6381 		VM_BUG_ON(mc.from);
6382 		VM_BUG_ON(mc.to);
6383 		VM_BUG_ON(mc.precharge);
6384 		VM_BUG_ON(mc.moved_charge);
6385 		VM_BUG_ON(mc.moved_swap);
6386 
6387 		spin_lock(&mc.lock);
6388 		mc.mm = mm;
6389 		mc.from = from;
6390 		mc.to = memcg;
6391 		mc.flags = move_flags;
6392 		spin_unlock(&mc.lock);
6393 		/* We set mc.moving_task later */
6394 
6395 		ret = mem_cgroup_precharge_mc(mm);
6396 		if (ret)
6397 			mem_cgroup_clear_mc();
6398 	} else {
6399 		mmput(mm);
6400 	}
6401 	return ret;
6402 }
6403 
6404 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6405 {
6406 	if (mc.to)
6407 		mem_cgroup_clear_mc();
6408 }
6409 
6410 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6411 				unsigned long addr, unsigned long end,
6412 				struct mm_walk *walk)
6413 {
6414 	int ret = 0;
6415 	struct vm_area_struct *vma = walk->vma;
6416 	pte_t *pte;
6417 	spinlock_t *ptl;
6418 	enum mc_target_type target_type;
6419 	union mc_target target;
6420 	struct page *page;
6421 
6422 	ptl = pmd_trans_huge_lock(pmd, vma);
6423 	if (ptl) {
6424 		if (mc.precharge < HPAGE_PMD_NR) {
6425 			spin_unlock(ptl);
6426 			return 0;
6427 		}
6428 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6429 		if (target_type == MC_TARGET_PAGE) {
6430 			page = target.page;
6431 			if (isolate_lru_page(page)) {
6432 				if (!mem_cgroup_move_account(page, true,
6433 							     mc.from, mc.to)) {
6434 					mc.precharge -= HPAGE_PMD_NR;
6435 					mc.moved_charge += HPAGE_PMD_NR;
6436 				}
6437 				putback_lru_page(page);
6438 			}
6439 			unlock_page(page);
6440 			put_page(page);
6441 		} else if (target_type == MC_TARGET_DEVICE) {
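			/* Device pages are not on the LRU, so no isolation is needed. */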
6442 			page = target.page;
6443 			if (!mem_cgroup_move_account(page, true,
6444 						     mc.from, mc.to)) {
6445 				mc.precharge -= HPAGE_PMD_NR;
6446 				mc.moved_charge += HPAGE_PMD_NR;
6447 			}
6448 			unlock_page(page);
6449 			put_page(page);
6450 		}
6451 		spin_unlock(ptl);
6452 		return 0;
6453 	}
6454 
6455 retry:
6456 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6457 	if (!pte)
6458 		return 0;
6459 	for (; addr != end; addr += PAGE_SIZE) {
6460 		pte_t ptent = ptep_get(pte++);
6461 		bool device = false;
6462 		swp_entry_t ent;
6463 
6464 		if (!mc.precharge)
6465 			break;
6466 
6467 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6468 		case MC_TARGET_DEVICE:
6469 			device = true;
6470 			fallthrough;
6471 		case MC_TARGET_PAGE:
6472 			page = target.page;
6473 			/*
6474 			 * We can have a part of a split pmd here. Moving it
6475 			 * could be done, but it would be too convoluted, so simply
6476 			 * ignore such a partial THP and keep it in the original
6477 			 * memcg. There should be somebody mapping the head.
6478 			 */
6479 			if (PageTransCompound(page))
6480 				goto put;
6481 			if (!device && !isolate_lru_page(page))
6482 				goto put;
6483 			if (!mem_cgroup_move_account(page, false,
6484 						mc.from, mc.to)) {
6485 				mc.precharge--;
6486 				/* we uncharge from mc.from later. */
6487 				mc.moved_charge++;
6488 			}
6489 			if (!device)
6490 				putback_lru_page(page);
6491 put:			/* get_mctgt_type() gets & locks the page */
6492 			unlock_page(page);
6493 			put_page(page);
6494 			break;
6495 		case MC_TARGET_SWAP:
6496 			ent = target.ent;
6497 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6498 				mc.precharge--;
6499 				mem_cgroup_id_get_many(mc.to, 1);
6500 				/* we fixup other refcnts and charges later. */
6501 				mc.moved_swap++;
6502 			}
6503 			break;
6504 		default:
6505 			break;
6506 		}
6507 	}
6508 	pte_unmap_unlock(pte - 1, ptl);
6509 	cond_resched();
6510 
6511 	if (addr != end) {
6512 		/*
6513 		 * We have consumed all precharges we got in can_attach().
6514 		 * We try charge one by one, but don't do any additional
6515 		 * charges to mc.to if we have failed in charge once in attach()
6516 		 * phase.
6517 		 */
6518 		ret = mem_cgroup_do_precharge(1);
6519 		if (!ret)
6520 			goto retry;
6521 	}
6522 
6523 	return ret;
6524 }
6525 
6526 static const struct mm_walk_ops charge_walk_ops = {
6527 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6528 	.walk_lock	= PGWALK_RDLOCK,
6529 };
6530 
6531 static void mem_cgroup_move_charge(void)
6532 {
6533 	lru_add_drain_all();
6534 	/*
6535 	 * Signal folio_memcg_lock() to take the memcg's move_lock
6536 	 * while we're moving its pages to another memcg. Then wait
6537 	 * for already started RCU-only updates to finish.
6538 	 */
6539 	atomic_inc(&mc.from->moving_account);
6540 	synchronize_rcu();
6541 retry:
6542 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6543 		/*
6544 		 * Someone who is holding the mmap_lock might be waiting in the
6545 		 * waitq. So we cancel all extra charges, wake up all waiters,
6546 		 * and retry. Because we cancel precharges, we might not be able
6547 		 * to move enough charges, but moving charge is a best-effort
6548 		 * feature anyway, so it wouldn't be a big problem.
6549 		 */
6550 		__mem_cgroup_clear_mc();
6551 		cond_resched();
6552 		goto retry;
6553 	}
6554 	/*
6555 	 * When we have consumed all precharges and failed in doing
6556 	 * additional charge, the page walk just aborts.
6557 	 */
6558 	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6559 	mmap_read_unlock(mc.mm);
6560 	atomic_dec(&mc.from->moving_account);
6561 }
6562 
6563 static void mem_cgroup_move_task(void)
6564 {
6565 	if (mc.to) {
6566 		mem_cgroup_move_charge();
6567 		mem_cgroup_clear_mc();
6568 	}
6569 }
6570 
6571 #else	/* !CONFIG_MMU */
6572 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6573 {
6574 	return 0;
6575 }
6576 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6577 {
6578 }
6579 static void mem_cgroup_move_task(void)
6580 {
6581 }
6582 #endif
6583 
6584 #ifdef CONFIG_MEMCG_KMEM
6585 static void mem_cgroup_fork(struct task_struct *task)
6586 {
6587 	/*
6588 	 * Set the update flag to cause task->objcg to be initialized lazily
6589 	 * on the first allocation. It can be done without any synchronization
6590 	 * because it's always performed on the current task, as is
6591 	 * current_objcg_update().
6592 	 */
6593 	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6594 }
6595 
6596 static void mem_cgroup_exit(struct task_struct *task)
6597 {
6598 	struct obj_cgroup *objcg = task->objcg;
6599 
6600 	objcg = (struct obj_cgroup *)
6601 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6602 	if (objcg)
6603 		obj_cgroup_put(objcg);
6604 
6605 	/*
6606 	 * Some kernel allocations can happen after this point,
6607 	 * but let's ignore them. It can be done without any synchronization
6608 	 * because it's always performed on the current task, as is
6609 	 * current_objcg_update().
6610 	 */
6611 	task->objcg = NULL;
6612 }
6613 #endif
6614 
6615 #ifdef CONFIG_LRU_GEN
6616 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
6617 {
6618 	struct task_struct *task;
6619 	struct cgroup_subsys_state *css;
6620 
6621 	/* find the first leader if there is any */
6622 	cgroup_taskset_for_each_leader(task, css, tset)
6623 		break;
6624 
6625 	if (!task)
6626 		return;
6627 
6628 	task_lock(task);
6629 	if (task->mm && READ_ONCE(task->mm->owner) == task)
6630 		lru_gen_migrate_mm(task->mm);
6631 	task_unlock(task);
6632 }
6633 #else
6634 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6635 #endif /* CONFIG_LRU_GEN */
6636 
6637 #ifdef CONFIG_MEMCG_KMEM
6638 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6639 {
6640 	struct task_struct *task;
6641 	struct cgroup_subsys_state *css;
6642 
6643 	cgroup_taskset_for_each(task, css, tset) {
6644 		/* atomically set the update bit */
6645 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6646 	}
6647 }
6648 #else
6649 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6650 #endif /* CONFIG_MEMCG_KMEM */
6651 
6652 #if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
6653 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6654 {
6655 	mem_cgroup_lru_gen_attach(tset);
6656 	mem_cgroup_kmem_attach(tset);
6657 }
6658 #endif
6659 
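/* Print a memcg tunable in bytes, or "max" if it is unlimited. */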
6660 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6661 {
6662 	if (value == PAGE_COUNTER_MAX)
6663 		seq_puts(m, "max\n");
6664 	else
6665 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6666 
6667 	return 0;
6668 }
6669 
6670 static u64 memory_current_read(struct cgroup_subsys_state *css,
6671 			       struct cftype *cft)
6672 {
6673 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6674 
6675 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6676 }
6677 
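/* memory.peak: the memory usage high watermark, in bytes. */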
6678 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6679 			    struct cftype *cft)
6680 {
6681 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6682 
6683 	return (u64)memcg->memory.watermark * PAGE_SIZE;
6684 }
6685 
6686 static int memory_min_show(struct seq_file *m, void *v)
6687 {
6688 	return seq_puts_memcg_tunable(m,
6689 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6690 }
6691 
6692 static ssize_t memory_min_write(struct kernfs_open_file *of,
6693 				char *buf, size_t nbytes, loff_t off)
6694 {
6695 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6696 	unsigned long min;
6697 	int err;
6698 
6699 	buf = strstrip(buf);
6700 	err = page_counter_memparse(buf, "max", &min);
6701 	if (err)
6702 		return err;
6703 
6704 	page_counter_set_min(&memcg->memory, min);
6705 
6706 	return nbytes;
6707 }
6708 
6709 static int memory_low_show(struct seq_file *m, void *v)
6710 {
6711 	return seq_puts_memcg_tunable(m,
6712 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6713 }
6714 
6715 static ssize_t memory_low_write(struct kernfs_open_file *of,
6716 				char *buf, size_t nbytes, loff_t off)
6717 {
6718 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6719 	unsigned long low;
6720 	int err;
6721 
6722 	buf = strstrip(buf);
6723 	err = page_counter_memparse(buf, "max", &low);
6724 	if (err)
6725 		return err;
6726 
6727 	page_counter_set_low(&memcg->memory, low);
6728 
6729 	return nbytes;
6730 }
6731 
6732 static int memory_high_show(struct seq_file *m, void *v)
6733 {
6734 	return seq_puts_memcg_tunable(m,
6735 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6736 }
6737 
6738 static ssize_t memory_high_write(struct kernfs_open_file *of,
6739 				 char *buf, size_t nbytes, loff_t off)
6740 {
6741 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6742 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6743 	bool drained = false;
6744 	unsigned long high;
6745 	int err;
6746 
6747 	buf = strstrip(buf);
6748 	err = page_counter_memparse(buf, "max", &high);
6749 	if (err)
6750 		return err;
6751 
6752 	page_counter_set_high(&memcg->memory, high);
6753 
6754 	for (;;) {
6755 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6756 		unsigned long reclaimed;
6757 
6758 		if (nr_pages <= high)
6759 			break;
6760 
6761 		if (signal_pending(current))
6762 			break;
6763 
6764 		if (!drained) {
6765 			drain_all_stock(memcg);
6766 			drained = true;
6767 			continue;
6768 		}
6769 
6770 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6771 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6772 
6773 		if (!reclaimed && !nr_retries--)
6774 			break;
6775 	}
6776 
6777 	memcg_wb_domain_size_changed(memcg);
6778 	return nbytes;
6779 }
6780 
6781 static int memory_max_show(struct seq_file *m, void *v)
6782 {
6783 	return seq_puts_memcg_tunable(m,
6784 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6785 }
6786 
6787 static ssize_t memory_max_write(struct kernfs_open_file *of,
6788 				char *buf, size_t nbytes, loff_t off)
6789 {
6790 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6791 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6792 	bool drained = false;
6793 	unsigned long max;
6794 	int err;
6795 
6796 	buf = strstrip(buf);
6797 	err = page_counter_memparse(buf, "max", &max);
6798 	if (err)
6799 		return err;
6800 
6801 	xchg(&memcg->memory.max, max);
6802 
6803 	for (;;) {
6804 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6805 
6806 		if (nr_pages <= max)
6807 			break;
6808 
6809 		if (signal_pending(current))
6810 			break;
6811 
6812 		if (!drained) {
6813 			drain_all_stock(memcg);
6814 			drained = true;
6815 			continue;
6816 		}
6817 
6818 		if (nr_reclaims) {
6819 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6820 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6821 				nr_reclaims--;
6822 			continue;
6823 		}
6824 
6825 		memcg_memory_event(memcg, MEMCG_OOM);
6826 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6827 			break;
6828 	}
6829 
6830 	memcg_wb_domain_size_changed(memcg);
6831 	return nbytes;
6832 }
6833 
6834 /*
6835  * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6836  * if any new events become available.
6837  */
6838 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6839 {
6840 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6841 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6842 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6843 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6844 	seq_printf(m, "oom_kill %lu\n",
6845 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6846 	seq_printf(m, "oom_group_kill %lu\n",
6847 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6848 }
6849 
6850 static int memory_events_show(struct seq_file *m, void *v)
6851 {
6852 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6853 
6854 	__memory_events_show(m, memcg->memory_events);
6855 	return 0;
6856 }
6857 
6858 static int memory_events_local_show(struct seq_file *m, void *v)
6859 {
6860 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6861 
6862 	__memory_events_show(m, memcg->memory_events_local);
6863 	return 0;
6864 }
6865 
6866 static int memory_stat_show(struct seq_file *m, void *v)
6867 {
6868 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6869 	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6870 	struct seq_buf s;
6871 
6872 	if (!buf)
6873 		return -ENOMEM;
6874 	seq_buf_init(&s, buf, PAGE_SIZE);
6875 	memory_stat_format(memcg, &s);
6876 	seq_puts(m, buf);
6877 	kfree(buf);
6878 	return 0;
6879 }
6880 
6881 #ifdef CONFIG_NUMA
6882 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6883 						     int item)
6884 {
6885 	return lruvec_page_state(lruvec, item) *
6886 		memcg_page_state_output_unit(item);
6887 }
6888 
6889 static int memory_numa_stat_show(struct seq_file *m, void *v)
6890 {
6891 	int i;
6892 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6893 
6894 	mem_cgroup_flush_stats(memcg);
6895 
6896 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6897 		int nid;
6898 
6899 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6900 			continue;
6901 
6902 		seq_printf(m, "%s", memory_stats[i].name);
6903 		for_each_node_state(nid, N_MEMORY) {
6904 			u64 size;
6905 			struct lruvec *lruvec;
6906 
6907 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6908 			size = lruvec_page_state_output(lruvec,
6909 							memory_stats[i].idx);
6910 			seq_printf(m, " N%d=%llu", nid, size);
6911 		}
6912 		seq_putc(m, '\n');
6913 	}
6914 
6915 	return 0;
6916 }
6917 #endif
6918 
6919 static int memory_oom_group_show(struct seq_file *m, void *v)
6920 {
6921 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6922 
6923 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6924 
6925 	return 0;
6926 }
6927 
6928 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6929 				      char *buf, size_t nbytes, loff_t off)
6930 {
6931 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6932 	int ret, oom_group;
6933 
6934 	buf = strstrip(buf);
6935 	if (!buf)
6936 		return -EINVAL;
6937 
6938 	ret = kstrtoint(buf, 0, &oom_group);
6939 	if (ret)
6940 		return ret;
6941 
6942 	if (oom_group != 0 && oom_group != 1)
6943 		return -EINVAL;
6944 
6945 	WRITE_ONCE(memcg->oom_group, oom_group);
6946 
6947 	return nbytes;
6948 }
6949 
6950 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6951 			      size_t nbytes, loff_t off)
6952 {
6953 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6954 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6955 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6956 	unsigned int reclaim_options;
6957 	int err;
6958 
6959 	buf = strstrip(buf);
6960 	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6961 	if (err)
6962 		return err;
6963 
6964 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6965 	while (nr_reclaimed < nr_to_reclaim) {
6966 		unsigned long reclaimed;
6967 
6968 		if (signal_pending(current))
6969 			return -EINTR;
6970 
6971 		/*
6972 		 * This is the final attempt, drain percpu lru caches in the
6973 		 * hope of introducing more evictable pages for
6974 		 * try_to_free_mem_cgroup_pages().
6975 		 */
6976 		if (!nr_retries)
6977 			lru_add_drain_all();
6978 
6979 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6980 					min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX),
6981 					GFP_KERNEL, reclaim_options);
6982 
6983 		if (!reclaimed && !nr_retries--)
6984 			return -EAGAIN;
6985 
6986 		nr_reclaimed += reclaimed;
6987 	}
6988 
6989 	return nbytes;
6990 }
6991 
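/* Control files exposed on the default (cgroup v2) hierarchy; see dfl_cftypes below. */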
6992 static struct cftype memory_files[] = {
6993 	{
6994 		.name = "current",
6995 		.flags = CFTYPE_NOT_ON_ROOT,
6996 		.read_u64 = memory_current_read,
6997 	},
6998 	{
6999 		.name = "peak",
7000 		.flags = CFTYPE_NOT_ON_ROOT,
7001 		.read_u64 = memory_peak_read,
7002 	},
7003 	{
7004 		.name = "min",
7005 		.flags = CFTYPE_NOT_ON_ROOT,
7006 		.seq_show = memory_min_show,
7007 		.write = memory_min_write,
7008 	},
7009 	{
7010 		.name = "low",
7011 		.flags = CFTYPE_NOT_ON_ROOT,
7012 		.seq_show = memory_low_show,
7013 		.write = memory_low_write,
7014 	},
7015 	{
7016 		.name = "high",
7017 		.flags = CFTYPE_NOT_ON_ROOT,
7018 		.seq_show = memory_high_show,
7019 		.write = memory_high_write,
7020 	},
7021 	{
7022 		.name = "max",
7023 		.flags = CFTYPE_NOT_ON_ROOT,
7024 		.seq_show = memory_max_show,
7025 		.write = memory_max_write,
7026 	},
7027 	{
7028 		.name = "events",
7029 		.flags = CFTYPE_NOT_ON_ROOT,
7030 		.file_offset = offsetof(struct mem_cgroup, events_file),
7031 		.seq_show = memory_events_show,
7032 	},
7033 	{
7034 		.name = "events.local",
7035 		.flags = CFTYPE_NOT_ON_ROOT,
7036 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
7037 		.seq_show = memory_events_local_show,
7038 	},
7039 	{
7040 		.name = "stat",
7041 		.seq_show = memory_stat_show,
7042 	},
7043 #ifdef CONFIG_NUMA
7044 	{
7045 		.name = "numa_stat",
7046 		.seq_show = memory_numa_stat_show,
7047 	},
7048 #endif
7049 	{
7050 		.name = "oom.group",
7051 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7052 		.seq_show = memory_oom_group_show,
7053 		.write = memory_oom_group_write,
7054 	},
7055 	{
7056 		.name = "reclaim",
7057 		.flags = CFTYPE_NS_DELEGATABLE,
7058 		.write = memory_reclaim,
7059 	},
7060 	{ }	/* terminate */
7061 };
7062 
7063 struct cgroup_subsys memory_cgrp_subsys = {
7064 	.css_alloc = mem_cgroup_css_alloc,
7065 	.css_online = mem_cgroup_css_online,
7066 	.css_offline = mem_cgroup_css_offline,
7067 	.css_released = mem_cgroup_css_released,
7068 	.css_free = mem_cgroup_css_free,
7069 	.css_reset = mem_cgroup_css_reset,
7070 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
7071 	.can_attach = mem_cgroup_can_attach,
7072 #if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
7073 	.attach = mem_cgroup_attach,
7074 #endif
7075 	.cancel_attach = mem_cgroup_cancel_attach,
7076 	.post_attach = mem_cgroup_move_task,
7077 #ifdef CONFIG_MEMCG_KMEM
7078 	.fork = mem_cgroup_fork,
7079 	.exit = mem_cgroup_exit,
7080 #endif
7081 	.dfl_cftypes = memory_files,
7082 	.legacy_cftypes = mem_cgroup_legacy_files,
7083 	.early_init = 0,
7084 };
7085 
7086 /*
7087  * This function calculates an individual cgroup's effective
7088  * protection which is derived from its own memory.min/low, its
7089  * parent's and siblings' settings, as well as the actual memory
7090  * distribution in the tree.
7091  *
7092  * The following rules apply to the effective protection values:
7093  *
7094  * 1. At the first level of reclaim, effective protection is equal to
7095  *    the declared protection in memory.min and memory.low.
7096  *
7097  * 2. To enable safe delegation of the protection configuration, at
7098  *    subsequent levels the effective protection is capped to the
7099  *    parent's effective protection.
7100  *
7101  * 3. To make complex and dynamic subtrees easier to configure, the
7102  *    user is allowed to overcommit the declared protection at a given
7103  *    level. If that is the case, the parent's effective protection is
7104  *    distributed to the children in proportion to how much protection
7105  *    they have declared and how much of it they are utilizing.
7106  *
7107  *    This makes distribution proportional, but also work-conserving:
7108  *    if one cgroup claims much more protection than the memory it uses,
7109  *    the unused remainder is available to its siblings.
7110  *
7111  * 4. Conversely, when the declared protection is undercommitted at a
7112  *    given level, the distribution of the larger parental protection
7113  *    budget is NOT proportional. A cgroup's protection from a sibling
7114  *    is capped to its own memory.min/low setting.
7115  *
7116  * 5. However, to allow protecting recursive subtrees from each other
7117  *    without having to declare each individual cgroup's fixed share
7118  *    of the ancestor's claim to protection, any unutilized -
7119  *    "floating" - protection from up the tree is distributed in
7120  *    proportion to each cgroup's *usage*. This makes the protection
7121  *    neutral wrt sibling cgroups and lets them compete freely over
7122  *    the shared parental protection budget, but it protects the
7123  *    subtree as a whole from neighboring subtrees.
7124  *
7125  * Note that 4. and 5. are not in conflict: 4. is about protecting
7126  * against immediate siblings whereas 5. is about protecting against
7127  * neighboring subtrees.
7128  */
7129 static unsigned long effective_protection(unsigned long usage,
7130 					  unsigned long parent_usage,
7131 					  unsigned long setting,
7132 					  unsigned long parent_effective,
7133 					  unsigned long siblings_protected)
7134 {
7135 	unsigned long protected;
7136 	unsigned long ep;
7137 
7138 	protected = min(usage, setting);
7139 	/*
7140 	 * If all cgroups at this level combined claim and use more
7141 	 * protection than what the parent affords them, distribute
7142 	 * shares in proportion to utilization.
7143 	 *
7144 	 * We are using actual utilization rather than the statically
7145 	 * claimed protection in order to be work-conserving: claimed
7146 	 * but unused protection is available to siblings that would
7147 	 * otherwise get a smaller chunk than what they claimed.
7148 	 */
7149 	if (siblings_protected > parent_effective)
7150 		return protected * parent_effective / siblings_protected;
7151 
7152 	/*
7153 	 * Ok, utilized protection of all children is within what the
7154 	 * parent affords them, so we know whatever this child claims
7155 	 * and utilizes is effectively protected.
7156 	 *
7157 	 * If there is unprotected usage beyond this value, reclaim
7158 	 * will apply pressure in proportion to that amount.
7159 	 *
7160 	 * If there is unutilized protection, the cgroup will be fully
7161 	 * shielded from reclaim, but we do return a smaller value for
7162 	 * protection than what the group could enjoy in theory. This
7163 	 * is okay. With the overcommit distribution above, effective
7164 	 * protection is always dependent on how memory is actually
7165 	 * consumed among the siblings anyway.
7166 	 */
7167 	ep = protected;
7168 
7169 	/*
7170 	 * If the children aren't claiming (all of) the protection
7171 	 * afforded to them by the parent, distribute the remainder in
7172 	 * proportion to the (unprotected) memory of each cgroup. That
7173 	 * way, cgroups that aren't explicitly prioritized wrt each
7174 	 * other compete freely over the allowance, but they are
7175 	 * collectively protected from neighboring trees.
7176 	 *
7177 	 * We're using unprotected memory for the weight so that if
7178 	 * some cgroups DO claim explicit protection, we don't protect
7179 	 * the same bytes twice.
7180 	 *
7181 	 * Check both usage and parent_usage against the respective
7182 	 * protected values. One should imply the other, but they
7183 	 * aren't read atomically - make sure the division is sane.
7184 	 */
7185 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7186 		return ep;
7187 	if (parent_effective > siblings_protected &&
7188 	    parent_usage > siblings_protected &&
7189 	    usage > protected) {
7190 		unsigned long unclaimed;
7191 
7192 		unclaimed = parent_effective - siblings_protected;
7193 		unclaimed *= usage - protected;
7194 		unclaimed /= parent_usage - siblings_protected;
7195 
7196 		ep += unclaimed;
7197 	}
7198 
7199 	return ep;
7200 }
7201 
7202 /**
7203  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7204  * @root: the top ancestor of the sub-tree being checked
7205  * @memcg: the memory cgroup to check
7206  *
7207  * WARNING: This function is not stateless! It can only be used as part
7208  *          of a top-down tree iteration, not for isolated queries.
7209  */
7210 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7211 				     struct mem_cgroup *memcg)
7212 {
7213 	unsigned long usage, parent_usage;
7214 	struct mem_cgroup *parent;
7215 
7216 	if (mem_cgroup_disabled())
7217 		return;
7218 
7219 	if (!root)
7220 		root = root_mem_cgroup;
7221 
7222 	/*
7223 	 * Effective values of the reclaim targets are ignored so they
7224 	 * can be stale. Have a look at mem_cgroup_protection for more
7225 	 * details.
7226 	 * TODO: calculation should be more robust so that we do not need
7227 	 * that special casing.
7228 	 */
7229 	if (memcg == root)
7230 		return;
7231 
7232 	usage = page_counter_read(&memcg->memory);
7233 	if (!usage)
7234 		return;
7235 
7236 	parent = parent_mem_cgroup(memcg);
7237 
7238 	if (parent == root) {
7239 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
7240 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
7241 		return;
7242 	}
7243 
7244 	parent_usage = page_counter_read(&parent->memory);
7245 
7246 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7247 			READ_ONCE(memcg->memory.min),
7248 			READ_ONCE(parent->memory.emin),
7249 			atomic_long_read(&parent->memory.children_min_usage)));
7250 
7251 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7252 			READ_ONCE(memcg->memory.low),
7253 			READ_ONCE(parent->memory.elow),
7254 			atomic_long_read(&parent->memory.children_low_usage)));
7255 }
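/*
 * Editorial sketch (not part of the kernel source): because the
 * effective values computed above build on the parent's effective
 * values, callers must walk the tree top-down, roughly as the reclaim
 * path does in shrink_node_memcgs().  The iterator and protection
 * helpers are the real memcg APIs; the loop itself is only an
 * illustrative outline.
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	while ((memcg = mem_cgroup_iter(root, memcg, NULL))) {
 *		mem_cgroup_calculate_protection(root, memcg);
 *
 *		if (mem_cgroup_below_min(root, memcg))
 *			continue;	// hard protection, skip
 *		if (mem_cgroup_below_low(root, memcg))
 *			continue;	// soft protection, usually skip
 *
 *		// ... reclaim from this memcg ...
 *	}
 */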
7256 
7257 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7258 			gfp_t gfp)
7259 {
7260 	int ret;
7261 
7262 	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7263 	if (ret)
7264 		goto out;
7265 
7266 	mem_cgroup_commit_charge(folio, memcg);
7267 out:
7268 	return ret;
7269 }
7270 
7271 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7272 {
7273 	struct mem_cgroup *memcg;
7274 	int ret;
7275 
7276 	memcg = get_mem_cgroup_from_mm(mm);
7277 	ret = charge_memcg(folio, memcg, gfp);
7278 	css_put(&memcg->css);
7279 
7280 	return ret;
7281 }
7282 
7283 /**
7284  * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7285  * @memcg: memcg to charge.
7286  * @gfp: reclaim mode.
7287  * @nr_pages: number of pages to charge.
7288  *
7289  * This function is called when allocating a huge page folio to determine if
7290  * the memcg has the capacity for it. It does not commit the charge yet,
7291  * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7292  *
7293  * Once we have obtained the hugetlb folio, we can call
7294  * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7295  * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7296  * of try_charge().
7297  *
7298  * Returns 0 on success. Otherwise, an error code is returned.
7299  */
7300 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7301 			long nr_pages)
7302 {
7303 	/*
7304 	 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
7305 	 * but do not attempt to commit the charge later (or cancel on error) either.
7306 	 */
7307 	if (mem_cgroup_disabled() || !memcg ||
7308 		!cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7309 		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7310 		return -EOPNOTSUPP;
7311 
7312 	if (try_charge(memcg, gfp, nr_pages))
7313 		return -ENOMEM;
7314 
7315 	return 0;
7316 }
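/*
 * Editorial sketch (not part of the kernel source): the intended
 * calling sequence, loosely modeled on the hugetlb allocation path.
 * alloc_hugetlb_folio_from_pool() is a hypothetical placeholder for
 * whatever actually dequeues the folio.
 *
 *	memcg = get_mem_cgroup_from_current();
 *	ret = mem_cgroup_hugetlb_try_charge(memcg, GFP_KERNEL, nr_pages);
 *	if (ret == -ENOMEM)
 *		goto fail;				// over the limit
 *
 *	folio = alloc_hugetlb_folio_from_pool();	// hypothetical
 *	if (folio) {
 *		if (ret == 0)		// charge was actually attempted
 *			mem_cgroup_commit_charge(folio, memcg);
 *	} else if (ret == 0) {
 *		mem_cgroup_cancel_charge(memcg, nr_pages);
 *	}
 *	// -EOPNOTSUPP means accounting is off: neither commit nor cancel.
 *	mem_cgroup_put(memcg);
 */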
7317 
7318 /**
7319  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7320  * @folio: folio to charge.
7321  * @mm: mm context of the victim
7322  * @gfp: reclaim mode
7323  * @entry: swap entry for which the folio is allocated
7324  *
7325  * This function charges a folio allocated for swapin. Please call this before
7326  * adding the folio to the swapcache.
7327  *
7328  * Returns 0 on success. Otherwise, an error code is returned.
7329  */
7330 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7331 				  gfp_t gfp, swp_entry_t entry)
7332 {
7333 	struct mem_cgroup *memcg;
7334 	unsigned short id;
7335 	int ret;
7336 
7337 	if (mem_cgroup_disabled())
7338 		return 0;
7339 
7340 	id = lookup_swap_cgroup_id(entry);
7341 	rcu_read_lock();
7342 	memcg = mem_cgroup_from_id(id);
7343 	if (!memcg || !css_tryget_online(&memcg->css))
7344 		memcg = get_mem_cgroup_from_mm(mm);
7345 	rcu_read_unlock();
7346 
7347 	ret = charge_memcg(folio, memcg, gfp);
7348 
7349 	css_put(&memcg->css);
7350 	return ret;
7351 }
7352 
7353 /*
7354  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7355  * @entry: swap entry for which the page is charged
7356  *
7357  * Call this function after successfully adding the charged page to swapcache.
7358  *
7359 	 * Note: This function assumes the page for which the swap slot is being
7360 	 * uncharged is an order-0 page.
7361  */
7362 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7363 {
7364 	/*
7365 	 * Cgroup1's unified memory+swap counter has been charged with the
7366 	 * new swapcache page, finish the transfer by uncharging the swap
7367 	 * slot. The swap slot would also get uncharged when it dies, but
7368 	 * it can stick around indefinitely and we'd count the page twice
7369 	 * the entire time.
7370 	 *
7371 	 * Cgroup2 has separate resource counters for memory and swap,
7372 	 * so this is a non-issue here. Memory and swap charge lifetimes
7373 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
7374 	 * page to memory here, and uncharge swap when the slot is freed.
7375 	 */
7376 	if (!mem_cgroup_disabled() && do_memsw_account()) {
7377 		/*
7378 		 * The swap entry might not get freed for a long time,
7379 		 * let's not wait for it.  The page already received a
7380 		 * memory+swap charge, drop the swap entry duplicate.
7381 		 */
7382 		mem_cgroup_uncharge_swap(entry, 1);
7383 	}
7384 }
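/*
 * Editorial sketch (not part of the kernel source): the ordering the
 * two helpers above expect on the swapin path, loosely modeled on
 * __read_swap_cache_async().  Error handling is omitted.
 *
 *	folio = folio_alloc(gfp, 0);
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry))
 *		goto fail;		// charge before the swapcache add
 *	if (add_to_swap_cache(folio, entry, gfp, NULL))
 *		goto fail;
 *	mem_cgroup_swapin_uncharge_swap(entry);	// drop the swap charge
 */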
7385 
7386 struct uncharge_gather {
7387 	struct mem_cgroup *memcg;
7388 	unsigned long nr_memory;
7389 	unsigned long pgpgout;
7390 	unsigned long nr_kmem;
7391 	int nid;
7392 };
7393 
7394 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7395 {
7396 	memset(ug, 0, sizeof(*ug));
7397 }
7398 
7399 static void uncharge_batch(const struct uncharge_gather *ug)
7400 {
7401 	unsigned long flags;
7402 
7403 	if (ug->nr_memory) {
7404 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7405 		if (do_memsw_account())
7406 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7407 		if (ug->nr_kmem)
7408 			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7409 		memcg_oom_recover(ug->memcg);
7410 	}
7411 
7412 	local_irq_save(flags);
7413 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7414 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7415 	memcg_check_events(ug->memcg, ug->nid);
7416 	local_irq_restore(flags);
7417 
7418 	/* drop reference from uncharge_folio */
7419 	css_put(&ug->memcg->css);
7420 }
7421 
7422 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7423 {
7424 	long nr_pages;
7425 	struct mem_cgroup *memcg;
7426 	struct obj_cgroup *objcg;
7427 
7428 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7429 
7430 	/*
7431 	 * Nobody should be changing or seriously looking at
7432 	 * folio memcg or objcg at this point; we have fully
7433 	 * exclusive access to the folio.
7434 	 */
7435 	if (folio_memcg_kmem(folio)) {
7436 		objcg = __folio_objcg(folio);
7437 		/*
7438 		 * This get matches the put at the end of the function and
7439 		 * kmem pages do not hold memcg references anymore.
7440 		 */
7441 		memcg = get_mem_cgroup_from_objcg(objcg);
7442 	} else {
7443 		memcg = __folio_memcg(folio);
7444 	}
7445 
7446 	if (!memcg)
7447 		return;
7448 
7449 	if (ug->memcg != memcg) {
7450 		if (ug->memcg) {
7451 			uncharge_batch(ug);
7452 			uncharge_gather_clear(ug);
7453 		}
7454 		ug->memcg = memcg;
7455 		ug->nid = folio_nid(folio);
7456 
7457 		/* pairs with css_put in uncharge_batch */
7458 		css_get(&memcg->css);
7459 	}
7460 
7461 	nr_pages = folio_nr_pages(folio);
7462 
7463 	if (folio_memcg_kmem(folio)) {
7464 		ug->nr_memory += nr_pages;
7465 		ug->nr_kmem += nr_pages;
7466 
7467 		folio->memcg_data = 0;
7468 		obj_cgroup_put(objcg);
7469 	} else {
7470 		/* LRU pages aren't accounted at the root level */
7471 		if (!mem_cgroup_is_root(memcg))
7472 			ug->nr_memory += nr_pages;
7473 		ug->pgpgout++;
7474 
7475 		folio->memcg_data = 0;
7476 	}
7477 
7478 	css_put(&memcg->css);
7479 }
7480 
7481 void __mem_cgroup_uncharge(struct folio *folio)
7482 {
7483 	struct uncharge_gather ug;
7484 
7485 	/* Don't touch folio->lru of any random page, pre-check: */
7486 	if (!folio_memcg(folio))
7487 		return;
7488 
7489 	uncharge_gather_clear(&ug);
7490 	uncharge_folio(folio, &ug);
7491 	uncharge_batch(&ug);
7492 }
7493 
7494 /**
7495  * __mem_cgroup_uncharge_list - uncharge a list of pages
7496  * @page_list: list of pages to uncharge
7497  *
7498  * Uncharge a list of pages previously charged with
7499  * __mem_cgroup_charge().
7500  */
7501 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7502 {
7503 	struct uncharge_gather ug;
7504 	struct folio *folio;
7505 
7506 	uncharge_gather_clear(&ug);
7507 	list_for_each_entry(folio, page_list, lru)
7508 		uncharge_folio(folio, &ug);
7509 	if (ug.memcg)
7510 		uncharge_batch(&ug);
7511 }
7512 
7513 /**
7514  * mem_cgroup_replace_folio - Charge a folio's replacement.
7515  * @old: Currently circulating folio.
7516  * @new: Replacement folio.
7517  *
7518  * Charge @new as a replacement folio for @old. @old will
7519  * be uncharged upon free. This is only used by the page cache
7520  * (in replace_page_cache_folio()).
7521  *
7522  * Both folios must be locked, @new->mapping must be set up.
7523  */
7524 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7525 {
7526 	struct mem_cgroup *memcg;
7527 	long nr_pages = folio_nr_pages(new);
7528 	unsigned long flags;
7529 
7530 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7531 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7532 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7533 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7534 
7535 	if (mem_cgroup_disabled())
7536 		return;
7537 
7538 	/* Page cache replacement: new folio already charged? */
7539 	if (folio_memcg(new))
7540 		return;
7541 
7542 	memcg = folio_memcg(old);
7543 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7544 	if (!memcg)
7545 		return;
7546 
7547 	/* Force-charge the new page. The old one will be freed soon */
7548 	if (!mem_cgroup_is_root(memcg)) {
7549 		page_counter_charge(&memcg->memory, nr_pages);
7550 		if (do_memsw_account())
7551 			page_counter_charge(&memcg->memsw, nr_pages);
7552 	}
7553 
7554 	css_get(&memcg->css);
7555 	commit_charge(new, memcg);
7556 
7557 	local_irq_save(flags);
7558 	mem_cgroup_charge_statistics(memcg, nr_pages);
7559 	memcg_check_events(memcg, folio_nid(new));
7560 	local_irq_restore(flags);
7561 }
7562 
7563 /**
7564  * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7565  * @old: Currently circulating folio.
7566  * @new: Replacement folio.
7567  *
7568  * Transfer the memcg data from the old folio to the new folio for migration.
7569  * The old folio's memcg data will be cleared. Note that the memory counters
7570  * will remain unchanged throughout the process.
7571  *
7572  * Both folios must be locked, @new->mapping must be set up.
7573  */
7574 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7575 {
7576 	struct mem_cgroup *memcg;
7577 
7578 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7579 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7580 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7581 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7582 
7583 	if (mem_cgroup_disabled())
7584 		return;
7585 
7586 	memcg = folio_memcg(old);
7587 	/*
7588 	 * Note that it is normal to see !memcg for a hugetlb folio.
7589 	 * E.g. it could have been allocated when memory_hugetlb_accounting
7590 	 * was not selected.
7591 	 */
7592 	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
7593 	if (!memcg)
7594 		return;
7595 
7596 	/* Transfer the charge and the css ref */
7597 	commit_charge(new, memcg);
7598 	/*
7599 	 * If the old folio is a large folio and is in the split queue, it needs
7600 	 * to be removed from the split queue now, to avoid getting an incorrect
7601 	 * split queue in destroy_large_folio() after the memcg of the old folio
7602 	 * is cleared.
7603 	 *
7604 	 * In addition, the old folio is about to be freed after migration, so
7605 	 * removing from the split queue a bit earlier seems reasonable.
7606 	 */
7607 	if (folio_test_large(old) && folio_test_large_rmappable(old))
7608 		folio_undo_large_rmappable(old);
7609 	old->memcg_data = 0;
7610 }
7611 
7612 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7613 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7614 
7615 void mem_cgroup_sk_alloc(struct sock *sk)
7616 {
7617 	struct mem_cgroup *memcg;
7618 
7619 	if (!mem_cgroup_sockets_enabled)
7620 		return;
7621 
7622 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7623 	if (!in_task())
7624 		return;
7625 
7626 	rcu_read_lock();
7627 	memcg = mem_cgroup_from_task(current);
7628 	if (mem_cgroup_is_root(memcg))
7629 		goto out;
7630 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7631 		goto out;
7632 	if (css_tryget(&memcg->css))
7633 		sk->sk_memcg = memcg;
7634 out:
7635 	rcu_read_unlock();
7636 }
7637 
7638 void mem_cgroup_sk_free(struct sock *sk)
7639 {
7640 	if (sk->sk_memcg)
7641 		css_put(&sk->sk_memcg->css);
7642 }
7643 
7644 /**
7645  * mem_cgroup_charge_skmem - charge socket memory
7646  * @memcg: memcg to charge
7647  * @nr_pages: number of pages to charge
7648  * @gfp_mask: reclaim mode
7649  *
7650  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7651  * @memcg's configured limit, %false if it doesn't.
7652  */
7653 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7654 			     gfp_t gfp_mask)
7655 {
7656 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7657 		struct page_counter *fail;
7658 
7659 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7660 			memcg->tcpmem_pressure = 0;
7661 			return true;
7662 		}
7663 		memcg->tcpmem_pressure = 1;
7664 		if (gfp_mask & __GFP_NOFAIL) {
7665 			page_counter_charge(&memcg->tcpmem, nr_pages);
7666 			return true;
7667 		}
7668 		return false;
7669 	}
7670 
7671 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7672 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7673 		return true;
7674 	}
7675 
7676 	return false;
7677 }
7678 
7679 /**
7680  * mem_cgroup_uncharge_skmem - uncharge socket memory
7681  * @memcg: memcg to uncharge
7682  * @nr_pages: number of pages to uncharge
7683  */
7684 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7685 {
7686 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7687 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7688 		return;
7689 	}
7690 
7691 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7692 
7693 	refill_stock(memcg, nr_pages);
7694 }
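/*
 * Editorial sketch (not part of the kernel source): how the networking
 * layer typically uses this pair, loosely modeled on
 * __sk_mem_raise_allocated() / __sk_mem_reduce_allocated().
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		goto suppress;			// over the memcg limit
 *	...
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */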
7695 
7696 static int __init cgroup_memory(char *s)
7697 {
7698 	char *token;
7699 
7700 	while ((token = strsep(&s, ",")) != NULL) {
7701 		if (!*token)
7702 			continue;
7703 		if (!strcmp(token, "nosocket"))
7704 			cgroup_memory_nosocket = true;
7705 		if (!strcmp(token, "nokmem"))
7706 			cgroup_memory_nokmem = true;
7707 		if (!strcmp(token, "nobpf"))
7708 			cgroup_memory_nobpf = true;
7709 	}
7710 	return 1;
7711 }
7712 __setup("cgroup.memory=", cgroup_memory);
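/*
 * Editorial note (not part of the kernel source): example usage on the
 * kernel command line, disabling socket and kernel memory accounting:
 *
 *	cgroup.memory=nosocket,nokmem
 */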
7713 
7714 /*
7715  * subsys_initcall() for memory controller.
7716  *
7717  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7718  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7719  * basically everything that doesn't depend on a specific mem_cgroup structure
7720  * should be initialized from here.
7721  */
7722 static int __init mem_cgroup_init(void)
7723 {
7724 	int cpu, node;
7725 
7726 	/*
7727 	 * An s32 type (see struct batched_lruvec_stat) is currently used for
7728 	 * per-memcg-per-cpu caching of per-node statistics. For this to work
7729 	 * correctly, the overfill threshold must not exceed
7730 	 * S32_MAX / PAGE_SIZE.
7731 	 */
7732 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7733 
7734 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7735 				  memcg_hotplug_cpu_dead);
7736 
7737 	for_each_possible_cpu(cpu)
7738 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7739 			  drain_local_stock);
7740 
7741 	for_each_node(node) {
7742 		struct mem_cgroup_tree_per_node *rtpn;
7743 
7744 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7745 
7746 		rtpn->rb_root = RB_ROOT;
7747 		rtpn->rb_rightmost = NULL;
7748 		spin_lock_init(&rtpn->lock);
7749 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7750 	}
7751 
7752 	return 0;
7753 }
7754 subsys_initcall(mem_cgroup_init);
7755 
7756 #ifdef CONFIG_SWAP
7757 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7758 {
7759 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7760 		/*
7761 	 * The root cgroup cannot be destroyed, so its refcount must
7762 		 * always be >= 1.
7763 		 */
7764 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7765 			VM_BUG_ON(1);
7766 			break;
7767 		}
7768 		memcg = parent_mem_cgroup(memcg);
7769 		if (!memcg)
7770 			memcg = root_mem_cgroup;
7771 	}
7772 	return memcg;
7773 }
7774 
7775 /**
7776  * mem_cgroup_swapout - transfer a memsw charge to swap
7777  * @folio: folio whose memsw charge to transfer
7778  * @entry: swap entry to move the charge to
7779  *
7780  * Transfer the memsw charge of @folio to @entry.
7781  */
7782 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7783 {
7784 	struct mem_cgroup *memcg, *swap_memcg;
7785 	unsigned int nr_entries;
7786 	unsigned short oldid;
7787 
7788 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7789 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7790 
7791 	if (mem_cgroup_disabled())
7792 		return;
7793 
7794 	if (!do_memsw_account())
7795 		return;
7796 
7797 	memcg = folio_memcg(folio);
7798 
7799 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7800 	if (!memcg)
7801 		return;
7802 
7803 	/*
7804 	 * In case the memcg owning these pages has been offlined and doesn't
7805 	 * have an ID allocated to it anymore, charge the closest online
7806 	 * ancestor for the swap instead and transfer the memory+swap charge.
7807 	 */
7808 	swap_memcg = mem_cgroup_id_get_online(memcg);
7809 	nr_entries = folio_nr_pages(folio);
7810 	/* Get references for the tail pages, too */
7811 	if (nr_entries > 1)
7812 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7813 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7814 				   nr_entries);
7815 	VM_BUG_ON_FOLIO(oldid, folio);
7816 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7817 
7818 	folio->memcg_data = 0;
7819 
7820 	if (!mem_cgroup_is_root(memcg))
7821 		page_counter_uncharge(&memcg->memory, nr_entries);
7822 
7823 	if (memcg != swap_memcg) {
7824 		if (!mem_cgroup_is_root(swap_memcg))
7825 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7826 		page_counter_uncharge(&memcg->memsw, nr_entries);
7827 	}
7828 
7829 	/*
7830 	 * Interrupts should be disabled here because the caller holds the
7831 	 * i_pages lock which is taken with interrupts-off. It is
7832 	 * important here to have the interrupts disabled because it is the
7833 	 * only synchronisation we have for updating the per-CPU variables.
7834 	 */
7835 	memcg_stats_lock();
7836 	mem_cgroup_charge_statistics(memcg, -nr_entries);
7837 	memcg_stats_unlock();
7838 	memcg_check_events(memcg, folio_nid(folio));
7839 
7840 	css_put(&memcg->css);
7841 }
7842 
7843 /**
7844  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7845  * @folio: folio being added to swap
7846  * @entry: swap entry to charge
7847  *
7848  * Try to charge @folio's memcg for the swap space at @entry.
7849  *
7850  * Returns 0 on success, -ENOMEM on failure.
7851  */
7852 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7853 {
7854 	unsigned int nr_pages = folio_nr_pages(folio);
7855 	struct page_counter *counter;
7856 	struct mem_cgroup *memcg;
7857 	unsigned short oldid;
7858 
7859 	if (do_memsw_account())
7860 		return 0;
7861 
7862 	memcg = folio_memcg(folio);
7863 
7864 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7865 	if (!memcg)
7866 		return 0;
7867 
7868 	if (!entry.val) {
7869 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7870 		return 0;
7871 	}
7872 
7873 	memcg = mem_cgroup_id_get_online(memcg);
7874 
7875 	if (!mem_cgroup_is_root(memcg) &&
7876 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7877 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7878 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7879 		mem_cgroup_id_put(memcg);
7880 		return -ENOMEM;
7881 	}
7882 
7883 	/* Get references for the tail pages, too */
7884 	if (nr_pages > 1)
7885 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7886 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7887 	VM_BUG_ON_FOLIO(oldid, folio);
7888 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7889 
7890 	return 0;
7891 }
7892 
7893 /**
7894  * __mem_cgroup_uncharge_swap - uncharge swap space
7895  * @entry: swap entry to uncharge
7896  * @nr_pages: the amount of swap space to uncharge
7897  */
7898 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7899 {
7900 	struct mem_cgroup *memcg;
7901 	unsigned short id;
7902 
7903 	id = swap_cgroup_record(entry, 0, nr_pages);
7904 	rcu_read_lock();
7905 	memcg = mem_cgroup_from_id(id);
7906 	if (memcg) {
7907 		if (!mem_cgroup_is_root(memcg)) {
7908 			if (do_memsw_account())
7909 				page_counter_uncharge(&memcg->memsw, nr_pages);
7910 			else
7911 				page_counter_uncharge(&memcg->swap, nr_pages);
7912 		}
7913 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7914 		mem_cgroup_id_put_many(memcg, nr_pages);
7915 	}
7916 	rcu_read_unlock();
7917 }
7918 
7919 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7920 {
7921 	long nr_swap_pages = get_nr_swap_pages();
7922 
7923 	if (mem_cgroup_disabled() || do_memsw_account())
7924 		return nr_swap_pages;
7925 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7926 		nr_swap_pages = min_t(long, nr_swap_pages,
7927 				      READ_ONCE(memcg->swap.max) -
7928 				      page_counter_read(&memcg->swap));
7929 	return nr_swap_pages;
7930 }
7931 
7932 bool mem_cgroup_swap_full(struct folio *folio)
7933 {
7934 	struct mem_cgroup *memcg;
7935 
7936 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7937 
7938 	if (vm_swap_full())
7939 		return true;
7940 	if (do_memsw_account())
7941 		return false;
7942 
7943 	memcg = folio_memcg(folio);
7944 	if (!memcg)
7945 		return false;
7946 
7947 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7948 		unsigned long usage = page_counter_read(&memcg->swap);
7949 
7950 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7951 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7952 			return true;
7953 	}
7954 
7955 	return false;
7956 }
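/*
 * Editorial illustration (not part of the kernel source): the
 * "usage * 2 >= limit" checks above treat a cgroup's swap as "full"
 * once it is at least half used.  E.g. with memory.swap.max = 1 GiB
 * (262144 pages, assuming 4 KiB pages), mem_cgroup_swap_full() starts
 * returning true once page_counter_read(&memcg->swap) reaches 131072
 * pages (512 MiB), for this cgroup or any of its ancestors.
 */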
7957 
7958 static int __init setup_swap_account(char *s)
7959 {
7960 	pr_warn_once("The swapaccount= commandline option is deprecated. "
7961 		     "Please report your usecase to linux-mm@kvack.org if you "
7962 		     "depend on this functionality.\n");
7963 	return 1;
7964 }
7965 __setup("swapaccount=", setup_swap_account);
7966 
7967 static u64 swap_current_read(struct cgroup_subsys_state *css,
7968 			     struct cftype *cft)
7969 {
7970 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7971 
7972 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7973 }
7974 
7975 static u64 swap_peak_read(struct cgroup_subsys_state *css,
7976 			  struct cftype *cft)
7977 {
7978 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7979 
7980 	return (u64)memcg->swap.watermark * PAGE_SIZE;
7981 }
7982 
7983 static int swap_high_show(struct seq_file *m, void *v)
7984 {
7985 	return seq_puts_memcg_tunable(m,
7986 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7987 }
7988 
7989 static ssize_t swap_high_write(struct kernfs_open_file *of,
7990 			       char *buf, size_t nbytes, loff_t off)
7991 {
7992 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7993 	unsigned long high;
7994 	int err;
7995 
7996 	buf = strstrip(buf);
7997 	err = page_counter_memparse(buf, "max", &high);
7998 	if (err)
7999 		return err;
8000 
8001 	page_counter_set_high(&memcg->swap, high);
8002 
8003 	return nbytes;
8004 }
8005 
8006 static int swap_max_show(struct seq_file *m, void *v)
8007 {
8008 	return seq_puts_memcg_tunable(m,
8009 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
8010 }
8011 
8012 static ssize_t swap_max_write(struct kernfs_open_file *of,
8013 			      char *buf, size_t nbytes, loff_t off)
8014 {
8015 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8016 	unsigned long max;
8017 	int err;
8018 
8019 	buf = strstrip(buf);
8020 	err = page_counter_memparse(buf, "max", &max);
8021 	if (err)
8022 		return err;
8023 
8024 	xchg(&memcg->swap.max, max);
8025 
8026 	return nbytes;
8027 }
8028 
8029 static int swap_events_show(struct seq_file *m, void *v)
8030 {
8031 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8032 
8033 	seq_printf(m, "high %lu\n",
8034 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
8035 	seq_printf(m, "max %lu\n",
8036 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8037 	seq_printf(m, "fail %lu\n",
8038 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8039 
8040 	return 0;
8041 }
8042 
8043 static struct cftype swap_files[] = {
8044 	{
8045 		.name = "swap.current",
8046 		.flags = CFTYPE_NOT_ON_ROOT,
8047 		.read_u64 = swap_current_read,
8048 	},
8049 	{
8050 		.name = "swap.high",
8051 		.flags = CFTYPE_NOT_ON_ROOT,
8052 		.seq_show = swap_high_show,
8053 		.write = swap_high_write,
8054 	},
8055 	{
8056 		.name = "swap.max",
8057 		.flags = CFTYPE_NOT_ON_ROOT,
8058 		.seq_show = swap_max_show,
8059 		.write = swap_max_write,
8060 	},
8061 	{
8062 		.name = "swap.peak",
8063 		.flags = CFTYPE_NOT_ON_ROOT,
8064 		.read_u64 = swap_peak_read,
8065 	},
8066 	{
8067 		.name = "swap.events",
8068 		.flags = CFTYPE_NOT_ON_ROOT,
8069 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
8070 		.seq_show = swap_events_show,
8071 	},
8072 	{ }	/* terminate */
8073 };
8074 
8075 static struct cftype memsw_files[] = {
8076 	{
8077 		.name = "memsw.usage_in_bytes",
8078 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8079 		.read_u64 = mem_cgroup_read_u64,
8080 	},
8081 	{
8082 		.name = "memsw.max_usage_in_bytes",
8083 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8084 		.write = mem_cgroup_reset,
8085 		.read_u64 = mem_cgroup_read_u64,
8086 	},
8087 	{
8088 		.name = "memsw.limit_in_bytes",
8089 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8090 		.write = mem_cgroup_write,
8091 		.read_u64 = mem_cgroup_read_u64,
8092 	},
8093 	{
8094 		.name = "memsw.failcnt",
8095 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8096 		.write = mem_cgroup_reset,
8097 		.read_u64 = mem_cgroup_read_u64,
8098 	},
8099 	{ },	/* terminate */
8100 };
8101 
8102 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8103 /**
8104  * obj_cgroup_may_zswap - check if this cgroup can zswap
8105  * @objcg: the object cgroup
8106  *
8107  * Check if the hierarchical zswap limit has been reached.
8108  *
8109  * This doesn't check for specific headroom, and it is not atomic
8110  * either. But with zswap, the size of the allocation is only known
8111  * once compression has occurred, and this optimistic pre-check avoids
8112  * spending cycles on compression when there is already no room left
8113  * or zswap is disabled altogether somewhere in the hierarchy.
8114  */
8115 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8116 {
8117 	struct mem_cgroup *memcg, *original_memcg;
8118 	bool ret = true;
8119 
8120 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8121 		return true;
8122 
8123 	original_memcg = get_mem_cgroup_from_objcg(objcg);
8124 	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
8125 	     memcg = parent_mem_cgroup(memcg)) {
8126 		unsigned long max = READ_ONCE(memcg->zswap_max);
8127 		unsigned long pages;
8128 
8129 		if (max == PAGE_COUNTER_MAX)
8130 			continue;
8131 		if (max == 0) {
8132 			ret = false;
8133 			break;
8134 		}
8135 
8136 		/*
8137 		 * mem_cgroup_flush_stats() ignores small changes. Use
8138 		 * do_flush_stats() directly to get accurate stats for charging.
8139 		 */
8140 		do_flush_stats(memcg);
8141 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8142 		if (pages < max)
8143 			continue;
8144 		ret = false;
8145 		break;
8146 	}
8147 	mem_cgroup_put(original_memcg);
8148 	return ret;
8149 }
8150 
8151 /**
8152  * obj_cgroup_charge_zswap - charge compression backend memory
8153  * @objcg: the object cgroup
8154  * @size: size of compressed object
8155  *
8156  * This forces the charge after obj_cgroup_may_zswap() allowed
8157  * compression and storage in zswap for this cgroup to go ahead.
8158  */
8159 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8160 {
8161 	struct mem_cgroup *memcg;
8162 
8163 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8164 		return;
8165 
8166 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8167 
8168 	/* PF_MEMALLOC context, charging must succeed */
8169 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8170 		VM_WARN_ON_ONCE(1);
8171 
8172 	rcu_read_lock();
8173 	memcg = obj_cgroup_memcg(objcg);
8174 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8175 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8176 	rcu_read_unlock();
8177 }
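/*
 * Editorial sketch (not part of the kernel source): how the zswap
 * store path is expected to combine the two helpers above, loosely
 * modeled on zswap_store().  The pre-check is optimistic; the charge
 * uses the real compressed size once it is known.  compress() is a
 * hypothetical placeholder.
 *
 *	objcg = get_obj_cgroup_from_folio(folio);
 *	if (objcg && !obj_cgroup_may_zswap(objcg))
 *		goto reject;			// over the zswap limit
 *
 *	dlen = compress(folio);			// hypothetical helper
 *	if (objcg)
 *		obj_cgroup_charge_zswap(objcg, dlen);
 *	...
 *	obj_cgroup_put(objcg);
 */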
8178 
8179 /**
8180  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8181  * @objcg: the object cgroup
8182  * @size: size of compressed object
8183  *
8184  * Uncharges zswap memory on page-in.
8185  */
8186 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8187 {
8188 	struct mem_cgroup *memcg;
8189 
8190 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8191 		return;
8192 
8193 	obj_cgroup_uncharge(objcg, size);
8194 
8195 	rcu_read_lock();
8196 	memcg = obj_cgroup_memcg(objcg);
8197 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8198 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8199 	rcu_read_unlock();
8200 }
8201 
8202 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8203 {
8204 	/* if zswap is disabled, do not block pages going to the swapping device */
8205 	return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8206 }
8207 
8208 static u64 zswap_current_read(struct cgroup_subsys_state *css,
8209 			      struct cftype *cft)
8210 {
8211 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8212 
8213 	mem_cgroup_flush_stats(memcg);
8214 	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
8215 }
8216 
8217 static int zswap_max_show(struct seq_file *m, void *v)
8218 {
8219 	return seq_puts_memcg_tunable(m,
8220 		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8221 }
8222 
8223 static ssize_t zswap_max_write(struct kernfs_open_file *of,
8224 			       char *buf, size_t nbytes, loff_t off)
8225 {
8226 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8227 	unsigned long max;
8228 	int err;
8229 
8230 	buf = strstrip(buf);
8231 	err = page_counter_memparse(buf, "max", &max);
8232 	if (err)
8233 		return err;
8234 
8235 	xchg(&memcg->zswap_max, max);
8236 
8237 	return nbytes;
8238 }
8239 
8240 static int zswap_writeback_show(struct seq_file *m, void *v)
8241 {
8242 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8243 
8244 	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8245 	return 0;
8246 }
8247 
8248 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8249 				char *buf, size_t nbytes, loff_t off)
8250 {
8251 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8252 	int zswap_writeback;
8253 	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8254 
8255 	if (parse_ret)
8256 		return parse_ret;
8257 
8258 	if (zswap_writeback != 0 && zswap_writeback != 1)
8259 		return -EINVAL;
8260 
8261 	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8262 	return nbytes;
8263 }
8264 
8265 static struct cftype zswap_files[] = {
8266 	{
8267 		.name = "zswap.current",
8268 		.flags = CFTYPE_NOT_ON_ROOT,
8269 		.read_u64 = zswap_current_read,
8270 	},
8271 	{
8272 		.name = "zswap.max",
8273 		.flags = CFTYPE_NOT_ON_ROOT,
8274 		.seq_show = zswap_max_show,
8275 		.write = zswap_max_write,
8276 	},
8277 	{
8278 		.name = "zswap.writeback",
8279 		.seq_show = zswap_writeback_show,
8280 		.write = zswap_writeback_write,
8281 	},
8282 	{ }	/* terminate */
8283 };
8284 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8285 
8286 static int __init mem_cgroup_swap_init(void)
8287 {
8288 	if (mem_cgroup_disabled())
8289 		return 0;
8290 
8291 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8292 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
8293 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8294 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8295 #endif
8296 	return 0;
8297 }
8298 subsys_initcall(mem_cgroup_swap_init);
8299 
8300 #endif /* CONFIG_SWAP */
8301